32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
int Imm) {
50 return SPIRV::SelectionControl::Flatten;
52 return SPIRV::SelectionControl::DontFlatten;
54 return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
115 unsigned BitSetOpcode)
const;
119 unsigned BitSetOpcode)
const;
123 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
127 unsigned BitSetOpcode,
128 bool SwapPrimarySide)
const;
135 unsigned Opcode)
const;
138 unsigned Opcode)
const;
155 unsigned NegateOpcode = 0)
const;
215 template <
bool Signed>
218 template <
bool Signed>
239 bool IsSigned,
unsigned Opcode)
const;
241 bool IsSigned)
const;
247 bool IsSigned)
const;
286 GL::GLSLExtInst GLInst)
const;
291 GL::GLSLExtInst GLInst)
const;
313 bool selectCounterHandleFromBinding(
Register &ResVReg,
322 bool selectResourceNonUniformIndex(
Register &ResVReg,
332 std::pair<Register, bool>
334 const SPIRVType *ResType =
nullptr)
const;
346 SPIRV::StorageClass::StorageClass SC)
const;
353 SPIRV::StorageClass::StorageClass SC,
365 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
368 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
373 void decorateUsesAsNonUniform(
Register &NonUniformReg)
const;
376bool sampledTypeIsSignedInteger(
const llvm::Type *HandleType) {
378 if (
TET->getTargetExtName() ==
"spirv.Image") {
381 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
382 return TET->getTypeParameter(0)->isIntegerTy();
386#define GET_GLOBALISEL_IMPL
387#include "SPIRVGenGlobalISel.inc"
388#undef GET_GLOBALISEL_IMPL
394 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
397#include
"SPIRVGenGlobalISel.inc"
400#include
"SPIRVGenGlobalISel.inc"
412 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
416void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
417 if (HasVRegsReset == &MF)
422 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
424 LLT RegType =
MRI.getType(
Reg);
432 for (
const auto &
MBB : MF) {
433 for (
const auto &
MI :
MBB) {
436 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
440 LLT DstType =
MRI.getType(DstReg);
442 LLT SrcType =
MRI.getType(SrcReg);
443 if (DstType != SrcType)
444 MRI.setType(DstReg,
MRI.getType(SrcReg));
446 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
447 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
448 if (DstRC != SrcRC && SrcRC)
449 MRI.setRegClass(DstReg, SrcRC);
465 case TargetOpcode::G_CONSTANT:
466 case TargetOpcode::G_FCONSTANT:
468 case TargetOpcode::G_INTRINSIC:
469 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
470 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
472 Intrinsic::spv_const_composite;
473 case TargetOpcode::G_BUILD_VECTOR:
474 case TargetOpcode::G_SPLAT_VECTOR: {
485 case SPIRV::OpConstantTrue:
486 case SPIRV::OpConstantFalse:
487 case SPIRV::OpConstantI:
488 case SPIRV::OpConstantF:
489 case SPIRV::OpConstantComposite:
490 case SPIRV::OpConstantCompositeContinuedINTEL:
491 case SPIRV::OpConstantSampler:
492 case SPIRV::OpConstantNull:
494 case SPIRV::OpConstantFunctionPointerINTEL:
510 for (
const auto &MO :
MI.all_defs()) {
512 if (
Reg.isPhysical() || !
MRI.use_nodbg_empty(
Reg))
515 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
516 MI.isLifetimeMarker())
520 if (
MI.mayStore() ||
MI.isCall() ||
521 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
522 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
527bool SPIRVInstructionSelector::select(MachineInstr &
I) {
528 resetVRegsType(*
I.getParent()->getParent());
530 assert(
I.getParent() &&
"Instruction should be in a basic block!");
531 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
536 if (Opcode == SPIRV::ASSIGN_TYPE) {
537 Register DstReg =
I.getOperand(0).getReg();
538 Register SrcReg =
I.getOperand(1).getReg();
539 auto *
Def =
MRI->getVRegDef(SrcReg);
541 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
542 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
544 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
545 Register SelectDstReg =
Def->getOperand(0).getReg();
549 Def->removeFromParent();
550 MRI->replaceRegWith(DstReg, SelectDstReg);
552 I.removeFromParent();
554 Res = selectImpl(
I, *CoverageInfo);
556 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
557 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
561 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
568 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
569 MRI->replaceRegWith(SrcReg, DstReg);
571 I.removeFromParent();
573 }
else if (
I.getNumDefs() == 1) {
580 if (DeadMIs.contains(&
I)) {
590 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
591 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
597 bool HasDefs =
I.getNumDefs() > 0;
600 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
601 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
602 if (spvSelect(ResVReg, ResType,
I)) {
604 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
607 I.removeFromParent();
615 case TargetOpcode::G_CONSTANT:
616 case TargetOpcode::G_FCONSTANT:
618 case TargetOpcode::G_SADDO:
619 case TargetOpcode::G_SSUBO:
626 MachineInstr &
I)
const {
627 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
628 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
629 if (DstRC != SrcRC && SrcRC)
630 MRI->setRegClass(DestReg, SrcRC);
631 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
632 TII.get(TargetOpcode::COPY))
638bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
640 MachineInstr &
I)
const {
641 const unsigned Opcode =
I.getOpcode();
643 return selectImpl(
I, *CoverageInfo);
645 case TargetOpcode::G_CONSTANT:
646 case TargetOpcode::G_FCONSTANT:
647 return selectConst(ResVReg, ResType,
I);
648 case TargetOpcode::G_GLOBAL_VALUE:
649 return selectGlobalValue(ResVReg,
I);
650 case TargetOpcode::G_IMPLICIT_DEF:
651 return selectOpUndef(ResVReg, ResType,
I);
652 case TargetOpcode::G_FREEZE:
653 return selectFreeze(ResVReg, ResType,
I);
655 case TargetOpcode::G_INTRINSIC:
656 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
657 case TargetOpcode::G_INTRINSIC_CONVERGENT:
658 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
659 return selectIntrinsic(ResVReg, ResType,
I);
660 case TargetOpcode::G_BITREVERSE:
661 return selectBitreverse(ResVReg, ResType,
I);
663 case TargetOpcode::G_BUILD_VECTOR:
664 return selectBuildVector(ResVReg, ResType,
I);
665 case TargetOpcode::G_SPLAT_VECTOR:
666 return selectSplatVector(ResVReg, ResType,
I);
668 case TargetOpcode::G_SHUFFLE_VECTOR: {
669 MachineBasicBlock &BB = *
I.getParent();
670 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
673 .
addUse(
I.getOperand(1).getReg())
674 .
addUse(
I.getOperand(2).getReg());
675 for (
auto V :
I.getOperand(3).getShuffleMask())
679 case TargetOpcode::G_MEMMOVE:
680 case TargetOpcode::G_MEMCPY:
681 case TargetOpcode::G_MEMSET:
682 return selectMemOperation(ResVReg,
I);
684 case TargetOpcode::G_ICMP:
685 return selectICmp(ResVReg, ResType,
I);
686 case TargetOpcode::G_FCMP:
687 return selectFCmp(ResVReg, ResType,
I);
689 case TargetOpcode::G_FRAME_INDEX:
690 return selectFrameIndex(ResVReg, ResType,
I);
692 case TargetOpcode::G_LOAD:
693 return selectLoad(ResVReg, ResType,
I);
694 case TargetOpcode::G_STORE:
695 return selectStore(
I);
697 case TargetOpcode::G_BR:
698 return selectBranch(
I);
699 case TargetOpcode::G_BRCOND:
700 return selectBranchCond(
I);
702 case TargetOpcode::G_PHI:
703 return selectPhi(ResVReg, ResType,
I);
705 case TargetOpcode::G_FPTOSI:
706 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
707 case TargetOpcode::G_FPTOUI:
708 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
710 case TargetOpcode::G_FPTOSI_SAT:
711 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
712 case TargetOpcode::G_FPTOUI_SAT:
713 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
715 case TargetOpcode::G_SITOFP:
716 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
717 case TargetOpcode::G_UITOFP:
718 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
720 case TargetOpcode::G_CTPOP:
721 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
722 case TargetOpcode::G_SMIN:
723 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
724 case TargetOpcode::G_UMIN:
725 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
727 case TargetOpcode::G_SMAX:
728 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
729 case TargetOpcode::G_UMAX:
730 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
732 case TargetOpcode::G_SCMP:
733 return selectSUCmp(ResVReg, ResType,
I,
true);
734 case TargetOpcode::G_UCMP:
735 return selectSUCmp(ResVReg, ResType,
I,
false);
736 case TargetOpcode::G_LROUND:
737 case TargetOpcode::G_LLROUND: {
739 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
740 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
742 regForLround, *(
I.getParent()->getParent()));
744 I, CL::round, GL::Round);
746 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
752 case TargetOpcode::G_STRICT_FMA:
753 case TargetOpcode::G_FMA:
754 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
756 case TargetOpcode::G_STRICT_FLDEXP:
757 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
759 case TargetOpcode::G_FPOW:
760 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
761 case TargetOpcode::G_FPOWI:
762 return selectExtInst(ResVReg, ResType,
I, CL::pown);
764 case TargetOpcode::G_FEXP:
765 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
766 case TargetOpcode::G_FEXP2:
767 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
768 case TargetOpcode::G_FMODF:
769 return selectModf(ResVReg, ResType,
I);
771 case TargetOpcode::G_FLOG:
772 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
773 case TargetOpcode::G_FLOG2:
774 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
775 case TargetOpcode::G_FLOG10:
776 return selectLog10(ResVReg, ResType,
I);
778 case TargetOpcode::G_FABS:
779 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
780 case TargetOpcode::G_ABS:
781 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
783 case TargetOpcode::G_FMINNUM:
784 case TargetOpcode::G_FMINIMUM:
785 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
786 case TargetOpcode::G_FMAXNUM:
787 case TargetOpcode::G_FMAXIMUM:
788 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
790 case TargetOpcode::G_FCOPYSIGN:
791 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
793 case TargetOpcode::G_FCEIL:
794 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
795 case TargetOpcode::G_FFLOOR:
796 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
798 case TargetOpcode::G_FCOS:
799 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
800 case TargetOpcode::G_FSIN:
801 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
802 case TargetOpcode::G_FTAN:
803 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
804 case TargetOpcode::G_FACOS:
805 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
806 case TargetOpcode::G_FASIN:
807 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
808 case TargetOpcode::G_FATAN:
809 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
810 case TargetOpcode::G_FATAN2:
811 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
812 case TargetOpcode::G_FCOSH:
813 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
814 case TargetOpcode::G_FSINH:
815 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
816 case TargetOpcode::G_FTANH:
817 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
819 case TargetOpcode::G_STRICT_FSQRT:
820 case TargetOpcode::G_FSQRT:
821 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
823 case TargetOpcode::G_CTTZ:
824 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
825 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
826 case TargetOpcode::G_CTLZ:
827 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
828 return selectExtInst(ResVReg, ResType,
I, CL::clz);
830 case TargetOpcode::G_INTRINSIC_ROUND:
831 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
832 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
833 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
834 case TargetOpcode::G_INTRINSIC_TRUNC:
835 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
836 case TargetOpcode::G_FRINT:
837 case TargetOpcode::G_FNEARBYINT:
838 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
840 case TargetOpcode::G_SMULH:
841 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
842 case TargetOpcode::G_UMULH:
843 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
845 case TargetOpcode::G_SADDSAT:
846 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
847 case TargetOpcode::G_UADDSAT:
848 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
849 case TargetOpcode::G_SSUBSAT:
850 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
851 case TargetOpcode::G_USUBSAT:
852 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
854 case TargetOpcode::G_FFREXP:
855 return selectFrexp(ResVReg, ResType,
I);
857 case TargetOpcode::G_UADDO:
858 return selectOverflowArith(ResVReg, ResType,
I,
859 ResType->
getOpcode() == SPIRV::OpTypeVector
860 ? SPIRV::OpIAddCarryV
861 : SPIRV::OpIAddCarryS);
862 case TargetOpcode::G_USUBO:
863 return selectOverflowArith(ResVReg, ResType,
I,
864 ResType->
getOpcode() == SPIRV::OpTypeVector
865 ? SPIRV::OpISubBorrowV
866 : SPIRV::OpISubBorrowS);
867 case TargetOpcode::G_UMULO:
868 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
869 case TargetOpcode::G_SMULO:
870 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
872 case TargetOpcode::G_SEXT:
873 return selectExt(ResVReg, ResType,
I,
true);
874 case TargetOpcode::G_ANYEXT:
875 case TargetOpcode::G_ZEXT:
876 return selectExt(ResVReg, ResType,
I,
false);
877 case TargetOpcode::G_TRUNC:
878 return selectTrunc(ResVReg, ResType,
I);
879 case TargetOpcode::G_FPTRUNC:
880 case TargetOpcode::G_FPEXT:
881 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
883 case TargetOpcode::G_PTRTOINT:
884 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
885 case TargetOpcode::G_INTTOPTR:
886 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
887 case TargetOpcode::G_BITCAST:
888 return selectBitcast(ResVReg, ResType,
I);
889 case TargetOpcode::G_ADDRSPACE_CAST:
890 return selectAddrSpaceCast(ResVReg, ResType,
I);
891 case TargetOpcode::G_PTR_ADD: {
893 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
897 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
898 (*II).getOpcode() == TargetOpcode::COPY ||
899 (*II).getOpcode() == SPIRV::OpVariable) &&
902 bool IsGVInit =
false;
904 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
905 UseEnd =
MRI->use_instr_end();
906 UseIt != UseEnd; UseIt = std::next(UseIt)) {
907 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
908 (*UseIt).getOpcode() == SPIRV::OpVariable) {
918 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
921 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
922 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
931 "incompatible result and operand types in a bitcast");
933 MachineInstrBuilder MIB =
934 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
941 ? SPIRV::OpInBoundsAccessChain
942 : SPIRV::OpInBoundsPtrAccessChain))
946 .
addUse(
I.getOperand(2).getReg())
949 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
953 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
955 .
addUse(
I.getOperand(2).getReg())
963 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
966 .
addImm(
static_cast<uint32_t
>(
967 SPIRV::Opcode::InBoundsPtrAccessChain))
970 .
addUse(
I.getOperand(2).getReg());
974 case TargetOpcode::G_ATOMICRMW_OR:
975 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
976 case TargetOpcode::G_ATOMICRMW_ADD:
977 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
978 case TargetOpcode::G_ATOMICRMW_AND:
979 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
980 case TargetOpcode::G_ATOMICRMW_MAX:
981 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
982 case TargetOpcode::G_ATOMICRMW_MIN:
983 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
984 case TargetOpcode::G_ATOMICRMW_SUB:
985 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
986 case TargetOpcode::G_ATOMICRMW_XOR:
987 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
988 case TargetOpcode::G_ATOMICRMW_UMAX:
989 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
990 case TargetOpcode::G_ATOMICRMW_UMIN:
991 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
992 case TargetOpcode::G_ATOMICRMW_XCHG:
993 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
994 case TargetOpcode::G_ATOMIC_CMPXCHG:
995 return selectAtomicCmpXchg(ResVReg, ResType,
I);
997 case TargetOpcode::G_ATOMICRMW_FADD:
998 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
999 case TargetOpcode::G_ATOMICRMW_FSUB:
1001 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
1003 case TargetOpcode::G_ATOMICRMW_FMIN:
1004 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
1005 case TargetOpcode::G_ATOMICRMW_FMAX:
1006 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1008 case TargetOpcode::G_FENCE:
1009 return selectFence(
I);
1011 case TargetOpcode::G_STACKSAVE:
1012 return selectStackSave(ResVReg, ResType,
I);
1013 case TargetOpcode::G_STACKRESTORE:
1014 return selectStackRestore(
I);
1016 case TargetOpcode::G_UNMERGE_VALUES:
1022 case TargetOpcode::G_TRAP:
1023 case TargetOpcode::G_UBSANTRAP:
1024 case TargetOpcode::DBG_LABEL:
1026 case TargetOpcode::G_DEBUGTRAP:
1027 return selectDebugTrap(ResVReg, ResType,
I);
1034bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1036 MachineInstr &
I)
const {
1037 unsigned Opcode = SPIRV::OpNop;
1039 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1043bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1046 GL::GLSLExtInst GLInst)
const {
1048 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1049 std::string DiagMsg;
1050 raw_string_ostream OS(DiagMsg);
1051 I.print(OS,
true,
false,
false,
false);
1052 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1055 return selectExtInst(ResVReg, ResType,
I,
1056 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
1059bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1062 CL::OpenCLExtInst CLInst)
const {
1063 return selectExtInst(ResVReg, ResType,
I,
1064 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
1067bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1070 CL::OpenCLExtInst CLInst,
1071 GL::GLSLExtInst GLInst)
const {
1072 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1073 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1074 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
1077bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1082 for (
const auto &Ex : Insts) {
1083 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1084 uint32_t Opcode = Ex.second;
1087 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1090 .
addImm(
static_cast<uint32_t
>(Set))
1093 const unsigned NumOps =
I.getNumOperands();
1096 I.getOperand(Index).getType() ==
1097 MachineOperand::MachineOperandType::MO_IntrinsicID)
1100 MIB.
add(
I.getOperand(Index));
1106bool SPIRVInstructionSelector::selectExtInstForLRound(
1108 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst)
const {
1109 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1110 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1111 return selectExtInstForLRound(ResVReg, ResType,
I, ExtInsts);
1114bool SPIRVInstructionSelector::selectExtInstForLRound(
1117 for (
const auto &Ex : Insts) {
1118 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1119 uint32_t Opcode = Ex.second;
1122 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1125 .
addImm(
static_cast<uint32_t
>(Set))
1127 const unsigned NumOps =
I.getNumOperands();
1130 I.getOperand(Index).getType() ==
1131 MachineOperand::MachineOperandType::MO_IntrinsicID)
1134 MIB.
add(
I.getOperand(Index));
1142bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1144 MachineInstr &
I)
const {
1145 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1146 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1147 for (
const auto &Ex : ExtInsts) {
1148 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1149 uint32_t Opcode = Ex.second;
1153 MachineIRBuilder MIRBuilder(
I);
1156 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1161 auto MIB =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
1162 TII.get(SPIRV::OpVariable))
1165 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1169 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1172 .
addImm(
static_cast<uint32_t
>(Ex.first))
1174 .
add(
I.getOperand(2))
1179 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1180 .
addDef(
I.getOperand(1).getReg())
1189bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1192 std::vector<Register> Srcs,
1193 unsigned Opcode)
const {
1194 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1203bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1206 unsigned Opcode)
const {
1208 Register SrcReg =
I.getOperand(1).getReg();
1211 MRI->def_instr_begin(SrcReg);
1212 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1213 unsigned DefOpCode = DefIt->getOpcode();
1214 if (DefOpCode == SPIRV::ASSIGN_TYPE) {
1217 if (
auto *VRD =
getVRegDef(*
MRI, DefIt->getOperand(1).getReg()))
1218 DefOpCode = VRD->getOpcode();
1220 if (DefOpCode == TargetOpcode::G_GLOBAL_VALUE ||
1221 DefOpCode == TargetOpcode::G_CONSTANT ||
1222 DefOpCode == SPIRV::OpVariable || DefOpCode == SPIRV::OpConstantI) {
1228 uint32_t SpecOpcode = 0;
1230 case SPIRV::OpConvertPtrToU:
1231 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1233 case SPIRV::OpConvertUToPtr:
1234 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1238 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1239 TII.get(SPIRV::OpSpecConstantOp))
1247 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1251bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1253 MachineInstr &
I)
const {
1254 Register OpReg =
I.getOperand(1).getReg();
1258 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1266 if (
MemOp->isVolatile())
1267 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1268 if (
MemOp->isNonTemporal())
1269 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1270 if (
MemOp->getAlign().value())
1271 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1277 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1278 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1282 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1284 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1288 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1292 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1294 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1306 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1308 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1310 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1314bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1316 MachineInstr &
I)
const {
1323 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1324 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1326 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1328 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1330 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1334 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1335 return generateImageReadOrFetch(ResVReg, ResType, NewHandleReg, IdxReg,
1336 I.getDebugLoc(),
I);
1340 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1344 if (!
I.getNumMemOperands()) {
1345 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1347 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1350 MachineIRBuilder MIRBuilder(
I);
1356bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1358 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1364 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1365 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1367 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1370 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1374 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1375 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1376 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1377 TII.get(SPIRV::OpImageWrite))
1383 if (sampledTypeIsSignedInteger(LLVMHandleType))
1386 return BMI.constrainAllUses(
TII,
TRI, RBI);
1391 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1394 if (!
I.getNumMemOperands()) {
1395 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1397 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1400 MachineIRBuilder MIRBuilder(
I);
1406bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1408 MachineInstr &
I)
const {
1409 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1411 "llvm.stacksave intrinsic: this instruction requires the following "
1412 "SPIR-V extension: SPV_INTEL_variable_length_array",
1415 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1421bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1422 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1424 "llvm.stackrestore intrinsic: this instruction requires the following "
1425 "SPIR-V extension: SPV_INTEL_variable_length_array",
1427 if (!
I.getOperand(0).isReg())
1430 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1431 .
addUse(
I.getOperand(0).getReg())
1435bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1436 MachineInstr &
I)
const {
1438 Register SrcReg =
I.getOperand(1).getReg();
1440 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1441 MachineIRBuilder MIRBuilder(
I);
1442 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1445 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1446 Type *ArrTy = ArrayType::get(ValTy, Num);
1448 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1451 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1458 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1463 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1466 .
addImm(SPIRV::StorageClass::UniformConstant)
1475 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1477 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1479 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1480 .
addUse(
I.getOperand(0).getReg())
1482 .
addUse(
I.getOperand(2).getReg());
1483 if (
I.getNumMemOperands()) {
1484 MachineIRBuilder MIRBuilder(
I);
1493bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1497 unsigned NegateOpcode)
const {
1500 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1503 auto ScopeConstant = buildI32Constant(Scope,
I);
1504 Register ScopeReg = ScopeConstant.first;
1505 Result &= ScopeConstant.second;
1513 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1514 Register MemSemReg = MemSemConstant.first;
1515 Result &= MemSemConstant.second;
1517 Register ValueReg =
I.getOperand(2).getReg();
1518 if (NegateOpcode != 0) {
1521 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1526 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1536bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1537 unsigned ArgI =
I.getNumOperands() - 1;
1539 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1542 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1544 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1550 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1551 Register ResVReg =
I.getOperand(i).getReg();
1555 ResType = ScalarType;
1561 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1565 .
addImm(
static_cast<int64_t
>(i));
1571bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1574 auto MemSemConstant = buildI32Constant(MemSem,
I);
1575 Register MemSemReg = MemSemConstant.first;
1576 bool Result = MemSemConstant.second;
1578 uint32_t
Scope =
static_cast<uint32_t
>(
1580 auto ScopeConstant = buildI32Constant(Scope,
I);
1581 Register ScopeReg = ScopeConstant.first;
1582 Result &= ScopeConstant.second;
1585 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1591bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1594 unsigned Opcode)
const {
1595 Type *ResTy =
nullptr;
1599 "Not enough info to select the arithmetic with overflow instruction");
1602 "with overflow instruction");
1608 MachineIRBuilder MIRBuilder(
I);
1610 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1611 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1617 Register ZeroReg = buildZerosVal(ResType,
I);
1620 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1622 if (ResName.
size() > 0)
1627 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1630 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1631 MIB.
addUse(
I.getOperand(i).getReg());
1636 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1637 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1639 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1640 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1648 .
addDef(
I.getOperand(1).getReg())
1655bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1657 MachineInstr &
I)
const {
1665 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1668 auto ScopeConstant = buildI32Constant(Scope,
I);
1669 ScopeReg = ScopeConstant.first;
1670 Result &= ScopeConstant.second;
1672 unsigned ScSem =
static_cast<uint32_t
>(
1675 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
1676 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1677 MemSemEqReg = MemSemEqConstant.first;
1678 Result &= MemSemEqConstant.second;
1680 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
1681 if (MemSemEq == MemSemNeq)
1682 MemSemNeqReg = MemSemEqReg;
1684 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1685 MemSemNeqReg = MemSemNeqConstant.first;
1686 Result &= MemSemNeqConstant.second;
1689 ScopeReg =
I.getOperand(5).getReg();
1690 MemSemEqReg =
I.getOperand(6).getReg();
1691 MemSemNeqReg =
I.getOperand(7).getReg();
1695 Register Val =
I.getOperand(4).getReg();
1700 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1727 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1738 case SPIRV::StorageClass::DeviceOnlyINTEL:
1739 case SPIRV::StorageClass::HostOnlyINTEL:
1748 bool IsGRef =
false;
1749 bool IsAllowedRefs =
1750 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
1751 unsigned Opcode = It.getOpcode();
1752 if (Opcode == SPIRV::OpConstantComposite ||
1753 Opcode == SPIRV::OpVariable ||
1754 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1755 return IsGRef = true;
1756 return Opcode == SPIRV::OpName;
1758 return IsAllowedRefs && IsGRef;
1761Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1762 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1764 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
1768SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
1770 uint32_t Opcode)
const {
1771 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1772 TII.get(SPIRV::OpSpecConstantOp))
1780SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
1784 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1786 SPIRV::StorageClass::Generic),
1788 MachineFunction *MF =
I.getParent()->getParent();
1790 MachineInstrBuilder MIB = buildSpecConstantOp(
1792 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
1802bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1804 MachineInstr &
I)
const {
1808 Register SrcPtr =
I.getOperand(1).getReg();
1812 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1813 ResType->
getOpcode() != SPIRV::OpTypePointer)
1814 return BuildCOPY(ResVReg, SrcPtr,
I);
1824 unsigned SpecOpcode =
1826 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
1829 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
1836 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1837 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1838 .constrainAllUses(
TII,
TRI, RBI);
1840 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
1842 buildSpecConstantOp(
1844 getUcharPtrTypeReg(
I, DstSC),
1845 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
1846 .constrainAllUses(
TII,
TRI, RBI);
1852 return BuildCOPY(ResVReg, SrcPtr,
I);
1854 if ((SrcSC == SPIRV::StorageClass::Function &&
1855 DstSC == SPIRV::StorageClass::Private) ||
1856 (DstSC == SPIRV::StorageClass::Function &&
1857 SrcSC == SPIRV::StorageClass::Private))
1858 return BuildCOPY(ResVReg, SrcPtr,
I);
1862 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1865 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1886 return selectUnOp(ResVReg, ResType,
I,
1887 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1889 return selectUnOp(ResVReg, ResType,
I,
1890 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1892 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1894 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1904 return SPIRV::OpFOrdEqual;
1906 return SPIRV::OpFOrdGreaterThanEqual;
1908 return SPIRV::OpFOrdGreaterThan;
1910 return SPIRV::OpFOrdLessThanEqual;
1912 return SPIRV::OpFOrdLessThan;
1914 return SPIRV::OpFOrdNotEqual;
1916 return SPIRV::OpOrdered;
1918 return SPIRV::OpFUnordEqual;
1920 return SPIRV::OpFUnordGreaterThanEqual;
1922 return SPIRV::OpFUnordGreaterThan;
1924 return SPIRV::OpFUnordLessThanEqual;
1926 return SPIRV::OpFUnordLessThan;
1928 return SPIRV::OpFUnordNotEqual;
1930 return SPIRV::OpUnordered;
1940 return SPIRV::OpIEqual;
1942 return SPIRV::OpINotEqual;
1944 return SPIRV::OpSGreaterThanEqual;
1946 return SPIRV::OpSGreaterThan;
1948 return SPIRV::OpSLessThanEqual;
1950 return SPIRV::OpSLessThan;
1952 return SPIRV::OpUGreaterThanEqual;
1954 return SPIRV::OpUGreaterThan;
1956 return SPIRV::OpULessThanEqual;
1958 return SPIRV::OpULessThan;
1967 return SPIRV::OpPtrEqual;
1969 return SPIRV::OpPtrNotEqual;
1980 return SPIRV::OpLogicalEqual;
1982 return SPIRV::OpLogicalNotEqual;
2016bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2019 unsigned OpAnyOrAll)
const {
2020 assert(
I.getNumOperands() == 3);
2021 assert(
I.getOperand(2).isReg());
2023 Register InputRegister =
I.getOperand(2).getReg();
2030 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2031 if (IsBoolTy && !IsVectorTy) {
2032 assert(ResVReg ==
I.getOperand(0).getReg());
2033 return BuildCOPY(ResVReg, InputRegister,
I);
2037 unsigned SpirvNotEqualId =
2038 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2045 IsBoolTy ? InputRegister
2054 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2074bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2076 MachineInstr &
I)
const {
2077 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
2080bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2082 MachineInstr &
I)
const {
2083 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2087bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2089 MachineInstr &
I)
const {
2090 assert(
I.getNumOperands() == 4);
2091 assert(
I.getOperand(2).isReg());
2092 assert(
I.getOperand(3).isReg());
2099 "dot product requires a vector of at least 2 components");
2107 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
2110 .
addUse(
I.getOperand(2).getReg())
2111 .
addUse(
I.getOperand(3).getReg())
2115bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2119 assert(
I.getNumOperands() == 4);
2120 assert(
I.getOperand(2).isReg());
2121 assert(
I.getOperand(3).isReg());
2124 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2128 .
addUse(
I.getOperand(2).getReg())
2129 .
addUse(
I.getOperand(3).getReg())
2135bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2137 assert(
I.getNumOperands() == 4);
2138 assert(
I.getOperand(2).isReg());
2139 assert(
I.getOperand(3).isReg());
2143 Register Vec0 =
I.getOperand(2).getReg();
2144 Register Vec1 =
I.getOperand(3).getReg();
2157 "dot product requires a vector of at least 2 components");
2171 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2194bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2196 MachineInstr &
I)
const {
2198 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2201 .
addUse(
I.getOperand(2).getReg())
2205bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2207 MachineInstr &
I)
const {
2209 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2212 .
addUse(
I.getOperand(2).getReg())
2216template <
bool Signed>
2217bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2219 MachineInstr &
I)
const {
2220 assert(
I.getNumOperands() == 5);
2221 assert(
I.getOperand(2).isReg());
2222 assert(
I.getOperand(3).isReg());
2223 assert(
I.getOperand(4).isReg());
2226 Register Acc =
I.getOperand(2).getReg();
2230 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2250template <
bool Signed>
2251bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2253 assert(
I.getNumOperands() == 5);
2254 assert(
I.getOperand(2).isReg());
2255 assert(
I.getOperand(3).isReg());
2256 assert(
I.getOperand(4).isReg());
2261 Register Acc =
I.getOperand(2).getReg();
2267 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2271 for (
unsigned i = 0; i < 4; i++) {
2273 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2284 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2304 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2316 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2332bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2334 MachineInstr &
I)
const {
2335 assert(
I.getNumOperands() == 3);
2336 assert(
I.getOperand(2).isReg());
2338 Register VZero = buildZerosValF(ResType,
I);
2339 Register VOne = buildOnesValF(ResType,
I);
2341 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2344 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2346 .
addUse(
I.getOperand(2).getReg())
2352bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2354 MachineInstr &
I)
const {
2355 assert(
I.getNumOperands() == 3);
2356 assert(
I.getOperand(2).isReg());
2358 Register InputRegister =
I.getOperand(2).getReg();
2360 auto &
DL =
I.getDebugLoc();
2370 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2372 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2374 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2381 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2386 if (NeedsConversion) {
2387 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2398bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2401 unsigned Opcode)
const {
2405 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2411 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2412 BMI.addUse(
I.getOperand(J).getReg());
2418bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2424 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2425 SPIRV::OpGroupNonUniformBallot);
2429 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2434 .
addImm(SPIRV::GroupOperation::Reduce)
2441bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2444 bool IsUnsigned)
const {
2445 assert(
I.getNumOperands() == 3);
2446 assert(
I.getOperand(2).isReg());
2448 Register InputRegister =
I.getOperand(2).getReg();
2457 auto IntegerOpcodeType =
2458 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2459 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2460 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2465 .
addImm(SPIRV::GroupOperation::Reduce)
2466 .
addUse(
I.getOperand(2).getReg())
2470bool SPIRVInstructionSelector::selectWaveReduceMin(
Register ResVReg,
2473 bool IsUnsigned)
const {
2474 assert(
I.getNumOperands() == 3);
2475 assert(
I.getOperand(2).isReg());
2477 Register InputRegister =
I.getOperand(2).getReg();
2486 auto IntegerOpcodeType =
2487 IsUnsigned ? SPIRV::OpGroupNonUniformUMin : SPIRV::OpGroupNonUniformSMin;
2488 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMin : IntegerOpcodeType;
2489 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2494 .
addImm(SPIRV::GroupOperation::Reduce)
2495 .
addUse(
I.getOperand(2).getReg())
2499bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2501 MachineInstr &
I)
const {
2502 assert(
I.getNumOperands() == 3);
2503 assert(
I.getOperand(2).isReg());
2505 Register InputRegister =
I.getOperand(2).getReg();
2515 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2516 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2521 .
addImm(SPIRV::GroupOperation::Reduce)
2522 .
addUse(
I.getOperand(2).getReg());
2525bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2527 MachineInstr &
I)
const {
2529 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2532 .
addUse(
I.getOperand(1).getReg())
2536bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2538 MachineInstr &
I)
const {
2544 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2546 Register OpReg =
I.getOperand(1).getReg();
2547 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2548 if (
Def->getOpcode() == TargetOpcode::COPY)
2549 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2551 switch (
Def->getOpcode()) {
2552 case SPIRV::ASSIGN_TYPE:
2553 if (MachineInstr *AssignToDef =
2554 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2555 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2556 Reg =
Def->getOperand(2).getReg();
2559 case SPIRV::OpUndef:
2560 Reg =
Def->getOperand(1).getReg();
2563 unsigned DestOpCode;
2565 DestOpCode = SPIRV::OpConstantNull;
2567 DestOpCode = TargetOpcode::COPY;
2570 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2571 .
addDef(
I.getOperand(0).getReg())
2578bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2580 MachineInstr &
I)
const {
2582 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2584 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2588 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2593 for (
unsigned i =
I.getNumExplicitDefs();
2594 i <
I.getNumExplicitOperands() && IsConst; ++i)
2598 if (!IsConst &&
N < 2)
2600 "There must be at least two constituent operands in a vector");
2603 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2604 TII.get(IsConst ? SPIRV::OpConstantComposite
2605 : SPIRV::OpCompositeConstruct))
2608 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2609 MIB.
addUse(
I.getOperand(i).getReg());
2613bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2615 MachineInstr &
I)
const {
2617 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2619 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2625 if (!
I.getOperand(
OpIdx).isReg())
2632 if (!IsConst &&
N < 2)
2634 "There must be at least two constituent operands in a vector");
2637 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2638 TII.get(IsConst ? SPIRV::OpConstantComposite
2639 : SPIRV::OpCompositeConstruct))
2642 for (
unsigned i = 0; i <
N; ++i)
2647bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2649 MachineInstr &
I)
const {
2654 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2656 Opcode = SPIRV::OpDemoteToHelperInvocation;
2658 Opcode = SPIRV::OpKill;
2660 if (MachineInstr *NextI =
I.getNextNode()) {
2662 NextI->removeFromParent();
2667 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2671bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2674 MachineInstr &
I)
const {
2675 Register Cmp0 =
I.getOperand(2).getReg();
2676 Register Cmp1 =
I.getOperand(3).getReg();
2679 "CMP operands should have the same type");
2680 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2689bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2691 MachineInstr &
I)
const {
2692 auto Pred =
I.getOperand(1).getPredicate();
2695 Register CmpOperand =
I.getOperand(2).getReg();
2702 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2705std::pair<Register, bool>
2706SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2712 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2720 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2723 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2726 .
addImm(APInt(32, Val).getZExtValue());
2728 GR.
add(ConstInt,
MI);
2733bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2735 MachineInstr &
I)
const {
2737 return selectCmp(ResVReg, ResType, CmpOp,
I);
2741 MachineInstr &
I)
const {
2744 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2750 MachineInstr &
I)
const {
2754 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2760 MachineInstr &
I)
const {
2764 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2771 MachineInstr &
I)
const {
2775 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2780bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2782 MachineInstr &
I)
const {
2783 Register SelectFirstArg =
I.getOperand(2).getReg();
2784 Register SelectSecondArg =
I.getOperand(3).getReg();
2793 SPIRV::OpTypeVector;
2800 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
2801 }
else if (IsPtrTy) {
2802 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
2804 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
2808 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
2809 }
else if (IsPtrTy) {
2810 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
2812 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2815 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2818 .
addUse(
I.getOperand(1).getReg())
2824bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
2827 bool IsSigned)
const {
2829 Register ZeroReg = buildZerosVal(ResType,
I);
2830 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2834 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2835 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2838 .
addUse(
I.getOperand(1).getReg())
2844bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2846 MachineInstr &
I,
bool IsSigned,
2847 unsigned Opcode)
const {
2848 Register SrcReg =
I.getOperand(1).getReg();
2854 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2859 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
2861 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
2864bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2866 MachineInstr &
I,
bool IsSigned)
const {
2867 Register SrcReg =
I.getOperand(1).getReg();
2869 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
2872 if (SrcType == ResType)
2873 return BuildCOPY(ResVReg, SrcReg,
I);
2875 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2876 return selectUnOp(ResVReg, ResType,
I, Opcode);
2879bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2882 bool IsSigned)
const {
2883 MachineIRBuilder MIRBuilder(
I);
2884 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
2899 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2900 : SPIRV::OpULessThanEqual))
2903 .
addUse(
I.getOperand(1).getReg())
2904 .
addUse(
I.getOperand(2).getReg())
2910 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2913 .
addUse(
I.getOperand(1).getReg())
2914 .
addUse(
I.getOperand(2).getReg())
2922 unsigned SelectOpcode =
2923 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2928 .
addUse(buildOnesVal(
true, ResType,
I))
2929 .
addUse(buildZerosVal(ResType,
I))
2936 .
addUse(buildOnesVal(
false, ResType,
I))
2940bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2947 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2948 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2950 Register One = buildOnesVal(
false, IntTy,
I);
2966bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2968 MachineInstr &
I)
const {
2969 Register IntReg =
I.getOperand(1).getReg();
2972 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2973 if (ArgType == ResType)
2974 return BuildCOPY(ResVReg, IntReg,
I);
2976 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2977 return selectUnOp(ResVReg, ResType,
I, Opcode);
2980bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2982 MachineInstr &
I)
const {
2983 unsigned Opcode =
I.getOpcode();
2984 unsigned TpOpcode = ResType->
getOpcode();
2986 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
2987 assert(Opcode == TargetOpcode::G_CONSTANT &&
2988 I.getOperand(1).getCImm()->isZero());
2989 MachineBasicBlock &DepMBB =
I.getMF()->front();
2992 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
2999 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
3002bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
3004 MachineInstr &
I)
const {
3005 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3011bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
3013 MachineInstr &
I)
const {
3015 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
3019 .
addUse(
I.getOperand(3).getReg())
3021 .
addUse(
I.getOperand(2).getReg());
3022 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
3027bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
3029 MachineInstr &
I)
const {
3031 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3034 .
addUse(
I.getOperand(2).getReg());
3035 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
3040bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
3042 MachineInstr &
I)
const {
3044 return selectInsertVal(ResVReg, ResType,
I);
3046 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3049 .
addUse(
I.getOperand(2).getReg())
3050 .
addUse(
I.getOperand(3).getReg())
3051 .
addUse(
I.getOperand(4).getReg())
3055bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3057 MachineInstr &
I)
const {
3059 return selectExtractVal(ResVReg, ResType,
I);
3061 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3064 .
addUse(
I.getOperand(2).getReg())
3065 .
addUse(
I.getOperand(3).getReg())
3069bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3071 MachineInstr &
I)
const {
3072 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3078 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3079 : SPIRV::OpAccessChain)
3080 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3081 :
SPIRV::OpPtrAccessChain);
3083 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3087 .
addUse(
I.getOperand(3).getReg());
3089 const unsigned StartingIndex =
3090 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3093 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3094 Res.addUse(
I.getOperand(i).getReg());
3095 return Res.constrainAllUses(
TII,
TRI, RBI);
3099bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3102 unsigned Lim =
I.getNumExplicitOperands();
3103 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3104 Register OpReg =
I.getOperand(i).getReg();
3105 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3107 SmallPtrSet<SPIRVType *, 4> Visited;
3108 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
3109 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3110 OpDefine->
getOpcode() == TargetOpcode::G_INTTOPTR ||
3117 MachineFunction *MF =
I.getMF();
3129 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3130 TII.get(SPIRV::OpSpecConstantOp))
3133 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3135 GR.
add(OpDefine, MIB);
3143bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3145 MachineInstr &
I)
const {
3149 case Intrinsic::spv_load:
3150 return selectLoad(ResVReg, ResType,
I);
3151 case Intrinsic::spv_store:
3152 return selectStore(
I);
3153 case Intrinsic::spv_extractv:
3154 return selectExtractVal(ResVReg, ResType,
I);
3155 case Intrinsic::spv_insertv:
3156 return selectInsertVal(ResVReg, ResType,
I);
3157 case Intrinsic::spv_extractelt:
3158 return selectExtractElt(ResVReg, ResType,
I);
3159 case Intrinsic::spv_insertelt:
3160 return selectInsertElt(ResVReg, ResType,
I);
3161 case Intrinsic::spv_gep:
3162 return selectGEP(ResVReg, ResType,
I);
3163 case Intrinsic::spv_bitcast: {
3164 Register OpReg =
I.getOperand(2).getReg();
3169 return selectOpWithSrcs(ResVReg, ResType,
I, {OpReg}, SPIRV::OpBitcast);
3171 case Intrinsic::spv_unref_global:
3172 case Intrinsic::spv_init_global: {
3173 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3174 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3175 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3178 Register GVarVReg =
MI->getOperand(0).getReg();
3179 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
3183 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3185 MI->removeFromParent();
3189 case Intrinsic::spv_undef: {
3190 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3195 case Intrinsic::spv_const_composite: {
3197 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3203 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3205 MachineIRBuilder MIR(
I);
3207 MIR, SPIRV::OpConstantComposite, 3,
3208 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3210 for (
auto *Instr : Instructions) {
3211 Instr->setDebugLoc(
I.getDebugLoc());
3217 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3223 case Intrinsic::spv_assign_name: {
3224 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3225 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3226 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3227 i <
I.getNumExplicitOperands(); ++i) {
3228 MIB.
addImm(
I.getOperand(i).getImm());
3232 case Intrinsic::spv_switch: {
3233 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3234 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3235 if (
I.getOperand(i).isReg())
3236 MIB.
addReg(
I.getOperand(i).getReg());
3237 else if (
I.getOperand(i).isCImm())
3238 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3239 else if (
I.getOperand(i).isMBB())
3240 MIB.
addMBB(
I.getOperand(i).getMBB());
3246 case Intrinsic::spv_loop_merge: {
3247 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3248 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3249 if (
I.getOperand(i).isMBB())
3250 MIB.
addMBB(
I.getOperand(i).getMBB());
3256 case Intrinsic::spv_selection_merge: {
3258 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3259 assert(
I.getOperand(1).isMBB() &&
3260 "operand 1 to spv_selection_merge must be a basic block");
3261 MIB.
addMBB(
I.getOperand(1).getMBB());
3262 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3265 case Intrinsic::spv_cmpxchg:
3266 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3267 case Intrinsic::spv_unreachable:
3268 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3270 case Intrinsic::spv_alloca:
3271 return selectFrameIndex(ResVReg, ResType,
I);
3272 case Intrinsic::spv_alloca_array:
3273 return selectAllocaArray(ResVReg, ResType,
I);
3274 case Intrinsic::spv_assume:
3276 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3277 .
addUse(
I.getOperand(1).getReg())
3280 case Intrinsic::spv_expect:
3282 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3285 .
addUse(
I.getOperand(2).getReg())
3286 .
addUse(
I.getOperand(3).getReg())
3289 case Intrinsic::arithmetic_fence:
3292 TII.get(SPIRV::OpArithmeticFenceEXT))
3295 .
addUse(
I.getOperand(2).getReg())
3298 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3300 case Intrinsic::spv_thread_id:
3306 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3308 case Intrinsic::spv_thread_id_in_group:
3314 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3316 case Intrinsic::spv_group_id:
3322 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3324 case Intrinsic::spv_flattened_thread_id_in_group:
3331 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3333 case Intrinsic::spv_workgroup_size:
3334 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3336 case Intrinsic::spv_global_size:
3337 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3339 case Intrinsic::spv_global_offset:
3340 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3342 case Intrinsic::spv_num_workgroups:
3343 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3345 case Intrinsic::spv_subgroup_size:
3346 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3348 case Intrinsic::spv_num_subgroups:
3349 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3351 case Intrinsic::spv_subgroup_id:
3352 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3353 case Intrinsic::spv_subgroup_local_invocation_id:
3354 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3355 ResVReg, ResType,
I);
3356 case Intrinsic::spv_subgroup_max_size:
3357 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3359 case Intrinsic::spv_fdot:
3360 return selectFloatDot(ResVReg, ResType,
I);
3361 case Intrinsic::spv_udot:
3362 case Intrinsic::spv_sdot:
3363 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3365 return selectIntegerDot(ResVReg, ResType,
I,
3366 IID == Intrinsic::spv_sdot);
3367 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3368 case Intrinsic::spv_dot4add_i8packed:
3369 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3371 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3372 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3373 case Intrinsic::spv_dot4add_u8packed:
3374 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3376 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3377 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3378 case Intrinsic::spv_all:
3379 return selectAll(ResVReg, ResType,
I);
3380 case Intrinsic::spv_any:
3381 return selectAny(ResVReg, ResType,
I);
3382 case Intrinsic::spv_cross:
3383 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3384 case Intrinsic::spv_distance:
3385 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3386 case Intrinsic::spv_lerp:
3387 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3388 case Intrinsic::spv_length:
3389 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3390 case Intrinsic::spv_degrees:
3391 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3392 case Intrinsic::spv_faceforward:
3393 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3394 case Intrinsic::spv_frac:
3395 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3396 case Intrinsic::spv_isinf:
3397 return selectOpIsInf(ResVReg, ResType,
I);
3398 case Intrinsic::spv_isnan:
3399 return selectOpIsNan(ResVReg, ResType,
I);
3400 case Intrinsic::spv_normalize:
3401 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3402 case Intrinsic::spv_refract:
3403 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3404 case Intrinsic::spv_reflect:
3405 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3406 case Intrinsic::spv_rsqrt:
3407 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3408 case Intrinsic::spv_sign:
3409 return selectSign(ResVReg, ResType,
I);
3410 case Intrinsic::spv_smoothstep:
3411 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3412 case Intrinsic::spv_firstbituhigh:
3413 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3414 case Intrinsic::spv_firstbitshigh:
3415 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3416 case Intrinsic::spv_firstbitlow:
3417 return selectFirstBitLow(ResVReg, ResType,
I);
3418 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3420 auto MemSemConstant =
3421 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3422 Register MemSemReg = MemSemConstant.first;
3423 Result &= MemSemConstant.second;
3424 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3425 Register ScopeReg = ScopeConstant.first;
3426 Result &= ScopeConstant.second;
3429 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3435 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3436 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3437 SPIRV::StorageClass::StorageClass ResSC =
3441 "Generic storage class");
3443 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3450 case Intrinsic::spv_lifetime_start:
3451 case Intrinsic::spv_lifetime_end: {
3452 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3453 : SPIRV::OpLifetimeStop;
3454 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3455 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3463 case Intrinsic::spv_saturate:
3464 return selectSaturate(ResVReg, ResType,
I);
3465 case Intrinsic::spv_nclamp:
3466 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3467 case Intrinsic::spv_uclamp:
3468 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3469 case Intrinsic::spv_sclamp:
3470 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3471 case Intrinsic::spv_wave_active_countbits:
3472 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3473 case Intrinsic::spv_wave_all:
3474 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3475 case Intrinsic::spv_wave_any:
3476 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3477 case Intrinsic::spv_wave_is_first_lane:
3478 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3479 case Intrinsic::spv_wave_reduce_umax:
3480 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3481 case Intrinsic::spv_wave_reduce_max:
3482 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3483 case Intrinsic::spv_wave_reduce_umin:
3484 return selectWaveReduceMin(ResVReg, ResType,
I,
true);
3485 case Intrinsic::spv_wave_reduce_min:
3486 return selectWaveReduceMin(ResVReg, ResType,
I,
false);
3487 case Intrinsic::spv_wave_reduce_sum:
3488 return selectWaveReduceSum(ResVReg, ResType,
I);
3489 case Intrinsic::spv_wave_readlane:
3490 return selectWaveOpInst(ResVReg, ResType,
I,
3491 SPIRV::OpGroupNonUniformShuffle);
3492 case Intrinsic::spv_step:
3493 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3494 case Intrinsic::spv_radians:
3495 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3499 case Intrinsic::instrprof_increment:
3500 case Intrinsic::instrprof_increment_step:
3501 case Intrinsic::instrprof_value_profile:
3504 case Intrinsic::spv_value_md:
3506 case Intrinsic::spv_resource_handlefrombinding: {
3507 return selectHandleFromBinding(ResVReg, ResType,
I);
3509 case Intrinsic::spv_resource_counterhandlefrombinding:
3510 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
3511 case Intrinsic::spv_resource_updatecounter:
3512 return selectUpdateCounter(ResVReg, ResType,
I);
3513 case Intrinsic::spv_resource_store_typedbuffer: {
3514 return selectImageWriteIntrinsic(
I);
3516 case Intrinsic::spv_resource_load_typedbuffer: {
3517 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3519 case Intrinsic::spv_resource_getpointer: {
3520 return selectResourceGetPointer(ResVReg, ResType,
I);
3522 case Intrinsic::spv_discard: {
3523 return selectDiscard(ResVReg, ResType,
I);
3525 case Intrinsic::spv_resource_nonuniformindex: {
3526 return selectResourceNonUniformIndex(ResVReg, ResType,
I);
3528 case Intrinsic::spv_unpackhalf2x16: {
3529 return selectExtInst(ResVReg, ResType,
I, GL::UnpackHalf2x16);
3533 std::string DiagMsg;
3534 raw_string_ostream OS(DiagMsg);
3536 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3543bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3545 MachineInstr &
I)
const {
3548 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3555bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
3558 assert(Intr.getIntrinsicID() ==
3559 Intrinsic::spv_resource_counterhandlefrombinding);
3562 Register MainHandleReg = Intr.getOperand(2).getReg();
3564 assert(MainHandleDef->getIntrinsicID() ==
3565 Intrinsic::spv_resource_handlefrombinding);
3569 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(),
MRI);
3570 Register IndexReg = MainHandleDef->getOperand(5).getReg();
3571 std::string CounterName =
3576 MachineIRBuilder MIRBuilder(
I);
3577 Register CounterVarReg = buildPointerToResource(
3579 Binding, ArraySize, IndexReg, CounterName, MIRBuilder);
3581 return BuildCOPY(ResVReg, CounterVarReg,
I);
3584bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
3586 MachineInstr &
I)
const {
3588 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
3590 Register CounterHandleReg = Intr.getOperand(2).getReg();
3591 Register IncrReg = Intr.getOperand(3).getReg();
3599 assert(CounterVarPointeeType &&
3600 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
3601 "Counter variable must be a struct");
3603 SPIRV::StorageClass::StorageBuffer &&
3604 "Counter variable must be in the storage buffer storage class");
3606 "Counter variable must have exactly 1 member in the struct");
3610 "Counter variable struct must have a single i32 member");
3614 MachineIRBuilder MIRBuilder(
I);
3616 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
3619 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
3621 auto Zero = buildI32Constant(0,
I);
3627 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3628 TII.get(SPIRV::OpAccessChain))
3631 .
addUse(CounterHandleReg)
3639 auto Scope = buildI32Constant(SPIRV::Scope::Device,
I);
3642 auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
3643 if (!Semantics.second)
3647 auto Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
3652 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
3663 return BuildCOPY(ResVReg, AtomicRes,
I);
3671 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
3678bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3687 Register ImageReg =
I.getOperand(2).getReg();
3689 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3695 Register IdxReg =
I.getOperand(3).getReg();
3697 MachineInstr &Pos =
I;
3699 return generateImageReadOrFetch(ResVReg, ResType, NewImageReg, IdxReg, Loc,
3703bool SPIRVInstructionSelector::generateImageReadOrFetch(
3708 "ImageReg is not an image type.");
3710 bool IsSignedInteger =
3715 bool IsFetch = (SampledOp.getImm() == 1);
3718 if (ResultSize == 4) {
3721 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
3727 if (IsSignedInteger)
3732 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
3736 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
3741 if (IsSignedInteger)
3747 if (ResultSize == 1) {
3749 TII.get(SPIRV::OpCompositeExtract))
3756 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
3759bool SPIRVInstructionSelector::selectResourceGetPointer(
3761 Register ResourcePtr =
I.getOperand(2).getReg();
3763 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
3772 MachineIRBuilder MIRBuilder(
I);
3774 Register IndexReg =
I.getOperand(3).getReg();
3777 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3778 TII.get(SPIRV::OpAccessChain))
3787bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
3789 Register ObjReg =
I.getOperand(2).getReg();
3790 if (!BuildCOPY(ResVReg, ObjReg,
I))
3800 decorateUsesAsNonUniform(ResVReg);
3804void SPIRVInstructionSelector::decorateUsesAsNonUniform(
3807 while (WorkList.
size() > 0) {
3811 bool IsDecorated =
false;
3812 for (MachineInstr &Use :
MRI->use_instructions(CurrentReg)) {
3813 if (
Use.getOpcode() == SPIRV::OpDecorate &&
3814 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
3820 if (
Use.getOperand(0).isReg() &&
Use.getOperand(0).isDef()) {
3822 if (ResultReg == CurrentReg)
3830 SPIRV::Decoration::NonUniformEXT, {});
3835bool SPIRVInstructionSelector::extractSubvector(
3837 MachineInstr &InsertionPoint)
const {
3839 [[maybe_unused]] uint64_t InputSize =
3842 assert(InputSize > 1 &&
"The input must be a vector.");
3843 assert(ResultSize > 1 &&
"The result must be a vector.");
3844 assert(ResultSize < InputSize &&
3845 "Cannot extract more element than there are in the input.");
3848 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
3849 for (uint64_t
I = 0;
I < ResultSize;
I++) {
3850 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3853 TII.get(SPIRV::OpCompositeExtract))
3864 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
3866 TII.get(SPIRV::OpCompositeConstruct))
3870 for (
Register ComponentReg : ComponentRegisters)
3871 MIB.
addUse(ComponentReg);
3875bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3876 MachineInstr &
I)
const {
3883 Register ImageReg =
I.getOperand(1).getReg();
3885 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3891 Register CoordinateReg =
I.getOperand(2).getReg();
3892 Register DataReg =
I.getOperand(3).getReg();
3895 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3896 TII.get(SPIRV::OpImageWrite))
3903Register SPIRVInstructionSelector::buildPointerToResource(
3904 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
3905 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
3906 StringRef Name, MachineIRBuilder MIRBuilder)
const {
3908 if (ArraySize == 1) {
3912 "SpirvResType did not have an explicit layout.");
3917 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
3921 VarPointerType, Set,
Binding, Name, MIRBuilder);
3936bool SPIRVInstructionSelector::selectFirstBitSet16(
3938 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3940 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
3944 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3947bool SPIRVInstructionSelector::selectFirstBitSet32(
3949 Register SrcReg,
unsigned BitSetOpcode)
const {
3950 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3953 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
3959bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3961 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3968 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3970 MachineIRBuilder MIRBuilder(
I);
3978 std::vector<Register> PartialRegs;
3981 unsigned CurrentComponent = 0;
3982 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3988 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3989 TII.get(SPIRV::OpVectorShuffle))
3994 .
addImm(CurrentComponent)
3995 .
addImm(CurrentComponent + 1);
4003 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
4004 BitSetOpcode, SwapPrimarySide))
4007 PartialRegs.push_back(SubVecBitSetReg);
4011 if (CurrentComponent != ComponentCount) {
4017 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
4018 SPIRV::OpVectorExtractDynamic))
4024 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
4025 BitSetOpcode, SwapPrimarySide))
4028 PartialRegs.push_back(FinalElemBitSetReg);
4033 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
4034 SPIRV::OpCompositeConstruct);
4037bool SPIRVInstructionSelector::selectFirstBitSet64(
4039 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
4052 if (ComponentCount > 2) {
4053 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
4054 BitSetOpcode, SwapPrimarySide);
4058 MachineIRBuilder MIRBuilder(
I);
4060 BaseType, 2 * ComponentCount, MIRBuilder,
false);
4064 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
4070 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
4077 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
4080 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
4081 SPIRV::OpVectorExtractDynamic))
4083 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
4084 SPIRV::OpVectorExtractDynamic))
4088 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4089 TII.get(SPIRV::OpVectorShuffle))
4097 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
4104 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4105 TII.get(SPIRV::OpVectorShuffle))
4113 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
4134 SelectOp = SPIRV::OpSelectSISCond;
4135 AddOp = SPIRV::OpIAddS;
4143 SelectOp = SPIRV::OpSelectVIVCond;
4144 AddOp = SPIRV::OpIAddV;
4154 if (SwapPrimarySide) {
4155 PrimaryReg = LowReg;
4156 SecondaryReg = HighReg;
4157 PrimaryShiftReg = Reg0;
4158 SecondaryShiftReg = Reg32;
4163 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
4169 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
4175 if (!selectOpWithSrcs(ValReg, ResType,
I,
4176 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
4179 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
4182bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
4185 bool IsSigned)
const {
4187 Register OpReg =
I.getOperand(2).getReg();
4190 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
4191 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
4195 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4197 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4199 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4203 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
4207bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
4209 MachineInstr &
I)
const {
4211 Register OpReg =
I.getOperand(2).getReg();
4216 unsigned ExtendOpcode = SPIRV::OpUConvert;
4217 unsigned BitSetOpcode = GL::FindILsb;
4221 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4223 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4225 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4232bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
4234 MachineInstr &
I)
const {
4238 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
4239 TII.get(SPIRV::OpVariableLengthArrayINTEL))
4242 .
addUse(
I.getOperand(2).getReg())
4245 unsigned Alignment =
I.getOperand(3).getImm();
4251bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
4253 MachineInstr &
I)
const {
4257 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
4258 TII.get(SPIRV::OpVariable))
4261 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
4264 unsigned Alignment =
I.getOperand(2).getImm();
4271bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
4276 const MachineInstr *PrevI =
I.getPrevNode();
4278 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
4279 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4282 .
addMBB(
I.getOperand(0).getMBB())
4286 .
addMBB(
I.getOperand(0).getMBB())
4290bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
4301 const MachineInstr *NextI =
I.getNextNode();
4303 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
4309 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
4310 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4311 .
addUse(
I.getOperand(0).getReg())
4312 .
addMBB(
I.getOperand(1).getMBB())
4317bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
4319 MachineInstr &
I)
const {
4320 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
4323 const unsigned NumOps =
I.getNumOperands();
4324 for (
unsigned i = 1; i <
NumOps; i += 2) {
4325 MIB.
addUse(
I.getOperand(i + 0).getReg());
4326 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
4334bool SPIRVInstructionSelector::selectGlobalValue(
4335 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
4337 MachineIRBuilder MIRBuilder(
I);
4338 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
4341 std::string GlobalIdent;
4343 unsigned &
ID = UnnamedGlobalIDs[GV];
4345 ID = UnnamedGlobalIDs.size();
4346 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
4373 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4380 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4383 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4384 MachineInstrBuilder MIB1 =
4385 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4388 MachineInstrBuilder MIB2 =
4390 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4394 GR.
add(ConstVal, MIB2);
4400 MachineInstrBuilder MIB3 =
4401 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4404 GR.
add(ConstVal, MIB3);
4407 assert(NewReg != ResVReg);
4408 return BuildCOPY(ResVReg, NewReg,
I);
4418 const std::optional<SPIRV::LinkageType::LinkageType> LnkType =
4427 GlobalVar->isConstant(), LnkType, MIRBuilder,
true);
4431bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4433 MachineInstr &
I)
const {
4435 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4443 MachineIRBuilder MIRBuilder(
I);
4449 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4452 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4454 .
add(
I.getOperand(1))
4459 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4462 ResType->
getOpcode() == SPIRV::OpTypeVector
4469 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4470 ? SPIRV::OpVectorTimesScalar
4480bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4482 MachineInstr &
I)
const {
4498 MachineIRBuilder MIRBuilder(
I);
4501 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4513 MachineBasicBlock &EntryBB =
I.getMF()->front();
4517 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4520 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4526 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4529 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4532 .
add(
I.getOperand(
I.getNumExplicitDefs()))
4536 Register IntegralPartReg =
I.getOperand(1).getReg();
4537 if (IntegralPartReg.
isValid()) {
4539 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4548 assert(
false &&
"GLSL::Modf is deprecated.");
4559bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4560 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4561 const SPIRVType *ResType, MachineInstr &
I)
const {
4562 MachineIRBuilder MIRBuilder(
I);
4566 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4578 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
4582 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4583 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4589 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4596 assert(
I.getOperand(2).isReg());
4597 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4601 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4611bool SPIRVInstructionSelector::loadBuiltinInputID(
4612 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4613 const SPIRVType *ResType, MachineInstr &
I)
const {
4614 MachineIRBuilder MIRBuilder(
I);
4616 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4631 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
4635 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4644 MachineInstr &
I)
const {
4645 MachineIRBuilder MIRBuilder(
I);
4646 if (
Type->getOpcode() != SPIRV::OpTypeVector)
4650 if (VectorSize == 4)
4658bool SPIRVInstructionSelector::loadHandleBeforePosition(
4660 MachineInstr &Pos)
const {
4663 Intrinsic::spv_resource_handlefrombinding);
4671 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
4672 MachineIRBuilder MIRBuilder(HandleDef);
4674 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
4676 if (IsStructuredBuffer) {
4681 Register VarReg = buildPointerToResource(VarType, SC, Set,
Binding, ArraySize,
4682 IndexReg, Name, MIRBuilder);
4686 uint32_t LoadOpcode =
4687 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
4690 TII.get(LoadOpcode))
4698InstructionSelector *
4702 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMIntType(unsigned NumBits)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class.
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...