#include "llvm/IR/IntrinsicsSPIRV.h"
// Pack up to four characters of Str, starting at offset i, into a single
// little-endian 32-bit word; positions past the end of the string become
// null padding.
static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
  uint32_t Word = 0u;
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    uint8_t CharToAdd = 0;       // Initialize char as padding/null.
    if (StrIndex < Str.size()) { // If it's within the string, get a real char.
      CharToAdd = Str[StrIndex];
    }
    Word |= (CharToAdd << (WordIndex * 8));
  }
  return Word;
}

// Get length including padding and null terminator, rounded up to a
// multiple of 4 bytes.
static size_t getPaddedLen(const StringRef &Str) {
  return (Str.size() + 4) & ~3;
}
void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
  }
}

void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    MIB.addImm(convertCharsToWord(Str, i));
  }
}
void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add a constant for the 32-bits of chars or padding.
    Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
  }
}
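// Worked example (illustrative, not from the original source): for
// Str = "abc", getPaddedLen returns (3 + 4) & ~3 == 4, so exactly one word
// is emitted: convertCharsToWord("abc", 0) == 0x00636261 ('a' in the lowest
// byte, a null terminator in the top byte), matching SPIR-V's little-endian
// literal-string encoding.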
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI) {
  MachineInstr *Def = getVRegDef(MRI, Reg);
  assert(Def && Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE &&
         "Expected G_GLOBAL_VALUE");
  const GlobalValue *GV = Def->getOperand(1).getGlobal();
  Value *V = GV->getOperand(0);
  const ConstantDataArray *CDA = cast<ConstantDataArray>(V);
  return CDA->getAsCString().str();
}
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
  const auto Bitwidth = Imm.getBitWidth();
  if (Bitwidth == 1)
    return; // Already handled.
  else if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
    return;
  } else if (Bitwidth <= 64) {
    // Split the 64-bit literal into two 32-bit words, low word first.
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
    MIB.addImm(LowBits).addImm(HighBits);
    return;
  }
  report_fatal_error("Unsupported constant bitwidth");
}
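// Worked example (illustrative, not from the original source): for a 64-bit
// Imm = 0x0000000100000002, FullImm & 0xffffffff yields LowBits = 0x00000002
// and (FullImm >> 32) & 0xffffffff yields HighBits = 0x00000001; the low
// word is added first, as SPIR-V encodes multi-word literals low-order word
// first.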
void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII) {
  if (!Name.empty()) {
    auto MIB =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))
            .addUse(Target);
    addStringImm(Name, MIB);
  }
}
static void finishBuildOpDecorate(MachineInstrBuilder &MIB,
                                  const std::vector<uint32_t> &DecArgs,
                                  StringRef StrImm) {
  if (!StrImm.empty())
    addStringImm(StrImm, MIB);
  for (const auto &DecArg : DecArgs)
    MIB.addImm(DecArg);
}
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpMemberDecorate)
                 .addUse(Reg)
                 .addImm(Member)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpMemberDecorate(Register Reg, MachineInstr &I,
                           const SPIRVInstrInfo &TII,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemberDecorate))
                 .addUse(Reg)
                 .addImm(Member)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD, const SPIRVSubtarget &ST) {
  for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
    auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
    if (!OpMD)
      report_fatal_error("Invalid decoration");
    if (OpMD->getNumOperands() == 0)
      report_fatal_error("Expect operand(s) of the decoration");
    ConstantInt *DecorationId =
        mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
    if (!DecorationId)
      report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
                         "element of the decoration");
    // NoContraction and FPFastMathMode decorations are handled through
    // instruction-level flags; skip them here.
    if (DecorationId->getZExtValue() ==
            static_cast<uint32_t>(SPIRV::Decoration::NoContraction) ||
        DecorationId->getZExtValue() ==
            static_cast<uint32_t>(SPIRV::Decoration::FPFastMathMode))
      continue;
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                   .addUse(Reg)
                   .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      if (ConstantInt *OpV =
              mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
        MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
      else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
        addStringImm(OpV->getString(), MIB);
      else
        report_fatal_error("Unexpected operand of the decoration");
    }
  }
}
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I) {
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  MachineBasicBlock::iterator It = MBB->SkipPHIsAndLabels(MBB->begin()),
                              E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  // Advance past the function header (OpFunction, OpFunctionParameter,
  // ASSIGN_TYPE, OpLabel) to find the insertion point for OpVariable.
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return It;
}
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->end();
  if (I == MBB->begin())
    return I;
  --I;
  // Back up over trailing terminators and debug instructions so the
  // returned iterator is a valid insertion point before them.
  while (I->isTerminator() || I->isDebugValue()) {
    if (I == MBB->begin())
      break;
    --I;
  }
  return I;
}
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
  switch (AddrSpace) {
  case 0:
    return SPIRV::StorageClass::Function;
  case 1:
    return SPIRV::StorageClass::CrossWorkgroup;
  case 2:
    return SPIRV::StorageClass::UniformConstant;
  case 3:
    return SPIRV::StorageClass::Workgroup;
  case 4:
    return SPIRV::StorageClass::Generic;
  case 5:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::DeviceOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 6:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::HostOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 7:
    return SPIRV::StorageClass::Input;
  case 8:
    return SPIRV::StorageClass::Output;
  case 9:
    return SPIRV::StorageClass::CodeSectionINTEL;
  case 10:
    return SPIRV::StorageClass::Private;
  case 11:
    return SPIRV::StorageClass::StorageBuffer;
  case 12:
    return SPIRV::StorageClass::Uniform;
  default:
    report_fatal_error("Unknown address space");
  }
}
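// Usage sketch (hypothetical values): an OpenCL `local` pointer arrives as
// LLVM addrspace(3), so addressSpaceToStorageClass(3, STI) yields
// SPIRV::StorageClass::Workgroup; addrspace(5)/addrspace(6) map to the INTEL
// USM storage classes only when SPV_INTEL_usm_storage_classes is available,
// otherwise they fall back to CrossWorkgroup.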
SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::StorageBuffer:
  case SPIRV::StorageClass::Uniform:
    return SPIRV::MemorySemantics::UniformMemory;
  case SPIRV::StorageClass::Workgroup:
    return SPIRV::MemorySemantics::WorkgroupMemory;
  case SPIRV::StorageClass::CrossWorkgroup:
    return SPIRV::MemorySemantics::CrossWorkgroupMemory;
  case SPIRV::StorageClass::AtomicCounter:
    return SPIRV::MemorySemantics::AtomicCounterMemory;
  case SPIRV::StorageClass::Image:
    return SPIRV::MemorySemantics::ImageMemory;
  default:
    return SPIRV::MemorySemantics::None;
  }
}
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
  default:
    return SPIRV::MemorySemantics::None;
  }
}
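// Example mapping (follows directly from the switch above): a C++
// std::memory_order_acq_rel atomic lowers to AtomicOrdering::AcquireRelease,
// which this helper translates to SPIRV::MemorySemantics::AcquireRelease;
// relaxed/monotonic orderings carry no ordering bits and map to None.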
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
  // Named sync scopes are lazily registered in the context; "singlethread"
  // and the default "" (System) scope are built in.
  SyncScope::ID SubGroup = Ctx.getOrInsertSyncScopeID("subgroup");
  SyncScope::ID WorkGroup = Ctx.getOrInsertSyncScopeID("workgroup");
  SyncScope::ID Device = Ctx.getOrInsertSyncScopeID("device");

  if (Id == SyncScope::SingleThread)
    return SPIRV::Scope::Invocation;
  else if (Id == SyncScope::System)
    return SPIRV::Scope::CrossDevice;
  else if (Id == SubGroup)
    return SPIRV::Scope::Subgroup;
  else if (Id == WorkGroup)
    return SPIRV::Scope::Workgroup;
  else if (Id == Device)
    return SPIRV::Scope::Device;
  return SPIRV::Scope::CrossDevice;
}
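// Usage sketch (assumed IR): an atomicrmw with syncscope("workgroup") maps to
// SPIRV::Scope::Workgroup via the lazily-registered sync-scope ID, while the
// default system scope and any unrecognized named scope conservatively map to
// SPIRV::Scope::CrossDevice.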
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI) {
  MachineInstr *MI = MRI->getVRegDef(ConstReg);
  // Look through a widening/narrowing cast to the underlying definition.
  MachineInstr *ConstInstr =
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
          : MI;
  if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
    if (GI->is(Intrinsic::spv_track_constant)) {
      ConstReg = ConstInstr->getOperand(2).getReg();
      return MRI->getVRegDef(ConstReg);
    }
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    ConstReg = ConstInstr->getOperand(1).getReg();
    return MRI->getVRegDef(ConstReg);
  } else if (ConstInstr->getOpcode() == TargetOpcode::G_CONSTANT ||
             ConstInstr->getOpcode() == TargetOpcode::G_FCONSTANT) {
    ConstReg = ConstInstr->getOperand(0).getReg();
  }
  return MRI->getVRegDef(ConstReg);
}
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI) {
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getSExtValue();
}
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
  if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
    return GI->is(IntrinsicID);
  return false;
}
static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
  return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
         MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
         MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
         MangledName == "reserve_write_pipe" ||
         MangledName == "reserve_read_pipe" ||
         MangledName == "commit_write_pipe" ||
         MangledName == "commit_read_pipe" ||
         MangledName == "work_group_reserve_write_pipe" ||
         MangledName == "work_group_reserve_read_pipe" ||
         MangledName == "work_group_commit_write_pipe" ||
         MangledName == "work_group_commit_read_pipe" ||
         MangledName == "get_pipe_num_packets_ro" ||
         MangledName == "get_pipe_max_packets_ro" ||
         MangledName == "get_pipe_num_packets_wo" ||
         MangledName == "get_pipe_max_packets_wo" ||
         MangledName == "sub_group_reserve_write_pipe" ||
         MangledName == "sub_group_reserve_read_pipe" ||
         MangledName == "sub_group_commit_write_pipe" ||
         MangledName == "sub_group_commit_read_pipe" ||
         MangledName == "to_global" || MangledName == "to_local" ||
         MangledName == "to_private";
}
static bool isEnqueueKernelBI(const StringRef MangledName) {
  return MangledName == "__enqueue_kernel_basic" ||
         MangledName == "__enqueue_kernel_basic_events" ||
         MangledName == "__enqueue_kernel_varargs" ||
         MangledName == "__enqueue_kernel_events_varargs";
}
static bool isKernelQueryBI(const StringRef MangledName) {
  return MangledName == "__get_kernel_work_group_size_impl" ||
         MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
         MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
         MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
}
static bool isNonMangledOCLBuiltin(StringRef Name) {
  if (!Name.starts_with("__"))
    return false;
  return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
         isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
         Name == "__translate_sampler_initializer";
}
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
  bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");

  // Unmangled names are returned as-is.
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
    return Name.str();

  // Try to use the Itanium demangler first.
  if (char *DemangledName = itaniumDemangle(Name.data())) {
    std::string Result = DemangledName;
    free(DemangledName);
    return Result;
  }
  // Fall back to manual parsing of the Itanium length-prefixed encoding.
  // OpenCL C++ built-ins are declared in the cl namespace.
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and ref qualifiers.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    // All built-ins are in the ::cl:: namespace.
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  [[maybe_unused]] bool Error =
      Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
          .getAsInteger(10, Len);
  assert(!Error && "Failed to parse demangled name length");
  return Name.substr(Start, Len).str();
}
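// Worked example of the fallback parsing (hypothetical mangled name, only
// reached when the full demangler fails): for Name = "_Z6lengthDv3_f", the
// digits after "_Z" give Len = 6 and Start = 3, so "length" is returned. For
// the "_ZN" form, qualifier letters in "rVKRO" are skipped and the
// "2cl7__spirv" namespace prefix is required before the length-prefixed name
// is extracted.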
bool hasBuiltinTypePrefix(StringRef Name) {
  if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
      Name.starts_with("spirv."))
    return true;
  return false;
}
bool isEntryPoint(const Function &F) {
  // OpenCL: any function with the SPIR_KERNEL calling convention is a
  // potential entry point.
  if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
    return true;
  // HLSL: shader entry points carry the "hlsl.shader" attribute.
  if (F.getFnAttribute("hlsl.shader").isValid())
    return true;
  return false;
}
Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx) {
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
    return Type::getVoidTy(Ctx);
  else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
    return Type::getIntNTy(Ctx, 1);
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("signed char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
    return Type::getInt8Ty(Ctx);
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("signed short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
    return Type::getInt16Ty(Ctx);
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("signed int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
    return Type::getInt32Ty(Ctx);
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("signed long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
    return Type::getInt64Ty(Ctx);
  else if (TypeName.consume_front("half") ||
           TypeName.consume_front("_Float16") ||
           TypeName.consume_front("__fp16"))
    return Type::getHalfTy(Ctx);
  else if (TypeName.consume_front("float"))
    return Type::getFloatTy(Ctx);
  else if (TypeName.consume_front("double"))
    return Type::getDoubleTy(Ctx);

  // Unable to recognize the SPIR-V type name.
  return nullptr;
}
std::unordered_set<BasicBlock *>
PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
  std::queue<BasicBlock *> ToVisit;
  ToVisit.push(Start);

  std::unordered_set<BasicBlock *> Output;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (Output.count(BB) != 0)
      continue;
    Output.insert(BB);

    for (BasicBlock *Successor : successors(BB))
      ToVisit.push(Successor);
  }
  return Output;
}
bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    // One of the predecessors hasn't been scheduled. Not ready yet.
    if (BlockToOrder.count(P) == 0)
      return false;

    // If the block is a loop exit, the loop must be finished before
    // we can continue.
    Loop *L = LI.getLoopFor(P);
    if (L == nullptr || L->contains(BB))
      continue;

    // SPIR-V requires a single back-edge, and the backend's earlier steps
    // transform loops into that simplified form; more than one back-edge
    // means something is wrong.
    assert(L->getNumBackEdges() <= 1);

    // If the loop has no latch, the loop's rank won't matter, so we can
    // proceed.
    BasicBlock *Latch = L->getLoopLatch();
    if (Latch == nullptr)
      continue;

    // The latch is not ready yet; wait.
    if (BlockToOrder.count(Latch) == 0)
      return false;
  }
  return true;
}
size_t PartialOrderingVisitor::GetNodeRank(BasicBlock *BB) const {
  auto It = BlockToOrder.find(BB);
  if (It != BlockToOrder.end())
    return It->second.Rank;

  size_t result = 0;
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    auto Iterator = BlockToOrder.end();
    Loop *L = LI.getLoopFor(P);
    BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;

    // If the predecessor is either outside a loop, or part of the same loop,
    // simply take its rank + 1.
    if (L == nullptr || L->contains(BB) || Latch == nullptr) {
      Iterator = BlockToOrder.find(P);
    } else {
      // Otherwise, take the latch's rank (the highest rank in the loop),
      // as we are exiting the loop.
      Iterator = BlockToOrder.find(Latch);
    }

    assert(Iterator != BlockToOrder.end());
    result = std::max(result, Iterator->second.Rank + 1);
  }
  return result;
}
size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
  ToVisit.push(BB);
  Queued.insert(BB);

  size_t QueueIndex = 0;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    // A block whose predecessors aren't scheduled yet goes back in the
    // queue; if we cycle through the whole queue without progress, the
    // graph cannot be ordered.
    if (!CanBeVisited(BB)) {
      ToVisit.push(BB);
      if (QueueIndex >= ToVisit.size())
        llvm_unreachable(
            "No valid candidate in the queue. Is the graph reducible?");
      QueueIndex++;
      continue;
    }

    QueueIndex = 0;
    size_t Rank = GetNodeRank(BB);
    OrderInfo Info = {Rank, BlockToOrder.size()};
    BlockToOrder.emplace(BB, Info);

    for (BasicBlock *S : successors(BB)) {
      if (Queued.count(S) != 0)
        continue;
      ToVisit.push(S);
      Queued.insert(S);
    }
  }
  return 0;
}
PartialOrderingVisitor::PartialOrderingVisitor(Function &F) {
  DT.recalculate(F);
  LI = LoopInfo(DT);

  visit(&*F.begin(), 0);

  Order.reserve(F.size());
  for (auto &[BB, Info] : BlockToOrder)
    Order.emplace_back(BB);

  std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
    return compare(LHS, RHS);
  });
}
bool PartialOrderingVisitor::compare(const BasicBlock *LHS,
                                     const BasicBlock *RHS) const {
  const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
  const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
  if (InfoLHS.Rank != InfoRHS.Rank)
    return InfoLHS.Rank < InfoRHS.Rank;
  return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
}
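// Ordering note: blocks are compared first by Rank (a topological level that
// respects loop structure) and, within the same rank, by TraversalIndex (the
// order in which visit() scheduled them), so the final layout is a stable
// partial order rather than a plain DFS/BFS order.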
void PartialOrderingVisitor::partialOrderVisit(
    BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  // Skip blocks preceding Start in the precomputed order.
  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;

  // Worst case Start is the last block, so It must still point into Order.
  assert(It != Order.end());

  // By default, there is no rank limit.
  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
      break;

    if (Reachable.count(*It) == 0)
      continue;

    // When Op declines a block, stop descending past that block's rank.
    if (!Op(*It))
      EndRank = BlockToOrder[*It].Rank;
  }
}
bool sortBlocks(Function &F) {
  if (F.size() == 0)
    return false;

  bool Modified = false;
  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());

  ReversePostOrderTraversal<Function *> RPOT(&F);
  llvm::append_range(Order, RPOT);

  assert(&*F.begin() == Order[0]);
  BasicBlock *LastBlock = &*F.begin();
  for (BasicBlock *BB : Order) {
    if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
      BB->moveAfter(LastBlock);
      Modified = true;
    }
    LastBlock = BB;
  }
  return Modified;
}
MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg) {
  MachineInstr *MaybeDef = MRI.getVRegDef(Reg);
  // Look through ASSIGN_TYPE to the actual defining instruction.
  if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
    MaybeDef = MRI.getVRegDef(MaybeDef->getOperand(1).getReg());
  return MaybeDef;
}
bool getVacantFunctionName(Module &M, std::string &Name) {
  // It is expected that fewer than MaxIters naming conflicts will occur.
  constexpr unsigned MaxIters = 1024;
  for (unsigned I = 0; I < MaxIters; ++I) {
    std::string OrdName = Name + Twine(I).str();
    if (!M.getFunction(OrdName)) {
      Name = std::move(OrdName);
      return true;
    }
  }
  return false;
}
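// Usage sketch (hypothetical module state): with Name = "foo" and functions
// "foo0" and "foo1" already present, the loop probes "foo0", "foo1", then
// finds "foo2" vacant, so Name becomes "foo2" and the function returns true;
// after MaxIters (1024) collisions it gives up and returns false.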
void setRegClassType(Register Reg, SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
                     MachineRegisterInfo *MRI, const MachineFunction &MF,
                     bool Force) {
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  if (!MRI->getRegClassOrNull(Reg) || Force) {
    MRI->setRegClass(Reg, GR->getRegClass(SpvType));
    MRI->setType(Reg, GR->getRegType(SpvType));
  }
}
void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder &MIRBuilder,
                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force) {
  setRegClassType(Reg,
                  GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR),
                  GR, MIRBuilder.getMRI(), MIRBuilder.getMF(), Force);
}
Register createVirtualRegister(
    const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
    SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) {
  return createVirtualRegister(
      GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR), GR,
      MIRBuilder.getMRI(), MIRBuilder.getMF());
}
CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
                          Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
                          IRBuilder<> &B) {
  SmallVector<Value *, 4> Args;
  Args.push_back(buildMD(Arg));
  Args.push_back(Arg2);
  for (auto *Imm : Imms)
    Args.push_back(Imm);
  return B.CreateIntrinsic(IntrID, {Types}, Args);
}
bool isNestedPointer(const Type *Ty) {
  if (Ty->isPtrOrPtrVectorTy())
    return true;
  if (const FunctionType *RefTy = dyn_cast<FunctionType>(Ty)) {
    for (const Type *ArgTy : RefTy->params())
      if (isNestedPointer(ArgTy))
        return true;
  }
  return false;
}
bool isSpvIntrinsic(const Value *Arg) {
  if (const auto *II = dyn_cast<IntrinsicInst>(Arg))
    if (Function *F = II->getCalledFunction())
      if (F->getName().starts_with("llvm.spv."))
        return true;
  return false;
}
SmallVector<MachineInstr *, 4>
createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                            unsigned MinWC, unsigned ContinuedOpcode,
                            ArrayRef<Register> Args, Register ReturnRegister,
                            Register TypeID) {
  SmallVector<MachineInstr *, 4> Instructions;
  constexpr unsigned MaxWordCount = UINT16_MAX;
  const size_t NumElements = Args.size();
  size_t MaxNumElements = MaxWordCount - MinWC;
  size_t SPIRVStructNumElements = NumElements;

  if (NumElements > MaxNumElements) {
    // Adjust for continued instructions, which always have a minimum word
    // count of one.
    SPIRVStructNumElements = MaxNumElements;
    MaxNumElements = MaxWordCount - 1;
  }

  auto MIB =
      MIRBuilder.buildInstr(Opcode).addDef(ReturnRegister).addUse(TypeID);
  for (size_t I = 0; I < SPIRVStructNumElements; ++I)
    MIB.addUse(Args[I]);
  Instructions.push_back(MIB.getInstr());

  for (size_t I = SPIRVStructNumElements; I < NumElements;
       I += MaxNumElements) {
    auto MIB = MIRBuilder.buildInstr(ContinuedOpcode);
    for (size_t J = I; J < std::min(I + MaxNumElements, NumElements); ++J)
      MIB.addUse(Args[J]);
    Instructions.push_back(MIB.getInstr());
  }
  return Instructions;
}
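// Word-count arithmetic (worked example, illustrative): a SPIR-V instruction
// stores its total word count in 16 bits, so at most UINT16_MAX (65535) words
// fit. With MinWC fixed words, only MaxWordCount - MinWC operands fit in the
// head instruction; e.g. for MinWC = 3, the first 65532 arguments go into the
// head and the rest spill into ContinuedOpcode instructions carrying up to
// 65534 operands each (MaxWordCount - 1).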
SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L) {
  unsigned LC = SPIRV::LoopControl::None;
  // Currently used only to store the PartialCount value. Later, when other
  // LoopControls are added, this map should be sorted before its entries are
  // emitted as loop-merge operands, to satisfy the SPIR-V ordering rules.
  std::vector<std::pair<unsigned, unsigned>> MaskToValueMap;
  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable")) {
    LC |= SPIRV::LoopControl::DontUnroll;
  } else {
    if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable") ||
        getBooleanLoopAttribute(L, "llvm.loop.unroll.full"))
      LC |= SPIRV::LoopControl::Unroll;
    std::optional<int> Count =
        getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
    if (Count && Count != 1) {
      LC |= SPIRV::LoopControl::PartialCount;
      MaskToValueMap.emplace_back(
          std::make_pair(SPIRV::LoopControl::PartialCount, *Count));
    }
  }
  SmallVector<unsigned, 1> Result = {LC};
  for (auto &[Mask, Val] : MaskToValueMap)
    Result.push_back(Val);
  return Result;
}
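// Metadata sketch (assumed IR): a loop tagged with
// !{!"llvm.loop.unroll.count", i32 4} produces LC with the PartialCount bit
// set and Result = {LC, 4}; !"llvm.loop.unroll.disable" alone yields just
// {DontUnroll}. Operand order must satisfy the SPIR-V LoopControl rules once
// more masks are supported.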
const std::set<unsigned> &getTypeFoldingSupportedOpcodes() {
  // clang-format off
  static const std::set<unsigned> TypeFoldingSupportingOpcs = {
      TargetOpcode::G_ADD,
      TargetOpcode::G_FADD,
      TargetOpcode::G_STRICT_FADD,
      TargetOpcode::G_SUB,
      TargetOpcode::G_FSUB,
      TargetOpcode::G_STRICT_FSUB,
      TargetOpcode::G_MUL,
      TargetOpcode::G_FMUL,
      TargetOpcode::G_STRICT_FMUL,
      TargetOpcode::G_SDIV,
      TargetOpcode::G_UDIV,
      TargetOpcode::G_FDIV,
      TargetOpcode::G_STRICT_FDIV,
      TargetOpcode::G_SREM,
      TargetOpcode::G_UREM,
      TargetOpcode::G_FREM,
      TargetOpcode::G_STRICT_FREM,
      TargetOpcode::G_FNEG,
      TargetOpcode::G_CONSTANT,
      TargetOpcode::G_FCONSTANT,
      TargetOpcode::G_AND,
      TargetOpcode::G_OR,
      TargetOpcode::G_XOR,
      TargetOpcode::G_SHL,
      TargetOpcode::G_ASHR,
      TargetOpcode::G_LSHR,
      TargetOpcode::G_SELECT,
      TargetOpcode::G_EXTRACT_VECTOR_ELT,
  };
  // clang-format on
  return TypeFoldingSupportingOpcs;
}
MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI) {
  return (Def->getOpcode() == SPIRV::ASSIGN_TYPE ||
          Def->getOpcode() == TargetOpcode::COPY)
             ? MRI->getVRegDef(Def->getOperand(1).getReg())
             : Def;
}
MachineInstr *getImm(const MachineOperand &MO,
                     const MachineRegisterInfo *MRI) {
  if (MachineInstr *Def = getDef(MO, MRI)) {
    Def = passCopy(Def, MRI);
    if (Def->getOpcode() == TargetOpcode::G_CONSTANT ||
        Def->getOpcode() == SPIRV::OpConstantI)
      return Def;
  }
  return nullptr;
}

int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
  if (MachineInstr *Def = getImm(MO, MRI)) {
    if (Def->getOpcode() == SPIRV::OpConstantI)
      return Def->getOperand(2).getImm();
    if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
      return Def->getOperand(1).getCImm()->getZExtValue();
  }
  llvm_unreachable("Unexpected integer constant pattern");
}
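// Usage sketch (hypothetical MIR): given an operand whose definition chain
// ends in `%c = G_CONSTANT i32 42` (possibly behind a COPY or ASSIGN_TYPE
// that passCopy unwraps), getImm returns the defining instruction and
// foldImm(MO, MRI) evaluates to 42; a non-constant operand makes getImm
// return nullptr instead.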
MachineBasicBlock::iterator
getFirstValidInstructionInsertPoint(MachineBasicBlock &BB) {
  // Find the position of OpFunction.
  MachineBasicBlock::iterator VarPos = BB.begin();
  while (VarPos != BB.end() && VarPos->getOpcode() != SPIRV::OpFunction)
    ++VarPos;
  // Advance VarPos to the next instruction after OpFunction, skipping any
  // function parameters.
  if (VarPos != BB.end())
    ++VarPos;
  while (VarPos != BB.end() &&
         VarPos->getOpcode() == SPIRV::OpFunctionParameter)
    ++VarPos;
  // The OpLabel, if present, must also be skipped.
  return VarPos != BB.end() && VarPos->getOpcode() == SPIRV::OpLabel ? ++VarPos
                                                                     : VarPos;
}
std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) {
  if (GV.hasLocalLinkage() || GV.hasHiddenVisibility())
    return std::nullopt;
  if (GV.isDeclarationForLinker())
    return SPIRV::LinkageType::Import;
  if (GV.hasLinkOnceODRLinkage() &&
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr))
    return SPIRV::LinkageType::LinkOnceODR;
  return SPIRV::LinkageType::Export;
}
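// Decision sketch (follows the checks above): an external declaration that is
// merely referenced yields LinkageType::Import; a linkonce_odr definition
// yields LinkOnceODR only when SPV_KHR_linkonce_odr is usable; any other
// exported definition yields Export; local or hidden symbols get no linkage
// decoration at all (std::nullopt).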