LLVM 23.0.0git
SPIRVUtils.cpp
Go to the documentation of this file.
1//===--- SPIRVUtils.cpp ---- SPIR-V Utility Functions -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains miscellaneous utility functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SPIRVUtils.h"
15#include "SPIRV.h"
16#include "SPIRVGlobalRegistry.h"
17#include "SPIRVInstrInfo.h"
18#include "SPIRVSubtarget.h"
19#include "llvm/ADT/StringRef.h"
26#include "llvm/IR/IntrinsicsSPIRV.h"
27#include <queue>
28#include <vector>
29
30namespace llvm {
31namespace SPIRV {
// This code restores function args/retvalue types for composite cases
// because the final types should still be aggregate whereas they're i32
// during the translation to cope with aggregate flattening etc.
// TODO: should these just return nullptr when there's no metadata?
// NOTE(review): the first signature line of this helper is not visible in
// this chunk; from the two call sites below it takes the named-metadata
// node, the flattened FunctionType, and the name to look up.
                                 FunctionType *FTy,
                                 StringRef Name) {
  // No metadata recorded at all: the flattened type is final.
  if (!NMD)
    return FTy;

  // Read operand OpId of MD as a ConstantInt, or null if absent/mistyped.
  constexpr auto getConstInt = [](MDNode *MD, unsigned OpId) -> ConstantInt * {
    if (MD->getNumOperands() <= OpId)
      return nullptr;
    if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MD->getOperand(OpId)))
      return dyn_cast<ConstantInt>(CMeta->getValue());
    return nullptr;
  };

  // Find the metadata entry whose first operand (an MDString) matches Name.
  auto It = find_if(NMD->operands(), [Name](MDNode *N) {
    if (auto *MDS = dyn_cast_or_null<MDString>(N->getOperand(0)))
      return MDS->getString() == Name;
    return false;
  });

  // No entry for this name: nothing to restore.
  if (It == NMD->op_end())
    return FTy;

  Type *RetTy = FTy->getReturnType();
  SmallVector<Type *, 4> PTys(FTy->params());

  // Remaining operands are (index, type) pairs overriding the flat types.
  for (unsigned I = 1; I != (*It)->getNumOperands(); ++I) {
    MDNode *MD = dyn_cast<MDNode>((*It)->getOperand(I));
    assert(MD && "MDNode operand is expected");

    if (auto *Const = getConstInt(MD, 0)) {
      auto *CMeta = dyn_cast<ConstantAsMetadata>(MD->getOperand(1));
      assert(CMeta && "ConstantAsMetadata operand is expected");
      assert(Const->getSExtValue() >= -1);
      // Currently -1 indicates return value, greater values mean
      // argument numbers.
      if (Const->getSExtValue() == -1)
        RetTy = CMeta->getType();
      else
        PTys[Const->getSExtValue()] = CMeta->getType();
    }
  }

  // Rebuild the function type with the restored aggregate pieces.
  return FunctionType::get(RetTy, PTys, FTy->isVarArg());
}

// Restore a function's original type from the "spv.cloned_funcs" named
// metadata, keyed by the function's name.
// NOTE(review): this wrapper's signature line is not visible in this chunk.
      F.getParent()->getNamedMetadata("spv.cloned_funcs"), F.getFunctionType(),
      F.getName());
}

// Restore a call-site's original function type from the
// "spv.mutated_callsites" named metadata, keyed by the call's name.
// NOTE(review): this wrapper's signature line is not visible in this chunk.
      CB.getModule()->getNamedMetadata("spv.mutated_callsites"),
      CB.getFunctionType(), CB.getName());
}
} // Namespace SPIRV
94
95// The following functions are used to add these string literals as a series of
96// 32-bit integer operands with the correct format, and unpack them if necessary
97// when making string comparisons in compiler passes.
98// SPIR-V requires null-terminated UTF-8 strings padded to 32-bit alignment.
99static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
100 uint32_t Word = 0u; // Build up this 32-bit word from 4 8-bit chars.
101 for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
102 unsigned StrIndex = i + WordIndex;
103 uint8_t CharToAdd = 0; // Initilize char as padding/null.
104 if (StrIndex < Str.size()) { // If it's within the string, get a real char.
105 CharToAdd = Str[StrIndex];
106 }
107 Word |= (CharToAdd << (WordIndex * 8));
108 }
109 return Word;
110}
111
112// Get length including padding and null terminator.
113static size_t getPaddedLen(const StringRef &Str) {
114 return (Str.size() + 4) & ~3;
115}
116
// Append Str to Inst encoded as packed 32-bit words (nul-terminated and
// padded, per the SPIR-V string-literal rules).
// NOTE(review): the statement that appends the operand inside this loop is
// not visible in this chunk — confirm against the full file.
void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
  }
}

// Same encoding, appended to a MachineInstrBuilder.
// NOTE(review): the signature line is not visible in this chunk.
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    MIB.addImm(convertCharsToWord(Str, i));
  }
}

// Same encoding, appended to an IR argument list as i32 constants.
// NOTE(review): the first signature line is not visible in this chunk.
                  std::vector<Value *> &Args) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add a vector element for the 32-bits of chars or padding.
    Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
  }
}
141
142std::string getStringImm(const MachineInstr &MI, unsigned StartIndex) {
143 return getSPIRVStringOperand(MI, StartIndex);
144}
145
// Fetch the C-string initializer of the global value whose address defines
// Reg (the register must be defined by G_GLOBAL_VALUE).
// NOTE(review): the signature line and the cast that introduces `CDA`
// (presumably a ConstantDataArray of the global's initializer) are not
// visible in this chunk — confirm against the full file.
  MachineInstr *Def = getVRegDef(MRI, Reg);
  assert(Def && Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE &&
         "Expected G_GLOBAL_VALUE");
  const GlobalValue *GV = Def->getOperand(1).getGlobal();
  Value *V = GV->getOperand(0);
  return CDA->getAsCString().str();
}
155
// Append an arbitrary-width immediate to MIB as one or more 32-bit words,
// low word first, as SPIR-V literals require. 1-bit values are skipped
// here (handled elsewhere as OpConstantTrue/False per the comment below).
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
  const auto Bitwidth = Imm.getBitWidth();
  if (Bitwidth == 1)
    return; // Already handled
  else if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
    // Asm Printer needs this info to print floating-type correctly
    // NOTE(review): the statement originally guarded by this 16-bit check
    // is not visible in this chunk — confirm against the full file.
    if (Bitwidth == 16)
    return;
  } else if (Bitwidth <= 64) {
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
    MIB.addImm(LowBits).addImm(HighBits);
    // Asm Printer needs this info to print 64-bit operands correctly
    // NOTE(review): the 64-bit asm-printer flagging statement is not
    // visible in this chunk.
    return;
  } else {
    // Emit ceil(Bitwidth / 32) words to conform SPIR-V spec.
    unsigned NumWords = (Bitwidth + 31) / 32;
    for (unsigned I = 0; I < NumWords; ++I) {
      // Each 64-bit APInt limb supplies two 32-bit words.
      unsigned LimbIdx = I / 2;
      unsigned LimbShift = (I % 2) * 32;
      uint32_t Word = (Imm.getRawData()[LimbIdx] >> LimbShift) & 0xffffffff;
      MIB.addImm(Word);
    }
    return;
  }
}
186
// Emit an OpName instruction for Target (no-op when Name is empty).
// NOTE(review): the first signature line is not visible in this chunk.
                 MachineIRBuilder &MIRBuilder) {
  if (!Name.empty()) {
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpName).addUse(Target);
    addStringImm(Name, MIB);
  }
}

// OpName variant inserted before an existing instruction via BuildMI.
// NOTE(review): the first signature line is not visible in this chunk.
                 const SPIRVInstrInfo &TII) {
  if (!Name.empty()) {
    auto MIB =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))
            .addUse(Target);
    addStringImm(Name, MIB);
  }
}

// Append the optional string literal and the numeric arguments that
// complete an OpDecorate/OpMemberDecorate being built.
// NOTE(review): the first signature line is not visible in this chunk.
                          const std::vector<uint32_t> &DecArgs,
                          StringRef StrImm) {
  if (!StrImm.empty())
    addStringImm(StrImm, MIB);
  for (const auto &DecArg : DecArgs)
    MIB.addImm(DecArg);
}

// Emit OpDecorate for Reg with decoration Dec and its extra operands.
// NOTE(review): the first signature line is not visible in this chunk.
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

// OpDecorate variant inserted before an existing instruction via BuildMI.
// NOTE(review): the first signature line is not visible in this chunk.
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

// Emit OpMemberDecorate for struct member `Member` of Reg.
// NOTE(review): the first signature line is not visible in this chunk.
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpMemberDecorate)
                 .addUse(Reg)
                 .addImm(Member)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

// OpMemberDecorate variant inserted before an existing instruction.
// NOTE(review): the first signature line is not visible in this chunk.
                           const SPIRVInstrInfo &TII,
                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
                           StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemberDecorate))
                 .addUse(Reg)
                 .addImm(Member)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
256
// Emit OpDecorate instructions for Reg from a decoration MDNode: each
// operand is itself an MDNode whose first element is the SPIR-V
// <Decoration> id, followed by integer and/or string literal operands.
// NOTE(review): the first signature line is not visible in this chunk.
                             const MDNode *GVarMD, const SPIRVSubtarget &ST) {
  for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
    auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
    if (!OpMD)
      report_fatal_error("Invalid decoration");
    if (OpMD->getNumOperands() == 0)
      report_fatal_error("Expect operand(s) of the decoration");
    ConstantInt *DecorationId =
        mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
    if (!DecorationId)
      report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
                         "element of the decoration");

    // The goal of `spirv.Decorations` metadata is to provide a way to
    // represent SPIR-V entities that do not map to LLVM in an obvious way.
    // FP flags do have obvious matches between LLVM IR and SPIR-V.
    // Additionally, we have no guarantee at this point that the flags passed
    // through the decoration are not violated already in the optimizer passes.
    // Therefore, we simply ignore FP flags, including NoContraction, and
    // FPFastMathMode.
    if (DecorationId->getZExtValue() ==
            static_cast<uint32_t>(SPIRV::Decoration::NoContraction) ||
        DecorationId->getZExtValue() ==
            static_cast<uint32_t>(SPIRV::Decoration::FPFastMathMode)) {
      continue; // Ignored.
    }
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                   .addUse(Reg)
                   .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
    // Remaining operands become literal immediates or string literals.
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      if (ConstantInt *OpV =
              mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
        MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
      else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
        addStringImm(OpV->getString(), MIB);
      else
        report_fatal_error("Unexpected operand of the decoration");
    }
  }
}
298
// Walk the entry block past the function header (OpFunction and
// OpFunctionParameter, plus trailing ASSIGN_TYPE/OpLabel instructions) and
// return the first valid insertion point, stopping early at instruction I.
// NOTE(review): the signature line is not visible in this chunk.
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  MachineBasicBlock::iterator It = MBB->SkipPHIsAndLabels(MBB->begin()),
                              E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      // First instruction after the header group: insert past it.
      ++It;
      break;
    }
  }
  return It;
}

// Step backwards from an iterator over any terminator/debug-value run, so
// new code lands before the block's terminator group.
// NOTE(review): the signature lines are not visible in this chunk.
  if (I == MBB->begin())
    return I;
  --I;
  while (I->isTerminator() || I->isDebugValue()) {
    if (I == MBB->begin())
      break;
    --I;
  }
  return I;
}
331
332SPIRV::StorageClass::StorageClass
333addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
334 switch (AddrSpace) {
335 case 0:
336 return SPIRV::StorageClass::Function;
337 case 1:
338 return SPIRV::StorageClass::CrossWorkgroup;
339 case 2:
340 return SPIRV::StorageClass::UniformConstant;
341 case 3:
342 return SPIRV::StorageClass::Workgroup;
343 case 4:
344 return SPIRV::StorageClass::Generic;
345 case 5:
346 return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
347 ? SPIRV::StorageClass::DeviceOnlyINTEL
348 : SPIRV::StorageClass::CrossWorkgroup;
349 case 6:
350 return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
351 ? SPIRV::StorageClass::HostOnlyINTEL
352 : SPIRV::StorageClass::CrossWorkgroup;
353 case 7:
354 return SPIRV::StorageClass::Input;
355 case 8:
356 return SPIRV::StorageClass::Output;
357 case 9:
358 return SPIRV::StorageClass::CodeSectionINTEL;
359 case 10:
360 return SPIRV::StorageClass::Private;
361 case 11:
362 return SPIRV::StorageClass::StorageBuffer;
363 case 12:
364 return SPIRV::StorageClass::Uniform;
365 case 13:
366 return SPIRV::StorageClass::PushConstant;
367 default:
368 report_fatal_error("Unknown address space");
369 }
370}
371
372SPIRV::MemorySemantics::MemorySemantics
373getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
374 switch (SC) {
375 case SPIRV::StorageClass::StorageBuffer:
376 case SPIRV::StorageClass::Uniform:
377 return SPIRV::MemorySemantics::UniformMemory;
378 case SPIRV::StorageClass::Workgroup:
379 return SPIRV::MemorySemantics::WorkgroupMemory;
380 case SPIRV::StorageClass::CrossWorkgroup:
381 return SPIRV::MemorySemantics::CrossWorkgroupMemory;
382 case SPIRV::StorageClass::AtomicCounter:
383 return SPIRV::MemorySemantics::AtomicCounterMemory;
384 case SPIRV::StorageClass::Image:
385 return SPIRV::MemorySemantics::ImageMemory;
386 default:
387 return SPIRV::MemorySemantics::None;
388 }
389}
390
// Map an LLVM atomic ordering onto SPIR-V memory-semantics bits.
// NOTE(review): the `case` labels of this switch are not visible in this
// chunk; the returns below presumably correspond to Acquire, Release,
// AcquireRelease, SequentiallyConsistent, and the remaining (monotonic /
// unordered / not-atomic) orderings — confirm against the full file.
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
    return SPIRV::MemorySemantics::Acquire;
    return SPIRV::MemorySemantics::Release;
    return SPIRV::MemorySemantics::AcquireRelease;
    return SPIRV::MemorySemantics::SequentiallyConsistent;
    return SPIRV::MemorySemantics::None;
  }
  llvm_unreachable(nullptr);
}
408
// Map an LLVM sync-scope id onto a SPIR-V Scope, resolving the named
// scopes "subgroup"/"workgroup"/"device" registered with the context.
// Anything unrecognized conservatively becomes CrossDevice.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
  // Named by
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#_scope_id.
  // We don't need aliases for Invocation and CrossDevice, as we already have
  // them covered by "singlethread" and "" strings respectively (see
  // implementation of LLVMContext::LLVMContext()).
  static const llvm::SyncScope::ID SubGroup =
      Ctx.getOrInsertSyncScopeID("subgroup");
  static const llvm::SyncScope::ID WorkGroup =
      Ctx.getOrInsertSyncScopeID("workgroup");
  static const llvm::SyncScope::ID Device =
      Ctx.getOrInsertSyncScopeID("device");

  // NOTE(review): the condition guarding this first return (presumably
  // Id == llvm::SyncScope::SingleThread) is not visible in this chunk —
  // confirm against the full file.
    return SPIRV::Scope::Invocation;
  else if (Id == llvm::SyncScope::System)
    return SPIRV::Scope::CrossDevice;
  else if (Id == SubGroup)
    return SPIRV::Scope::Subgroup;
  else if (Id == WorkGroup)
    return SPIRV::Scope::Workgroup;
  else if (Id == Device)
    return SPIRV::Scope::Device;
  return SPIRV::Scope::CrossDevice;
}
434
// Resolve ConstReg through G_TRUNC/G_ZEXT, spv_track_constant intrinsics,
// and ASSIGN_TYPE wrappers down to the underlying G_CONSTANT/G_FCONSTANT
// definition, updating ConstReg (by reference) to the resolved register.
// NOTE(review): the first signature line is not visible in this chunk.
                                      const MachineRegisterInfo *MRI) {
  MachineInstr *MI = MRI->getVRegDef(ConstReg);
  // Look through a single widening/narrowing cast first.
  MachineInstr *ConstInstr =
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
          : MI;
  if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
    if (GI->is(Intrinsic::spv_track_constant)) {
      // Operand 2 of the tracking intrinsic is the tracked constant.
      ConstReg = ConstInstr->getOperand(2).getReg();
      return MRI->getVRegDef(ConstReg);
    }
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    ConstReg = ConstInstr->getOperand(1).getReg();
    return MRI->getVRegDef(ConstReg);
  } else if (ConstInstr->getOpcode() == TargetOpcode::G_CONSTANT ||
             ConstInstr->getOpcode() == TargetOpcode::G_FCONSTANT) {
    ConstReg = ConstInstr->getOperand(0).getReg();
    return ConstInstr;
  }
  return MRI->getVRegDef(ConstReg);
}

// Read the zero-extended value of the integer constant defining ConstReg.
// NOTE(review): the signature line is not visible in this chunk.
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}
463
464int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI) {
465 const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
466 assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
467 return MI->getOperand(1).getCImm()->getSExtValue();
468}
469
470bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
471 if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
472 return GI->is(IntrinsicID);
473 return false;
474}
475
476Type *getMDOperandAsType(const MDNode *N, unsigned I) {
477 Type *ElementTy = cast<ValueAsMetadata>(N->getOperand(I))->getType();
478 return toTypedPointer(ElementTy);
479}
480
481// The set of names is borrowed from the SPIR-V translator.
482// TODO: may be implemented in SPIRVBuiltins.td.
483static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
484 return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
485 MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
486 MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
487 MangledName == "reserve_write_pipe" ||
488 MangledName == "reserve_read_pipe" ||
489 MangledName == "commit_write_pipe" ||
490 MangledName == "commit_read_pipe" ||
491 MangledName == "work_group_reserve_write_pipe" ||
492 MangledName == "work_group_reserve_read_pipe" ||
493 MangledName == "work_group_commit_write_pipe" ||
494 MangledName == "work_group_commit_read_pipe" ||
495 MangledName == "get_pipe_num_packets_ro" ||
496 MangledName == "get_pipe_max_packets_ro" ||
497 MangledName == "get_pipe_num_packets_wo" ||
498 MangledName == "get_pipe_max_packets_wo" ||
499 MangledName == "sub_group_reserve_write_pipe" ||
500 MangledName == "sub_group_reserve_read_pipe" ||
501 MangledName == "sub_group_commit_write_pipe" ||
502 MangledName == "sub_group_commit_read_pipe" ||
503 MangledName == "to_global" || MangledName == "to_local" ||
504 MangledName == "to_private";
505}
506
507static bool isEnqueueKernelBI(const StringRef MangledName) {
508 return MangledName == "__enqueue_kernel_basic" ||
509 MangledName == "__enqueue_kernel_basic_events" ||
510 MangledName == "__enqueue_kernel_varargs" ||
511 MangledName == "__enqueue_kernel_events_varargs";
512}
513
514static bool isKernelQueryBI(const StringRef MangledName) {
515 return MangledName == "__get_kernel_work_group_size_impl" ||
516 MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
517 MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
518 MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
519}
520
// Return true if Name is a known non-mangled OpenCL builtin: all start
// with "__", and pipe/address-space-cast names are compared after that
// prefix is dropped.
// NOTE(review): the signature line is not visible in this chunk.
  if (!Name.starts_with("__"))
    return false;

  return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
         isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
         Name == "__translate_sampler_initializer";
}

// Demangle a builtin name: non-mangled OCL/SPIR-V/HLSL names pass through
// unchanged; otherwise try the itanium demangler and finally fall back to
// manual parsing of the length-prefixed source name.
// NOTE(review): the signature line is not visible in this chunk.
  bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");

  // Otherwise use simple demangling to return the function name.
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
    return Name.str();

  // Try to use the itanium demangler.
  if (char *DemangledName = itaniumDemangle(Name.data())) {
    std::string Result = DemangledName;
    free(DemangledName);
    return Result;
  }

  // Autocheck C++, maybe need to do explicit check of the source language.
  // OpenCL C++ built-ins are declared in cl namespace.
  // TODO: consider using 'St' abbreviation for cl namespace mangling.
  // Similar to ::std:: in C++.
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and ref qualifiers.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    // All built-ins are in the ::cl:: namespace.
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  // Parse the itanium-style decimal length prefix of the source name.
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  [[maybe_unused]] bool Error =
      Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
          .getAsInteger(10, Len);
  assert(!Error && "Failed to parse demangled name length");
  return Name.substr(Start, Len).str();
}

// Return true for type-name prefixes reserved for builtin opaque types.
// NOTE(review): the signature line is not visible in this chunk.
  if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
      Name.starts_with("spirv."))
    return true;
  return false;
}
575
576bool isSpecialOpaqueType(const Type *Ty) {
577 if (const TargetExtType *ExtTy = dyn_cast<TargetExtType>(Ty))
578 return isTypedPointerWrapper(ExtTy)
579 ? false
580 : hasBuiltinTypePrefix(ExtTy->getName());
581
582 return false;
583}
584
585bool isEntryPoint(const Function &F) {
586 // OpenCL handling: any function with the SPIR_KERNEL
587 // calling convention will be a potential entry point.
588 if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
589 return true;
590
591 // HLSL handling: special attribute are emitted from the
592 // front-end.
593 if (F.getFnAttribute("hlsl.shader").isValid())
594 return true;
595
596 return false;
597}
598
// Map a textual scalar type name to the corresponding LLVM type, consuming
// the recognized prefix from TypeName; returns nullptr for unrecognized
// names. An "atomic_" prefix is stripped first (it does not change the
// underlying type).
// NOTE(review): the signature line is not visible in this chunk.
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
    return Type::getVoidTy(Ctx);
  else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
    return Type::getIntNTy(Ctx, 1);
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("signed char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
    return Type::getInt8Ty(Ctx);
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("signed short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
    return Type::getInt16Ty(Ctx);
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("signed int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
    return Type::getInt32Ty(Ctx);
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("signed long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
    return Type::getInt64Ty(Ctx);
  else if (TypeName.consume_front("half") ||
           TypeName.consume_front("_Float16") ||
           TypeName.consume_front("__fp16"))
    return Type::getHalfTy(Ctx);
  else if (TypeName.consume_front("float"))
    return Type::getFloatTy(Ctx);
  else if (TypeName.consume_front("double"))
    return Type::getDoubleTy(Ctx);

  // Unable to recognize SPIRV type name
  return nullptr;
}
637
638std::unordered_set<BasicBlock *>
639PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
640 std::queue<BasicBlock *> ToVisit;
641 ToVisit.push(Start);
642
643 std::unordered_set<BasicBlock *> Output;
644 while (ToVisit.size() != 0) {
645 BasicBlock *BB = ToVisit.front();
646 ToVisit.pop();
647
648 if (Output.count(BB) != 0)
649 continue;
650 Output.insert(BB);
651
652 for (BasicBlock *Successor : successors(BB)) {
653 if (DT.dominates(Successor, BB))
654 continue;
655 ToVisit.push(Successor);
656 }
657 }
658
659 return Output;
660}
661
662bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
663 for (BasicBlock *P : predecessors(BB)) {
664 // Ignore back-edges.
665 if (DT.dominates(BB, P))
666 continue;
667
668 // One of the predecessor hasn't been visited. Not ready yet.
669 if (BlockToOrder.count(P) == 0)
670 return false;
671
672 // If the block is a loop exit, the loop must be finished before
673 // we can continue.
674 Loop *L = LI.getLoopFor(P);
675 if (L == nullptr || L->contains(BB))
676 continue;
677
678 // SPIR-V requires a single back-edge. And the backend first
679 // step transforms loops into the simplified format. If we have
680 // more than 1 back-edge, something is wrong.
681 assert(L->getNumBackEdges() <= 1);
682
683 // If the loop has no latch, loop's rank won't matter, so we can
684 // proceed.
685 BasicBlock *Latch = L->getLoopLatch();
686 assert(Latch);
687 if (Latch == nullptr)
688 continue;
689
690 // The latch is not ready yet, let's wait.
691 if (BlockToOrder.count(Latch) == 0)
692 return false;
693 }
694
695 return true;
696}
697
// Compute the rank of BB: one more than the highest relevant predecessor
// rank, where a predecessor that exits a loop contributes the loop latch's
// rank instead of its own. Already-ordered blocks return their cached rank.
// NOTE(review): the signature line is not visible in this chunk.
  auto It = BlockToOrder.find(BB);
  if (It != BlockToOrder.end())
    return It->second.Rank;

  size_t result = 0;
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    auto Iterator = BlockToOrder.end();
    Loop *L = LI.getLoopFor(P);
    BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;

    // If the predecessor is either outside a loop, or part of
    // the same loop, simply take its rank + 1.
    if (L == nullptr || L->contains(BB) || Latch == nullptr) {
      Iterator = BlockToOrder.find(P);
    } else {
      // Otherwise, take the loop's rank (highest rank in the loop) as base.
      // Since loops have a single latch, highest rank is easy to find.
      // If the loop has no latch, then it doesn't matter.
      Iterator = BlockToOrder.find(Latch);
    }

    assert(Iterator != BlockToOrder.end());
    result = std::max(result, Iterator->second.Rank + 1);
  }

  return result;
}

// BFS over the CFG that assigns each block a (rank, traversal index) pair,
// deferring blocks whose predecessors/loops are not ordered yet by rotating
// them to the back of the queue.
size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
  ToVisit.push(BB);
  Queued.insert(BB);

  size_t QueueIndex = 0;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (!CanBeVisited(BB)) {
      // Rotate the block to the back of the queue; a full rotation with no
      // visitable block means the CFG is irreducible.
      ToVisit.push(BB);
      // NOTE(review): the report_fatal_error(...) call line guarded here is
      // not visible in this chunk — confirm against the full file.
      if (QueueIndex >= ToVisit.size())
            "No valid candidate in the queue. Is the graph reducible?");
      QueueIndex++;
      continue;
    }

    QueueIndex = 0;
    size_t Rank = GetNodeRank(BB);
    OrderInfo Info = {Rank, BlockToOrder.size()};
    BlockToOrder.emplace(BB, Info);

    for (BasicBlock *S : successors(BB)) {
      if (Queued.count(S) != 0)
        continue;
      ToVisit.push(S);
      Queued.insert(S);
    }
  }

  return 0;
}

// Constructor body: recompute dominators and loop info for F, run the
// partial-order traversal from the entry block, then sort the blocks by
// (rank, traversal index).
// NOTE(review): the constructor's signature line is not visible here.
  DT.recalculate(F);
  LI = LoopInfo(DT);

  visit(&*F.begin(), 0);

  Order.reserve(F.size());
  for (auto &[BB, Info] : BlockToOrder)
    Order.emplace_back(BB);

  std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
    return compare(LHS, RHS);
  });
}

// Strict weak ordering: rank first, traversal index as the deterministic
// tie-breaker.
// NOTE(review): the first signature line is not visible in this chunk.
                                     const BasicBlock *RHS) const {
  const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
  const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
  if (InfoLHS.Rank != InfoRHS.Rank)
    return InfoLHS.Rank < InfoRHS.Rank;
  return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
}

// Invoke Op on every ordered block reachable from Start; when Op returns
// false, the traversal is capped at that block's rank.
// NOTE(review): the first signature line is not visible in this chunk.
    BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  // Skipping blocks with a rank inferior to |Start|'s rank.
  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;

  // This is unexpected. Worst case |Start| is the last block,
  // so It should point to the last block, not past-end.
  assert(It != Order.end());

  // By default, there is no rank limit. Setting it to the maximum value.
  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
      break;

    if (Reachable.count(*It) == 0) {
      continue;
    }

    if (!Op(*It)) {
      EndRank = BlockToOrder[*It].Rank;
    }
  }
}
818
// Physically reorder F's basic blocks to match a reverse post-order
// traversal; returns true if any block was actually moved.
// NOTE(review): the signature line and the declaration of RPOT (the
// reverse-post-order traversal over F) are not visible in this chunk.
  if (F.size() == 0)
    return false;

  bool Modified = false;
  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());

  llvm::append_range(Order, RPOT);

  // The entry block must stay first.
  assert(&*F.begin() == Order[0]);
  BasicBlock *LastBlock = &*F.begin();
  for (BasicBlock *BB : Order) {
    // Only move blocks that are not already in position.
    if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
      Modified = true;
      BB->moveAfter(LastBlock);
    }
    LastBlock = BB;
  }

  return Modified;
}

// Return the instruction defining Reg, looking through an ASSIGN_TYPE
// wrapper to the real definition.
// NOTE(review): the signature line is not visible in this chunk.
  MachineInstr *MaybeDef = MRI.getVRegDef(Reg);
  if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
    MaybeDef = MRI.getVRegDef(MaybeDef->getOperand(1).getReg());
  return MaybeDef;
}
849
850bool getVacantFunctionName(Module &M, std::string &Name) {
851 // It's a bit of paranoia, but still we don't want to have even a chance that
852 // the loop will work for too long.
853 constexpr unsigned MaxIters = 1024;
854 for (unsigned I = 0; I < MaxIters; ++I) {
855 std::string OrdName = Name + Twine(I).str();
856 if (!M.getFunction(OrdName)) {
857 Name = std::move(OrdName);
858 return true;
859 }
860 }
861 return false;
862}
863
// Assign SPIR-V type to the register. If the register has no valid assigned
// class, set register LLT type and class according to the SPIR-V type.
// NOTE(review): the signature lines of the helpers in this group are not
// visible in this chunk.
                     const MachineFunction &MF, bool Force) {
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  if (!MRI->getRegClassOrNull(Reg) || Force) {
    MRI->setRegClass(Reg, GR->getRegClass(SpvType));
    LLT RegType = GR->getRegType(SpvType);
    // Only overwrite a valid LLT when explicitly forced.
    if (Force || !MRI->getType(Reg).isValid())
      MRI->setType(Reg, RegType);
  }
}

// Create a SPIR-V type, assign SPIR-V type to the register. If the register
// has no valid assigned class, set register LLT type and class according to
// the SPIR-V type.
// NOTE(review): the signature line and the start of the forwarding call are
// not visible in this chunk.
                     MachineIRBuilder &MIRBuilder,
                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force) {
      GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR),
      GR, MIRBuilder.getMRI(), MIRBuilder.getMF(), Force);
}

// Create a virtual register and assign SPIR-V type to the register. Set
// register LLT type and class according to the SPIR-V type.
// NOTE(review): the signature lines are not visible in this chunk.
                               const MachineFunction &MF) {
  Register Reg = MRI->createVirtualRegister(GR->getRegClass(SpvType));
  MRI->setType(Reg, GR->getRegType(SpvType));
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  return Reg;
}

// Create a virtual register and assign SPIR-V type to the register. Set
// register LLT type and class according to the SPIR-V type.
// NOTE(review): the signature line is not visible in this chunk.
                               MachineIRBuilder &MIRBuilder) {
  return createVirtualRegister(SpvType, GR, MIRBuilder.getMRI(),
                               MIRBuilder.getMF());
}

// Create a SPIR-V type, virtual register and assign SPIR-V type to the
// register. Set register LLT type and class according to the SPIR-V type.
// NOTE(review): the signature line and the start of the forwarding call are
// not visible in this chunk.
    const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
    SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) {
      GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR), GR,
      MIRBuilder);
}

// Build a SPIR-V intrinsic call whose arguments are Arg2, Arg wrapped as
// metadata, and the trailing immediates.
// NOTE(review): the signature line and the declaration of `Args` are not
// visible in this chunk.
    Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
    IRBuilder<> &B) {
  Args.push_back(Arg2);
  Args.push_back(buildMD(Arg));
  llvm::append_range(Args, Imms);
  return B.CreateIntrinsic(IntrID, {Types}, Args);
}
928
929// Return true if there is an opaque pointer type nested in the argument.
930bool isNestedPointer(const Type *Ty) {
931 if (Ty->isPtrOrPtrVectorTy())
932 return true;
933 if (const FunctionType *RefTy = dyn_cast<FunctionType>(Ty)) {
934 if (isNestedPointer(RefTy->getReturnType()))
935 return true;
936 for (const Type *ArgTy : RefTy->params())
937 if (isNestedPointer(ArgTy))
938 return true;
939 return false;
940 }
941 if (const ArrayType *RefTy = dyn_cast<ArrayType>(Ty))
942 return isNestedPointer(RefTy->getElementType());
943 return false;
944}
945
946bool isSpvIntrinsic(const Value *Arg) {
947 if (const auto *II = dyn_cast<IntrinsicInst>(Arg))
948 if (Function *F = II->getCalledFunction())
949 if (F->getName().starts_with("llvm.spv."))
950 return true;
951 return false;
952}
953
// Function to create continued instructions for SPV_INTEL_long_composites
// extension
// NOTE(review): the remaining signature lines (builder, main opcode, type
// id) and the declaration of `Instructions` are not visible in this chunk.
SmallVector<MachineInstr *, 4>
                           unsigned MinWC, unsigned ContinuedOpcode,
                           ArrayRef<Register> Args, Register ReturnRegister,

  // SPIR-V encodes an instruction's word count in 16 bits, so one
  // instruction can carry at most UINT16_MAX words.
  constexpr unsigned MaxWordCount = UINT16_MAX;
  const size_t NumElements = Args.size();
  size_t MaxNumElements = MaxWordCount - MinWC;
  size_t SPIRVStructNumElements = NumElements;

  if (NumElements > MaxNumElements) {
    // Do adjustments for continued instructions which always had only one
    // minumum word count.
    SPIRVStructNumElements = MaxNumElements;
    MaxNumElements = MaxWordCount - 1;
  }

  // Head instruction: result + type + as many operands as fit.
  auto MIB =
      MIRBuilder.buildInstr(Opcode).addDef(ReturnRegister).addUse(TypeID);

  for (size_t I = 0; I < SPIRVStructNumElements; ++I)
    MIB.addUse(Args[I]);

  Instructions.push_back(MIB.getInstr());

  // Remaining operands are spread across Continued* instructions.
  for (size_t I = SPIRVStructNumElements; I < NumElements;
       I += MaxNumElements) {
    auto MIB = MIRBuilder.buildInstr(ContinuedOpcode);
    for (size_t J = I; J < std::min(I + MaxNumElements, NumElements); ++J)
      MIB.addUse(Args[J]);
    Instructions.push_back(MIB.getInstr());
  }
  return Instructions;
}

// Translate llvm.loop.* unroll metadata into a SPIR-V LoopControl mask
// followed by its literal operands (currently only PartialCount).
// NOTE(review): the signature lines are not visible in this chunk.
  unsigned LC = SPIRV::LoopControl::None;
  // Currently used only to store PartialCount value. Later when other
  // LoopControls are added - this map should be sorted before making
  // them loop_merge operands to satisfy 3.23. Loop Control requirements.
  std::vector<std::pair<unsigned, unsigned>> MaskToValueMap;
  if (findOptionMDForLoopID(LoopMD, "llvm.loop.unroll.disable")) {
    LC |= SPIRV::LoopControl::DontUnroll;
  } else {
    if (findOptionMDForLoopID(LoopMD, "llvm.loop.unroll.enable") ||
        findOptionMDForLoopID(LoopMD, "llvm.loop.unroll.full")) {
      LC |= SPIRV::LoopControl::Unroll;
    }
    if (MDNode *CountMD =
            findOptionMDForLoopID(LoopMD, "llvm.loop.unroll.count")) {
      if (auto *CI =
              mdconst::extract_or_null<ConstantInt>(CountMD->getOperand(1))) {
        unsigned Count = CI->getZExtValue();
        // An unroll count of 1 is a no-op; do not emit PartialCount for it.
        if (Count != 1) {
          LC |= SPIRV::LoopControl::PartialCount;
          MaskToValueMap.emplace_back(
              std::make_pair(SPIRV::LoopControl::PartialCount, Count));
        }
      }
    }
  }
  // Result layout: the mask word first, then each mask's literal value.
  SmallVector<unsigned, 1> Result = {LC};
  for (auto &[Mask, Val] : MaskToValueMap)
    Result.push_back(Val);
  return Result;
}
1025
1029
1030const std::set<unsigned> &getTypeFoldingSupportedOpcodes() {
1031 // clang-format off
1032 static const std::set<unsigned> TypeFoldingSupportingOpcs = {
1033 TargetOpcode::G_ADD,
1034 TargetOpcode::G_FADD,
1035 TargetOpcode::G_STRICT_FADD,
1036 TargetOpcode::G_SUB,
1037 TargetOpcode::G_FSUB,
1038 TargetOpcode::G_STRICT_FSUB,
1039 TargetOpcode::G_MUL,
1040 TargetOpcode::G_FMUL,
1041 TargetOpcode::G_STRICT_FMUL,
1042 TargetOpcode::G_SDIV,
1043 TargetOpcode::G_UDIV,
1044 TargetOpcode::G_FDIV,
1045 TargetOpcode::G_STRICT_FDIV,
1046 TargetOpcode::G_SREM,
1047 TargetOpcode::G_UREM,
1048 TargetOpcode::G_FREM,
1049 TargetOpcode::G_STRICT_FREM,
1050 TargetOpcode::G_FNEG,
1051 TargetOpcode::G_CONSTANT,
1052 TargetOpcode::G_FCONSTANT,
1053 TargetOpcode::G_AND,
1054 TargetOpcode::G_OR,
1055 TargetOpcode::G_XOR,
1056 TargetOpcode::G_SHL,
1057 TargetOpcode::G_ASHR,
1058 TargetOpcode::G_LSHR,
1059 TargetOpcode::G_SELECT,
1060 TargetOpcode::G_EXTRACT_VECTOR_ELT,
1061 };
1062 // clang-format on
1063 return TypeFoldingSupportingOpcs;
1064}
1065
1066bool isTypeFoldingSupported(unsigned Opcode) {
1067 return getTypeFoldingSupportedOpcodes().count(Opcode) > 0;
1068}
1069
1070// Traversing [g]MIR accounting for pseudo-instructions.
1072 return (Def->getOpcode() == SPIRV::ASSIGN_TYPE ||
1073 Def->getOpcode() == TargetOpcode::COPY)
1074 ? MRI->getVRegDef(Def->getOperand(1).getReg())
1075 : Def;
1076}
1077
1079 if (MachineInstr *Def = MRI->getVRegDef(MO.getReg()))
1080 return passCopy(Def, MRI);
1081 return nullptr;
1082}
1083
1085 if (MachineInstr *Def = getDef(MO, MRI)) {
1086 if (Def->getOpcode() == TargetOpcode::G_CONSTANT ||
1087 Def->getOpcode() == SPIRV::OpConstantI)
1088 return Def;
1089 }
1090 return nullptr;
1091}
1092
1093int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
1094 if (MachineInstr *Def = getImm(MO, MRI)) {
1095 if (Def->getOpcode() == SPIRV::OpConstantI)
1096 return Def->getOperand(2).getImm();
1097 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1098 return Def->getOperand(1).getCImm()->getZExtValue();
1099 }
1100 llvm_unreachable("Unexpected integer constant pattern");
1101}
1102
1104 const MachineInstr *ResType) {
1105 return foldImm(ResType->getOperand(2), MRI);
1106}
1107
1110 // Find the position to insert the OpVariable instruction.
1111 // We will insert it after the last OpFunctionParameter, if any, or
1112 // after OpFunction otherwise.
1113 MachineBasicBlock::iterator VarPos = BB.begin();
1114 while (VarPos != BB.end() && VarPos->getOpcode() != SPIRV::OpFunction) {
1115 ++VarPos;
1116 }
1117 // Advance VarPos to the next instruction after OpFunction, it will either
1118 // be an OpFunctionParameter, so that we can start the next loop, or the
1119 // position to insert the OpVariable instruction.
1120 ++VarPos;
1121 while (VarPos != BB.end() &&
1122 VarPos->getOpcode() == SPIRV::OpFunctionParameter) {
1123 ++VarPos;
1124 }
1125 // VarPos is now pointing at after the last OpFunctionParameter, if any,
1126 // or after OpFunction, if no parameters.
1127 return VarPos != BB.end() && VarPos->getOpcode() == SPIRV::OpLabel ? ++VarPos
1128 : VarPos;
1129}
1130
1131bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
1132 uint64_t &TotalSize) {
1133 // An array of N padded structs is represented as {[N-1 x <{T, pad}>], T}.
1134 if (Ty->getStructNumElements() != 2)
1135 return false;
1136
1137 Type *FirstElement = Ty->getStructElementType(0);
1138 Type *SecondElement = Ty->getStructElementType(1);
1139
1140 if (!FirstElement->isArrayTy())
1141 return false;
1142
1143 Type *ArrayElementType = FirstElement->getArrayElementType();
1144 if (!ArrayElementType->isStructTy() ||
1145 ArrayElementType->getStructNumElements() != 2)
1146 return false;
1147
1148 Type *T_in_struct = ArrayElementType->getStructElementType(0);
1149 if (T_in_struct != SecondElement)
1150 return false;
1151
1152 auto *Padding_in_struct =
1153 dyn_cast<TargetExtType>(ArrayElementType->getStructElementType(1));
1154 if (!Padding_in_struct || Padding_in_struct->getName() != "spirv.Padding")
1155 return false;
1156
1157 const uint64_t ArraySize = FirstElement->getArrayNumElements();
1158 TotalSize = ArraySize + 1;
1159 OriginalElementType = ArrayElementType;
1160 return true;
1161}
1162
1164 if (!Ty->isStructTy())
1165 return Ty;
1166
1167 auto *STy = cast<StructType>(Ty);
1168 Type *OriginalElementType = nullptr;
1169 uint64_t TotalSize = 0;
1170 if (matchPeeledArrayPattern(STy, OriginalElementType, TotalSize)) {
1171 Type *ResultTy = ArrayType::get(
1172 reconstitutePeeledArrayType(OriginalElementType), TotalSize);
1173 return ResultTy;
1174 }
1175
1176 SmallVector<Type *, 4> NewElementTypes;
1177 bool Changed = false;
1178 for (Type *ElementTy : STy->elements()) {
1179 Type *NewElementTy = reconstitutePeeledArrayType(ElementTy);
1180 if (NewElementTy != ElementTy)
1181 Changed = true;
1182 NewElementTypes.push_back(NewElementTy);
1183 }
1184
1185 if (!Changed)
1186 return Ty;
1187
1188 Type *ResultTy;
1189 if (STy->isLiteral())
1190 ResultTy =
1191 StructType::get(STy->getContext(), NewElementTypes, STy->isPacked());
1192 else {
1193 auto *NewTy = StructType::create(STy->getContext(), STy->getName());
1194 NewTy->setBody(NewElementTypes, STy->isPacked());
1195 ResultTy = NewTy;
1196 }
1197 return ResultTy;
1198}
1199
// Maps an LLVM GlobalValue's linkage/visibility to the SPIR-V LinkageType it
// should be decorated with; std::nullopt means "emit no linkage decoration".
// The checks are ordered: local -> declaration -> hidden -> linkonce_odr.
std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) {
  // Module-local symbols are never linked across modules.
  if (GV.hasLocalLinkage())
    return std::nullopt;

  if (GV.isDeclarationForLinker()) {
    // Interface variables must not get Import linkage.
    if (const auto *GVar = dyn_cast<GlobalVariable>(&GV)) {
      auto SC = addressSpaceToStorageClass(GVar->getAddressSpace(), ST);
      if (SC == SPIRV::StorageClass::Input ||
          SC == SPIRV::StorageClass::Output ||
          SC == SPIRV::StorageClass::PushConstant)
        return std::nullopt;
    }
    return SPIRV::LinkageType::Import;
  }

  // Hidden visibility: the definition is not exported.
  if (GV.hasHiddenVisibility())
    return std::nullopt;

  // LinkOnceODR is only expressible with the SPV_KHR_linkonce_odr extension.
  if (GV.hasLinkOnceODRLinkage() &&
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr))
    return SPIRV::LinkageType::LinkOnceODR;

  return SPIRV::LinkageType::Export;
}
1226
// Returns the module's internal backend "service" function, creating it as an
// empty private void() function when it does not exist yet. Fails fatally if
// a name cannot be allocated or an existing service function is non-empty.
Function *getOrCreateBackendServiceFunction(Module &M) {
  std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
  if (!getVacantFunctionName(M, ServiceFunName))
    report_fatal_error(
        "cannot allocate a name for the internal service function");
  if (Function *SF = M.getFunction(ServiceFunName)) {
    // A pre-existing service function must still be empty at this point.
    if (SF->getInstructionCount() > 0)
      report_fatal_error(
          "Unexpected combination of global variables and function pointers");
    return SF;
  }
  Function *SF = Function::Create(
      FunctionType::get(Type::getVoidTy(M.getContext()), {}, false),
      GlobalValue::PrivateLinkage, ServiceFunName, M);
  // NOTE(review): one statement between Function::Create and the return was
  // lost in extraction (likely an SF->addFnAttr(...) call — addFnAttr is
  // referenced by this file's index); confirm against the upstream source.
  return SF;
}
1244
1245} // namespace llvm
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineIRBuilder class.
Register Reg
Type::TypeID TypeID
uint64_t IntrinsicInst * II
#define P(N)
static ConstantInt * getConstInt(MDNode *MD, unsigned NumOp)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:532
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
const Instruction & front() const
Definition BasicBlock.h:484
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
An array constant whose element type is a simple 1/2/4/8-byte integer, bytes or float/double,...
Definition Constants.h:846
StringRef getAsCString() const
If this array is isCString(), then this method returns the array (without the trailing null byte) as ...
Definition Constants.h:819
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
Class to represent function types.
ArrayRef< Type * > params() const
bool isVarArg() const
Type * getReturnType() const
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:638
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
const Function & getFunction() const
Definition Function.h:166
bool hasLocalLinkage() const
bool hasHiddenVisibility() const
bool isDeclarationForLinker() const
bool hasLinkOnceODRLinkage() const
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
void addOperand(const MCOperand Op)
Definition MCInst.h:215
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
A single uniqued string.
Definition Metadata.h:722
MachineInstrBundleIterator< MachineInstr > iterator
const MachineBasicBlock & front() const
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void setAsmPrinterFlag(AsmPrinterFlagTy Flag)
Set a flag for the AsmPrinter.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
NamedMDNode * getNamedMetadata(StringRef Name) const
Return the first NamedMDNode in the module with the specified name.
Definition Module.cpp:301
A tuple of MDNodes.
Definition Metadata.h:1760
op_iterator op_end()
Definition Metadata.h:1849
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
size_t GetNodeRank(BasicBlock *BB) const
void partialOrderVisit(BasicBlock &Start, std::function< bool(BasicBlock *)> Op)
bool compare(const BasicBlock *LHS, const BasicBlock *RHS) const
Wrapper class representing virtual and physical registers.
Definition Register.h:20
void assignSPIRVTypeToVReg(SPIRVTypeInst Type, Register VReg, const MachineFunction &MF)
const TargetRegisterClass * getRegClass(SPIRVTypeInst SpvType) const
LLT getRegType(SPIRVTypeInst SpvType) const
SPIRVTypeInst getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
bool canUseExtension(SPIRV::Extension::Extension E) const
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:222
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:483
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:689
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition Twine.cpp:17
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
LLVM_ABI Type * getStructElementType(unsigned N) const
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
Type * getArrayElementType() const
Definition Type.h:427
LLVM_ABI unsigned getStructNumElements() const
LLVM_ABI uint64_t getArrayNumElements() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
static LLVM_ABI IntegerType * getInt16Ty(LLVMContext &C)
Definition Type.cpp:312
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:291
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:290
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:288
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
FunctionType * getOriginalFunctionType(const Function &F)
static FunctionType * extractFunctionTypeFromMetadata(NamedMDNode *NMD, FunctionType *FTy, StringRef Name)
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool getVacantFunctionName(Module &M, std::string &Name)
std::string getStringImm(const MachineInstr &MI, unsigned StartIndex)
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:410
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
static void finishBuildOpDecorate(MachineInstrBuilder &MIB, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
bool isTypeFoldingSupported(unsigned Opcode)
static uint32_t convertCharsToWord(const StringRef &Str, unsigned i)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
MachineInstr * getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI)
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
auto successors(const MachineBasicBlock *BB)
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType, uint64_t &TotalSize)
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
bool sortBlocks(Function &F)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
bool isNestedPointer(const Type *Ty)
Function * getOrCreateBackendServiceFunction(Module &M)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:520
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
SmallVector< unsigned, 1 > getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
std::string getSPIRVStringOperand(const InstType &MI, unsigned StartIndex)
void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, uint32_t Member, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Type * toTypedPointer(Type *Ty)
Definition SPIRVUtils.h:465
DEMANGLE_ABI char * itaniumDemangle(std::string_view mangled_name, bool ParseParams=true)
Returns a non-NULL pointer to a NUL-terminated C style string that should be explicitly freed,...
bool isSpecialOpaqueType(const Type *Ty)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
void setRegClassType(Register Reg, SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF, bool Force)
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
static bool isNonMangledOCLBuiltin(StringRef Name)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
bool isEntryPoint(const Function &F)
const std::set< unsigned > & getTypeFoldingSupportedOpcodes()
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName)
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder, const MDNode *GVarMD, const SPIRVSubtarget &ST)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
DWARFExpression::Operation Op
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool hasBuiltinTypePrefix(StringRef Name)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
auto predecessors(const MachineBasicBlock *BB)
static size_t getPaddedLen(const StringRef &Str)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
void addStringImm(const StringRef &Str, MCInst &Inst)
static bool isKernelQueryBI(const StringRef MangledName)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
static bool isEnqueueKernelBI(const StringRef MangledName)
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
LLVM_ABI MDNode * findOptionMDForLoopID(MDNode *LoopID, StringRef Name)
Find and return the loop attribute node for the attribute Name in LoopID.
#define N