//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>
// This pass performs the following transformation of the LLVM IR, required
// for the subsequent translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.
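//
// For example (an illustrative sketch only; the exact intrinsic mangling and
// metadata shapes are produced by the helpers below), an untyped pointer
// definition such as
//   %p = alloca i32
// gets annotated as
//   %p = alloca i32
//   call void @llvm.spv.assign.ptr.type.p0(ptr %p, metadata i32 poison, i32 0)
// so that the pointee type survives into the IRTranslator.
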
using namespace llvm;

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
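  // Note on the bookkeeping below: entries are never removed from the map;
  // eraseTodoType() only flips the flag to false, and TodoTypeSz tracks the
  // number of entries that are still live.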
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (!CanTodoType)
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types of an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the two callbacks is called:
  // - OnLiteralIndexing when the index is a known constant value.
  //   Parameters:
  //     PointedType: the pointed type resulting from this indexing.
  //     Index: index of the element in the parent type. If the parent type is
  //       an array, this is the index in the array. If the parent type is a
  //       struct, this is the field index.
  // - OnDynamicIndexing when the index is a non-constant value.
  //   This callback is only called when indexing into an array.
  //   Parameters:
  //     ElementType: the type of the elements stored in the parent array.
  //     Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method cannot reliably
  // retrieve the stored type, nor can it robustly determine the depth of the
  // type we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcasts.
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<LoadInst, StoreInst, ExtractValueInst, InsertValueInst,
             AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}
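
// This predicate matches exactly the aggregate constants that the pass
// materializes with a nominal i32 result type (see
// preprocessCompositeConstants() below); vector constants keep their own type.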
325
327 if (isa<PHINode>(I))
328 B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
329 else
330 B.SetInsertPoint(I);
331}
332
334 B.SetCurrentDebugLocation(I->getDebugLoc());
335 if (I->getType()->isVoidTy())
336 B.SetInsertPoint(I->getNextNode());
337 else
338 B.SetInsertPoint(*I->getInsertionPointAfterDef());
339}
340
342 if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
343 switch (Intr->getIntrinsicID()) {
344 case Intrinsic::invariant_start:
345 case Intrinsic::invariant_end:
346 return false;
347 }
348 }
349 return true;
350}
351
352static inline void reportFatalOnTokenType(const Instruction *I) {
353 if (I->getType()->isTokenTy())
354 report_fatal_error("A token is encountered but SPIR-V without extensions "
355 "does not support token type",
356 false);
357}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;
  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records if any
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static inline Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                       Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}
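
// The lookup order above is: (1) an element type already deduced and recorded
// in the global registry, (2) the type recorded by an existing
// spv_assign_ptr_type intrinsic, (3) an i8 placeholder (registered as a
// "todo" type so it can be refined later), and only then nullptr.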

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}
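
// The emitted pattern is roughly (an illustrative sketch; the exact mangling
// is produced by CreateIntrinsic):
//   %c = call ptr @llvm.spv.ptrcast.p0(ptr %op, metadata <ElemTy> poison,
//                                      i32 <addrspace>)
// followed by an assign-ptr-type annotation on %c.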

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Set the element pointer type to the given ValueTy, and try to specify this
// type further (recursively) by the Operand value, if needed.

Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider generalizing this particular case by encoding knowledge
// about intrinsics and builtin calls in the corresponding specification rules
static Type *getPointeeTypeByCallInst(StringRef DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else {
      // Vector type indexing should not use GEP.
      // So if we have an index left, something is wrong. Giving up.
      return true;
    }
  } while (Offset > 0);

  return false;
}
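
// Example (a sketch): with a source type of { [4 x i32], float } and
// `getelementptr i8, ptr %p, i64 8`, the walk first reports field 0 of the
// struct (byte offset 8 falls inside the array), then element 2 of the array
// (8 divided by the 4-byte element size), leaving a zero remainder.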

Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {

  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow nullptr to be passed as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    Ty = deduceElementTypeByValueDeep(
        Ref->getValueType(),
        Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
        UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          TargetExtType *BufferTy = cast<TargetExtType>(Ty);
          assert(BufferTy->getTargetExtName() == "spirv.Layout");
          Ty = BufferTy->getTypeParameter(0);
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}
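
// Note the PHI case above: the helper takes a majority vote among the element
// types deduced for the incoming values and keeps the most frequent one.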

// Re-create a type of the value if it has untyped pointer fields, also nested.
// Return the original value type if no correction of untyped pointer
// information is found or needed.
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}

static inline Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                                    Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = CI->getFunctionType();
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (Value *Arg : CI->args()) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy =
          getTypedPointerWrapper(ElemTy, getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen just once per function; the latch is the pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or everything is valid already.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}
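
// Once a better element type is known for an operand, there are two ways it
// is applied above: if the operand's type record was absent or incomplete,
// its spv_assign_ptr_type record is created or updated in place (and the
// change is propagated to already-validated users); otherwise the operand is
// wrapped into a fresh spv_ptrcast at this use.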

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}

void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = COp;
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = COp;
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}
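
// E.g. (an illustrative sketch): a use of the constant [2 x i32] [i32 1,
// i32 2] is replaced with the result of
//   %cc = call i32 @llvm.spv.const.composite(i32 1, i32 2)
// while the original constant and its deduced type are remembered in
// AggrConsts/AggrConstTypes for the later translation steps.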

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

// Apply the SaturatedConversion decoration to saturating fp-to-int calls.
// NOTE: the name of this helper was lost in the listing; the signature below
// is reconstructed from its body and is an assumption.
static void createSaturatedConversionDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        const unsigned IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Call.getParent());
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}

Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  BasicBlock *ParentBB = I.getParent();
  IRBuilder<> B(ParentBB);
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  SmallVector<BasicBlock *> BBCases;
  for (auto &Op : I.operands()) {
    if (Op.get()->getType()->isSized()) {
      Args.push_back(Op);
    } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op.get())) {
      BBCases.push_back(BB);
      Args.push_back(BlockAddress::get(BB->getParent(), BB));
    } else {
      report_fatal_error("Unexpected switch operand");
    }
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // remove the switch to avoid its unneeded and undesirable unwrapping into
  // branches and conditions
  replaceAllUsesWith(&I, NewI);
  I.eraseFromParent();
  // insert an artificial and temporary instruction to preserve a valid CFG;
  // it will be removed after the IR translation pass
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
      BBCases.size());
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}

Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    Instruction *Result = buildLogicalAccessChainFromGEP(I);
    if (Result)
      return Result;
  }

  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(I.isInBounds()));
  llvm::append_range(Args, I.operands());
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
1593
1594Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1595 IRBuilder<> B(I.getParent());
1596 B.SetInsertPoint(&I);
1597 Value *Source = I.getOperand(0);
1598
1599 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1600 // varying element types. In case of IR coming from older versions of LLVM
1601 // such bitcasts do not provide sufficient information, so they are simply
1602 // skipped here and handled later in insertPtrCastOrAssignTypeInstr.
1603 if (isPointerTy(I.getType())) {
1604 replaceAllUsesWith(&I, Source);
1605 I.eraseFromParent();
1606 return nullptr;
1607 }
1608
1609 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1610 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1611 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1612 replaceAllUsesWithAndErase(B, &I, NewI);
1613 return NewI;
1614}
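// Illustrative sketch: a non-pointer bitcast such as
//   %f = bitcast i32 %x to float
// is rewritten, roughly, into
//   %f = call float @llvm.spv.bitcast(i32 %x)
// while pointer-to-pointer bitcasts are erased above and recovered later via
// spv_ptrcast/spv_assign_ptr_type in insertPtrCastOrAssignTypeInstr.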
1615
1616void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1617 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1618 Type *VTy = V->getType();
1619
1620 // A couple of sanity checks.
1621 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1622 if (Type *ElemTy = getPointeeType(VTy))
1623 if (ElemTy != AssignedType)
1624 report_fatal_error("Unexpected pointer element type!");
1625
1626 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1627 if (!AssignCI) {
1628 GR->buildAssignType(B, AssignedType, V);
1629 return;
1630 }
1631
1632 Type *CurrentType =
1633 dyn_cast<ConstantAsMetadata>(
1634 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1635 ->getType();
1636 if (CurrentType == AssignedType)
1637 return;
1638
1639 // Builtin types cannot be redeclared or cast.
1640 if (CurrentType->isTargetExtTy())
1641 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1642 "/" + AssignedType->getTargetExtName() +
1643 " for value " + V->getName(),
1644 false);
1645
1646 // Our previous guess about the type seems to be wrong; let's update the
1647 // inferred type according to the new, more precise type information.
1648 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1649}
1650
1651void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1652 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1653 unsigned OperandToReplace, IRBuilder<> &B) {
1654 TypeValidated.insert(I);
1655
1656 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1657 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1658 if (PointerElemTy == ExpectedElementType ||
1659 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1660 return;
1661
1662 setInsertPointSkippingPhis(B, I);
1663 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1664 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1665 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1666 bool FirstPtrCastOrAssignPtrType = true;
1667
1668 // Do not emit new spv_ptrcast if equivalent one already exists or when
1669 // spv_assign_ptr_type already targets this pointer with the same element
1670 // type.
1671 if (Pointer->hasUseList()) {
1672 for (auto User : Pointer->users()) {
1673 auto *II = dyn_cast<IntrinsicInst>(User);
1674 if (!II ||
1675 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1676 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1677 II->getOperand(0) != Pointer)
1678 continue;
1679
1680 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1681 // pointer.
1682 FirstPtrCastOrAssignPtrType = false;
1683 if (II->getOperand(1) != VMD ||
1684 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1685 AddressSpace)
1686 continue;
1687
1688 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1689 // same element type and address space.
1690 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1691 return;
1692
1693 // This must be a spv_ptrcast, do not emit new if this one has the same BB
1694 // as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
1695 if (II->getParent() != I->getParent())
1696 continue;
1697
1698 I->setOperand(OperandToReplace, II);
1699 return;
1700 }
1701 }
1702
1703 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1704 if (FirstPtrCastOrAssignPtrType) {
1705 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1706 // emit spv_assign_ptr_type instead.
1707 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1708 return;
1709 } else if (isTodoType(Pointer)) {
1710 eraseTodoType(Pointer);
1711 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1712 // If this wouldn't be the first spv_ptrcast but existing type info is
1713 // incomplete, update spv_assign_ptr_type arguments.
1714 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1715 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1716 assert(PrevElemTy);
1717 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1718 std::make_pair(I, Pointer)};
1719 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1720 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1721 } else {
1722 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1723 }
1724 return;
1725 }
1726 }
1727 }
1728
1729 // Emit spv_ptrcast
1730 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1731 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1732 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1733 I->setOperand(OperandToReplace, PtrCastI);
1734 // We need to set up a pointee type for the newly created spv_ptrcast.
1735 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1736}
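// Illustrative sketch (placeholder names and address space): if %p was
// deduced as an i8* pointer but is then used as a float*, the code above
// either upgrades the existing spv_assign_ptr_type or emits, roughly,
//   %c = call ptr @llvm.spv.ptrcast(ptr %p, metadata float poison, i32 <AS>)
// and rewires the user instruction to %c, recording float as the pointee
// type of the new cast.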
1737
1738void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1739 IRBuilder<> &B) {
1740 // Handle basic instructions:
1741 StoreInst *SI = dyn_cast<StoreInst>(I);
1742 if (IsKernelArgInt8(CurrF, SI)) {
1743 replacePointerOperandWithPtrCast(
1744 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1745 0, B);
1746 }
1747 if (SI) {
1748 Value *Op = SI->getValueOperand();
1749 Value *Pointer = SI->getPointerOperand();
1750 Type *OpTy = Op->getType();
1751 if (auto *OpI = dyn_cast<Instruction>(Op))
1752 OpTy = restoreMutatedType(GR, OpI, OpTy);
1753 if (OpTy == Op->getType())
1754 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1755 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1756 return;
1757 }
1758 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1759 Value *Pointer = LI->getPointerOperand();
1760 Type *OpTy = LI->getType();
1761 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1762 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1763 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1764 } else {
1765 Type *NewOpTy = OpTy;
1766 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1767 if (OpTy == NewOpTy)
1768 insertTodoType(Pointer);
1769 }
1770 }
1771 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1772 return;
1773 }
1774 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1775 Value *Pointer = GEPI->getPointerOperand();
1776 Type *OpTy = nullptr;
1777
1778 // Knowing the accessed type is mandatory for logical SPIR-V. Sadly,
1779 // the GEP source element type should not be used for this purpose, and
1780 // the alternative type-scavenging method is not working.
1781 // Physical SPIR-V can work around this, but not logical, hence still
1782 // try to rely on the broken type scavenging for logical.
1783 bool IsRewrittenGEP =
1784 GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext());
1785 if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) {
1786 Value *Src = getPointerRoot(Pointer);
1787 OpTy = GR->findDeducedElementType(Src);
1788 }
1789
1790 // In all cases, fall back to the GEP type if type scavenging failed.
1791 if (!OpTy)
1792 OpTy = GEPI->getSourceElementType();
1793
1794 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1795 if (isNestedPointer(OpTy))
1796 insertTodoType(Pointer);
1797 return;
1798 }
1799
1800 // TODO: review and merge with existing logics:
1801 // Handle calls to builtins (non-intrinsics):
1802 CallInst *CI = dyn_cast<CallInst>(I);
1803 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1804 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1805 return;
1806
1807 // collect information about formal parameter types
1808 std::string DemangledName =
1809 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1810 Function *CalledF = CI->getCalledFunction();
1811 SmallVector<Type *, 4> CalledArgTys;
1812 bool HaveTypes = false;
1813 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1814 Argument *CalledArg = CalledF->getArg(OpIdx);
1815 Type *ArgType = CalledArg->getType();
1816 if (!isPointerTy(ArgType)) {
1817 CalledArgTys.push_back(nullptr);
1818 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1819 CalledArgTys.push_back(ArgTypeElem);
1820 HaveTypes = true;
1821 } else {
1822 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1823 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1824 ElemTy = getPointeeTypeByAttr(CalledArg);
1825 if (!ElemTy) {
1826 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1827 if (ElemTy) {
1828 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1829 } else {
1830 for (User *U : CalledArg->users()) {
1831 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1832 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1833 break;
1834 }
1835 }
1836 }
1837 }
1838 HaveTypes |= ElemTy != nullptr;
1839 CalledArgTys.push_back(ElemTy);
1840 }
1841 }
1842
1843 if (DemangledName.empty() && !HaveTypes)
1844 return;
1845
1846 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1847 Value *ArgOperand = CI->getArgOperand(OpIdx);
1848 if (!isPointerTy(ArgOperand->getType()))
1849 continue;
1850
1851 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1852 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1853 // However, we may have assumptions about the formal argument's type and
1854 // may have a need to insert a ptr cast for the actual parameter of this
1855 // call.
1856 Argument *CalledArg = CalledF->getArg(OpIdx);
1857 if (!GR->findDeducedElementType(CalledArg))
1858 continue;
1859 }
1860
1861 Type *ExpectedType =
1862 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1863 if (!ExpectedType && !DemangledName.empty())
1864 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1865 DemangledName, OpIdx, I->getContext());
1866 if (!ExpectedType || ExpectedType->isVoidTy())
1867 continue;
1868
1869 if (ExpectedType->isTargetExtTy() &&
1870 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1871 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1872 ArgOperand, B);
1873 else
1874 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1875 }
1876}
1877
1878Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1879 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1880 // type in LLT and IRTranslator will replace it by the scalar.
1881 if (isVector1(I.getType()))
1882 return &I;
1883
1884 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1885 I.getOperand(1)->getType(),
1886 I.getOperand(2)->getType()};
1887 IRBuilder<> B(I.getParent());
1888 B.SetInsertPoint(&I);
1889 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1890 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1891 replaceAllUsesWithAndErase(B, &I, NewI);
1892 return NewI;
1893}
1894
1895Instruction *
1896SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1897 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1898 // type in LLT and IRTranslator will replace it by the scalar.
1899 if (isVector1(I.getVectorOperandType()))
1900 return &I;
1901
1902 IRBuilder<> B(I.getParent());
1903 B.SetInsertPoint(&I);
1904 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1905 I.getIndexOperand()->getType()};
1906 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1907 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1908 replaceAllUsesWithAndErase(B, &I, NewI);
1909 return NewI;
1910}
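// Illustrative sketch for the two visitors above:
//   %v = insertelement <4 x i32> %vec, i32 %val, i32 1
//   %e = extractelement <4 x i32> %vec, i32 2
// become, roughly,
//   %v = call <4 x i32> @llvm.spv.insertelt(<4 x i32> %vec, i32 %val, i32 1)
//   %e = call i32 @llvm.spv.extractelt(<4 x i32> %vec, i32 2)
// while <1 x T> vectors are deliberately left untouched, as noted above.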
1911
1912Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1913 IRBuilder<> B(I.getParent());
1914 B.SetInsertPoint(&I);
1915 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1916 SmallVector<Value *> Args;
1917 Value *AggregateOp = I.getAggregateOperand();
1918 if (isa<UndefValue>(AggregateOp))
1919 Args.push_back(UndefValue::get(B.getInt32Ty()));
1920 else
1921 Args.push_back(AggregateOp);
1922 Args.push_back(I.getInsertedValueOperand());
1923 for (auto &Op : I.indices())
1924 Args.push_back(B.getInt32(Op));
1925 Instruction *NewI =
1926 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
1927 replaceMemInstrUses(&I, NewI, B);
1928 return NewI;
1929}
1930
1931Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1932 if (I.getAggregateOperand()->getType()->isAggregateType())
1933 return &I;
1934 IRBuilder<> B(I.getParent());
1935 B.SetInsertPoint(&I);
1936 SmallVector<Value *> Args(I.operands());
1937 for (auto &Op : I.indices())
1938 Args.push_back(B.getInt32(Op));
1939 auto *NewI =
1940 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
1941 replaceAllUsesWithAndErase(B, &I, NewI);
1942 return NewI;
1943}
1944
1945Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
1946 if (!I.getType()->isAggregateType())
1947 return &I;
1948 IRBuilder<> B(I.getParent());
1949 B.SetInsertPoint(&I);
1950 TrackConstants = false;
1951 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
1952 MachineMemOperand::Flags Flags =
1953 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
1954 auto *NewI =
1955 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
1956 {I.getPointerOperand(), B.getInt16(Flags),
1957 B.getInt8(I.getAlign().value())});
1958 replaceMemInstrUses(&I, NewI, B);
1959 return NewI;
1960}
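// Illustrative sketch: an aggregate load such as
//   %a = load %struct.S, ptr %p, align 8
// becomes, roughly,
//   %a = call %struct.S @llvm.spv.load(ptr %p, i16 <flags>, i8 8)
// where <flags> stands for the target's memory-operand flags, so that both
// the flags and the alignment survive until instruction selection.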
1961
1962Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
1963 if (!AggrStores.contains(&I))
1964 return &I;
1965 IRBuilder<> B(I.getParent());
1966 B.SetInsertPoint(&I);
1967 TrackConstants = false;
1968 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
1969 MachineMemOperand::Flags Flags =
1970 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
1971 auto *PtrOp = I.getPointerOperand();
1972 auto *NewI = B.CreateIntrinsic(
1973 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
1974 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
1975 B.getInt8(I.getAlign().value())});
1976 NewI->copyMetadata(I);
1977 I.eraseFromParent();
1978 return NewI;
1979}
1980
1981Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
1982 Value *ArraySize = nullptr;
1983 if (I.isArrayAllocation()) {
1984 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
1985 if (!STI->canUseExtension(
1986 SPIRV::Extension::SPV_INTEL_variable_length_array))
1988 "array allocation: this instruction requires the following "
1989 "SPIR-V extension: SPV_INTEL_variable_length_array",
1990 false);
1991 ArraySize = I.getArraySize();
1992 }
1993 IRBuilder<> B(I.getParent());
1994 B.SetInsertPoint(&I);
1995 TrackConstants = false;
1996 Type *PtrTy = I.getType();
1997 auto *NewI =
1998 ArraySize
1999 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2000 {PtrTy, ArraySize->getType()},
2001 {ArraySize, B.getInt8(I.getAlign().value())})
2002 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2003 {B.getInt8(I.getAlign().value())});
2004 replaceAllUsesWithAndErase(B, &I, NewI);
2005 return NewI;
2006}
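// Illustrative sketch: a fixed `%a = alloca i32, align 4` becomes, roughly,
//   %a = call ptr @llvm.spv.alloca(i8 4)
// while a variable-length `%a = alloca i32, i64 %n, align 4` maps to
//   %a = call ptr @llvm.spv.alloca.array(i64 %n, i8 4)
// guarded above by the SPV_INTEL_variable_length_array extension check.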
2007
2008Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2009 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2010 IRBuilder<> B(I.getParent());
2011 B.SetInsertPoint(&I);
2012 SmallVector<Value *> Args(I.operands());
2013 Args.push_back(B.getInt32(
2014 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2015 Args.push_back(B.getInt32(
2016 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2017 Args.push_back(B.getInt32(
2018 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2019 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2020 {I.getPointerOperand()->getType()}, {Args});
2021 replaceMemInstrUses(&I, NewI, B);
2022 return NewI;
2023}
2024
2025Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2026 IRBuilder<> B(I.getParent());
2027 B.SetInsertPoint(&I);
2028 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2029 return &I;
2030}
2031
2032void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2033 IRBuilder<> &B) {
2034 // Skip special artificial variables.
2035 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2036 "llvm.compiler.used"};
2037
2038 if (ArtificialGlobals.contains(GV.getName()))
2039 return;
2040
2041 Constant *Init = nullptr;
2042 if (hasInitializer(&GV)) {
2043 // Deduce element type and store results in Global Registry.
2044 // Result is ignored, because TypedPointerType is not supported
2045 // by llvm IR general logic.
2046 deduceElementTypeHelper(&GV, false);
2047 Init = GV.getInitializer();
2048 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2049 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2050 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2051 {GV.getType(), Ty}, {&GV, Const});
2052 InitInst->setArgOperand(1, Init);
2053 }
2054 if (!Init && GV.use_empty())
2055 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2056}
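// Illustrative sketch: for `@g = global i32 42` the code above emits,
// roughly, `call void @llvm.spv.init.global(ptr @g, i32 42)`; for a global
// that has no initializer and no uses it emits
// `call void @llvm.spv.unref.global(ptr @g)` so the variable is not dropped.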
2057
2058// Return true if we can't decide what the pointee type is now and will get
2059// back to the question later. Return false if spv_assign_ptr_type is not
2060// needed or can be inserted immediately.
2061bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2062 IRBuilder<> &B,
2063 bool UnknownElemTypeI8) {
2064 reportFatalOnTokenType(I);
2065 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2066 return false;
2067
2068 setInsertPointSkippingPhis(B, I);
2069 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2070 GR->buildAssignPtr(B, ElemTy, I);
2071 return false;
2072 }
2073 return true;
2074}
2075
2076void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2077 IRBuilder<> &B) {
2078 // TODO: extend the list of functions with known result types
2079 static StringMap<unsigned> ResTypeWellKnown = {
2080 {"async_work_group_copy", WellKnownTypes::Event},
2081 {"async_work_group_strided_copy", WellKnownTypes::Event},
2082 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2083
2084 reportFatalOnTokenType(I);
2085
2086 bool IsKnown = false;
2087 if (auto *CI = dyn_cast<CallInst>(I)) {
2088 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2089 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2090 Function *CalledF = CI->getCalledFunction();
2091 std::string DemangledName =
2092 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
2093 FPDecorationId DecorationId = FPDecorationId::NONE;
2094 if (DemangledName.length() > 0)
2095 DemangledName =
2096 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2097 auto ResIt = ResTypeWellKnown.find(DemangledName);
2098 if (ResIt != ResTypeWellKnown.end()) {
2099 IsKnown = true;
2100 setInsertPointAfterDef(B, I);
2101 switch (ResIt->second) {
2102 case WellKnownTypes::Event:
2103 GR->buildAssignType(
2104 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2105 break;
2106 }
2107 }
2108 // check if a floating rounding mode or saturation info is present
2109 switch (DecorationId) {
2110 default:
2111 break;
2112 case FPDecorationId::SAT:
2113 createSaturatedConversionDecoration(CI, B);
2114 break;
2115 case FPDecorationId::RTE:
2116 createRoundingModeDecoration(
2117 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2118 break;
2119 case FPDecorationId::RTZ:
2120 createRoundingModeDecoration(
2121 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2122 break;
2123 case FPDecorationId::RTP:
2124 createRoundingModeDecoration(
2125 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2126 break;
2127 case FPDecorationId::RTN:
2128 createRoundingModeDecoration(
2129 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2130 break;
2131 }
2132 }
2133 }
2134
2135 Type *Ty = I->getType();
2136 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2137 setInsertPointAfterDef(B, I);
2138 Type *TypeToAssign = Ty;
2139 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2140 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2141 II->getIntrinsicID() == Intrinsic::spv_undef) {
2142 auto It = AggrConstTypes.find(II);
2143 if (It == AggrConstTypes.end())
2144 report_fatal_error("Unknown composite intrinsic type");
2145 TypeToAssign = It->second;
2146 }
2147 }
2148 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2149 GR->buildAssignType(B, TypeToAssign, I);
2150 }
2151 for (const auto &Op : I->operands()) {
2152 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2153 // Check GetElementPtrConstantExpr case.
2154 (isa<ConstantExpr>(Op) &&
2155 (isa<GEPOperator>(Op) ||
2156 (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2157 setInsertPointSkippingPhis(B, I);
2158 Type *OpTy = Op->getType();
2159 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2160 CallInst *AssignCI =
2161 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2162 UndefValue::get(B.getInt32Ty()), {}, B);
2163 GR->addAssignPtrTypeInstr(Op, AssignCI);
2164 } else if (!isa<Instruction>(Op)) {
2165 Type *OpTy = Op->getType();
2166 Type *OpTyElem = getPointeeType(OpTy);
2167 if (OpTyElem) {
2168 GR->buildAssignPtr(B, OpTyElem, Op);
2169 } else if (isPointerTy(OpTy)) {
2170 Type *ElemTy = GR->findDeducedElementType(Op);
2171 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2172 Op);
2173 } else {
2174 Value *OpTyVal = Op;
2175 if (OpTy->isTargetExtTy()) {
2176 // We need to do this in order to be consistent with how target ext
2177 // types are handled in `processInstrAfterVisit`
2178 OpTyVal = getNormalizedPoisonValue(OpTy);
2179 }
2180 CallInst *AssignCI =
2181 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2182 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2183 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2184 }
2185 }
2186 }
2187 }
2188}
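// Illustrative sketch: for a non-void, non-pointer result such as
//   %sum = add i32 %a, %b
// the code above emits, roughly,
//   call void @llvm.spv.assign.type(i32 %sum, metadata i32 poison)
// so that the original LLVM type of the value survives IR translation and
// can be turned into a proper SPIR-V type later.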
2189
2190bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2191 Instruction *Inst) {
2192 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2193 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2194 return false;
2195 // Add aliasing decorations to internal load and store intrinsics
2196 // and atomic instructions, skipping atomic store as it won't have ID to
2197 // attach the decoration.
2198 CallInst *CI = dyn_cast<CallInst>(Inst);
2199 if (!CI)
2200 return false;
2201 if (Function *Fun = CI->getCalledFunction()) {
2202 if (Fun->isIntrinsic()) {
2203 switch (Fun->getIntrinsicID()) {
2204 case Intrinsic::spv_load:
2205 case Intrinsic::spv_store:
2206 return true;
2207 default:
2208 return false;
2209 }
2210 }
2211 std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2212 const std::string Prefix = "__spirv_Atomic";
2213 const bool IsAtomic = Name.find(Prefix) == 0;
2214
2215 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2216 return true;
2217 }
2218 return false;
2219}
2220
2221void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2222 IRBuilder<> &B) {
2223 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2224 setInsertPointAfterDef(B, I);
2225 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2226 {I, MetadataAsValue::get(I->getContext(), MD)});
2227 }
2228 // Lower alias.scope/noalias metadata
2229 {
2230 auto processMemAliasingDecoration = [&](unsigned Kind) {
2231 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2232 if (shouldTryToAddMemAliasingDecoration(I)) {
2233 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2234 ? SPIRV::Decoration::AliasScopeINTEL
2235 : SPIRV::Decoration::NoAliasINTEL;
2236 SmallVector<Value *, 3> Args = {
2237 I, ConstantInt::get(B.getInt32Ty(), Dec),
2238 MetadataAsValue::get(I->getContext(), AliasListMD)};
2239 setInsertPointAfterDef(B, I);
2240 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2241 {I->getType()}, {Args});
2242 }
2243 }
2244 };
2245 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2246 processMemAliasingDecoration(LLVMContext::MD_noalias);
2247 }
2248 // MD_fpmath
2249 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2250 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2251 bool AllowFPMaxError =
2252 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2253 if (!AllowFPMaxError)
2254 return;
2255
2256 setInsertPointAfterDef(B, I);
2257 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2258 {I->getType()},
2259 {I, MetadataAsValue::get(I->getContext(), MD)});
2260 }
2261}
2262
2263static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
2264 const Module &M,
2265 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2266 &FPFastMathDefaultInfoMap,
2267 Function *F) {
2268 auto it = FPFastMathDefaultInfoMap.find(F);
2269 if (it != FPFastMathDefaultInfoMap.end())
2270 return it->second;
2271
2272 // If the map does not contain the entry, create a new one. Initialize it to
2273 // contain all 3 elements sorted by bit width of target type: {half, float,
2274 // double}.
2275 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2276 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2277 SPIRV::FPFastMathMode::None);
2278 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2279 SPIRV::FPFastMathMode::None);
2280 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2281 SPIRV::FPFastMathMode::None);
2282 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2283}
2284
2285static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2286 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2287 const Type *Ty) {
2288 size_t BitWidth = Ty->getScalarSizeInBits();
2289 int Index =
2290 SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
2291 BitWidth);
2292 assert(Index >= 0 && Index < 3 &&
2293 "Expected FPFastMathDefaultInfo for half, float, or double");
2294 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2295 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2296 return FPFastMathDefaultInfoVec[Index];
2297}
2298
2299void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2300 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2301 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2302 return;
2303
2304 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2305 // We need the entry point (function) as the key, and the target
2306 // type and flags as the value.
2307 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2308 // execution modes, as they are now deprecated and must be replaced
2309 // with FPFastMathDefaultInfo.
2310 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2311 if (!Node) {
2312 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2313 // This requires emitting ContractionOff. However, because
2314 // ContractionOff is now deprecated, we need to replace it with
2315 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2316 // We need to create the constant for that.
2317
2318 // Create constant instruction with the bitmask flags.
2319 Constant *InitValue =
2320 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2321 // TODO: Reuse constant if there is one already with the required
2322 // value.
2323 [[maybe_unused]] GlobalVariable *GV =
2324 new GlobalVariable(M, // Module
2325 Type::getInt32Ty(M.getContext()), // Type
2326 true, // isConstant
2327 GlobalValue::InternalLinkage, // Linkage
2328 InitValue // Initializer
2329 );
2330 }
2331 return;
2332 }
2333
2334 // The table maps function pointers to their default FP fast math info. It
2335 // can be assumed that the SmallVector is sorted by the bit width of the
2336 // type. The first element is the smallest bit width, and the last element
2337 // is the largest bit width, therefore, we will have {half, float, double}
2338 // in the order of their bit widths.
2339 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2340 FPFastMathDefaultInfoMap;
2341
2342 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2343 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2344 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2345 Function *F = cast<Function>(
2346 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2347 const auto EM =
2348 cast<ConstantInt>(
2349 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2350 ->getZExtValue();
2351 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2352 assert(MDN->getNumOperands() == 4 &&
2353 "Expected 4 operands for FPFastMathDefault");
2354 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2355 unsigned Flags =
2356 cast<ConstantInt>(
2357 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2358 ->getZExtValue();
2359 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2360 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2361 SPIRV::FPFastMathDefaultInfo &Info =
2362 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2363 Info.FastMathFlags = Flags;
2364 Info.FPFastMathDefault = true;
2365 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2366 assert(MDN->getNumOperands() == 2 &&
2367 "Expected no operands for ContractionOff");
2368
2369 // We need to save this info for every possible FP type, i.e. {half,
2370 // float, double, fp128}.
2371 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2372 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2373 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2374 Info.ContractionOff = true;
2375 }
2376 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2377 assert(MDN->getNumOperands() == 3 &&
2378 "Expected 1 operand for SignedZeroInfNanPreserve");
2379 unsigned TargetWidth =
2380 cast<ConstantInt>(
2381 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2382 ->getZExtValue();
2383 // We need to save this info only for the FP type with TargetWidth.
2384 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2385 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2386 int Index = SPIRV::FPFastMathDefaultInfoVector::
2387 computeFPFastMathDefaultInfoVecIndex(TargetWidth);
2388 assert(Index >= 0 && Index < 3 &&
2389 "Expected FPFastMathDefaultInfo for half, float, or double");
2390 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2391 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2392 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2393 }
2394 }
2395
2396 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2397 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2398 if (FPFastMathDefaultInfoVec.empty())
2399 continue;
2400
2401 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2402 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2403 // Skip if none of the execution modes was used.
2404 unsigned Flags = Info.FastMathFlags;
2405 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2406 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2407 continue;
2408
2409 // Check if flags are compatible.
2410 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2411 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2412 "and AllowContract");
2413
2414 if (Info.SignedZeroInfNanPreserve &&
2415 !(Flags &
2416 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2417 SPIRV::FPFastMathMode::NSZ))) {
2418 if (Info.FPFastMathDefault)
2419 report_fatal_error("Conflicting FPFastMathFlags: "
2420 "SignedZeroInfNanPreserve but at least one of "
2421 "NotNaN/NotInf/NSZ is enabled.");
2422 }
2423
2424 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2425 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2426 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2427 report_fatal_error("Conflicting FPFastMathFlags: "
2428 "AllowTransform requires AllowReassoc and "
2429 "AllowContract to be set.");
2430 }
2431
2432 auto it = GlobalVars.find(Flags);
2433 GlobalVariable *GV = nullptr;
2434 if (it != GlobalVars.end()) {
2435 // Reuse existing global variable.
2436 GV = it->second;
2437 } else {
2438 // Create constant instruction with the bitmask flags.
2439 Constant *InitValue =
2440 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2441 // TODO: Reuse constant if there is one already with the required
2442 // value.
2443 GV = new GlobalVariable(M, // Module
2444 Type::getInt32Ty(M.getContext()), // Type
2445 true, // isConstant
2446 GlobalValue::InternalLinkage, // Linkage
2447 InitValue // Initializer
2448 );
2449 GlobalVars[Flags] = GV;
2450 }
2451 }
2452 }
2453}
2454
2455void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2456 IRBuilder<> &B) {
2457 auto *II = dyn_cast<IntrinsicInst>(I);
2458 bool IsConstComposite =
2459 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2460 if (IsConstComposite && TrackConstants) {
2461 setInsertPointAfterDef(B, I);
2462 auto t = AggrConsts.find(I);
2463 assert(t != AggrConsts.end());
2464 auto *NewOp =
2465 buildIntrWithMD(Intrinsic::spv_track_constant,
2466 {II->getType(), II->getType()}, t->second, I, {}, B);
2467 replaceAllUsesWith(I, NewOp, false);
2468 NewOp->setArgOperand(0, I);
2469 }
2470 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2471 for (const auto &Op : I->operands()) {
2472 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2473 !(isa<ConstantData>(Op) || isa<ConstantExpr>(Op)))
2474 continue;
2475 unsigned OpNo = Op.getOperandNo();
2476 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2477 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2478 continue;
2479
2480 if (!BPrepared) {
2481 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2482 : B.SetInsertPoint(I);
2483 BPrepared = true;
2484 }
2485 Type *OpTy = Op->getType();
2486 Type *OpElemTy = GR->findDeducedElementType(Op);
2487 Value *NewOp = Op;
2488 if (OpTy->isTargetExtTy()) {
2489 // Since this value is replaced by poison, we need to do the same in
2490 // `insertAssignTypeIntrs`.
2491 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2492 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2493 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2494 }
2495 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2496 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2497 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2498 SmallVector<Value *, 2> Args = {
2499 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2500 B.getInt32(getPointerAddressSpace(OpTy))};
2501 CallInst *PtrCasted =
2502 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2503 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2504 NewOp = PtrCasted;
2505 }
2506 if (NewOp != Op)
2507 I->setOperand(OpNo, NewOp);
2508 }
2509 if (Named.insert(I).second)
2510 emitAssignName(I, B);
2511}
2512
2513Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2514 unsigned OpIdx) {
2515 std::unordered_set<Function *> FVisited;
2516 return deduceFunParamElementType(F, OpIdx, FVisited);
2517}
2518
2519Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2520 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2521 // maybe a cycle
2522 if (!FVisited.insert(F).second)
2523 return nullptr;
2524
2525 std::unordered_set<Value *> Visited;
2526 SmallVector<std::pair<Function *, unsigned>> Lookup;
2527 // search in function's call sites
2528 for (User *U : F->users()) {
2529 CallInst *CI = dyn_cast<CallInst>(U);
2530 if (!CI || OpIdx >= CI->arg_size())
2531 continue;
2532 Value *OpArg = CI->getArgOperand(OpIdx);
2533 if (!isPointerTy(OpArg->getType()))
2534 continue;
2535 // maybe we already know operand's element type
2536 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2537 return KnownTy;
2538 // try to deduce from the operand itself
2539 Visited.clear();
2540 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2541 return Ty;
2542 // search in actual parameter's users
2543 for (User *OpU : OpArg->users()) {
2544 Instruction *Inst = dyn_cast<Instruction>(OpU);
2545 if (!Inst || Inst == CI)
2546 continue;
2547 Visited.clear();
2548 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2549 return Ty;
2550 }
2551 // check if it's a formal parameter of the outer function
2552 if (!CI->getParent() || !CI->getParent()->getParent())
2553 continue;
2554 Function *OuterF = CI->getParent()->getParent();
2555 if (FVisited.find(OuterF) != FVisited.end())
2556 continue;
2557 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2558 if (OuterF->getArg(i) == OpArg) {
2559 Lookup.push_back(std::make_pair(OuterF, i));
2560 break;
2561 }
2562 }
2563 }
2564
2565 // search in function parameters
2566 for (auto &Pair : Lookup) {
2567 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2568 return Ty;
2569 }
2570
2571 return nullptr;
2572}
2573
2574void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2575 IRBuilder<> &B) {
2576 B.SetInsertPointPastAllocas(F);
2577 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2578 Argument *Arg = F->getArg(OpIdx);
2579 if (!isUntypedPointerTy(Arg->getType()))
2580 continue;
2581 Type *ElemTy = GR->findDeducedElementType(Arg);
2582 if (ElemTy)
2583 continue;
2584 if (hasPointeeTypeAttr(Arg) &&
2585 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2586 GR->buildAssignPtr(B, ElemTy, Arg);
2587 continue;
2588 }
2589 // search in function's call sites
2590 for (User *U : F->users()) {
2591 CallInst *CI = dyn_cast<CallInst>(U);
2592 if (!CI || OpIdx >= CI->arg_size())
2593 continue;
2594 Value *OpArg = CI->getArgOperand(OpIdx);
2595 if (!isPointerTy(OpArg->getType()))
2596 continue;
2597 // maybe we already know operand's element type
2598 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2599 break;
2600 }
2601 if (ElemTy) {
2602 GR->buildAssignPtr(B, ElemTy, Arg);
2603 continue;
2604 }
2605 if (HaveFunPtrs) {
2606 for (User *U : Arg->users()) {
2607 CallInst *CI = dyn_cast<CallInst>(U);
2608 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2609 CI->getCalledOperand() == Arg &&
2610 CI->getParent()->getParent() == CurrF) {
2611 SmallVector<std::pair<Value *, unsigned>> Ops;
2612 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2613 if (ElemTy) {
2614 GR->buildAssignPtr(B, ElemTy, Arg);
2615 break;
2616 }
2617 }
2618 }
2619 }
2620 }
2621}
2622
2623void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2624 B.SetInsertPointPastAllocas(F);
2625 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2626 Argument *Arg = F->getArg(OpIdx);
2627 if (!isUntypedPointerTy(Arg->getType()))
2628 continue;
2629 Type *ElemTy = GR->findDeducedElementType(Arg);
2630 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2631 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2632 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2633 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2634 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2635 VisitedSubst);
2636 } else {
2637 GR->buildAssignPtr(B, ElemTy, Arg);
2638 }
2639 }
2640 }
2641}
2642
2643static FunctionType *getFunctionPointerElemType(Function *F,
2644 SPIRVGlobalRegistry *GR) {
2645 FunctionType *FTy = F->getFunctionType();
2646 bool IsNewFTy = false;
2647 SmallVector<Type *, 4> ArgTys;
2648 for (Argument &Arg : F->args()) {
2649 Type *ArgTy = Arg.getType();
2650 if (ArgTy->isPointerTy())
2651 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2652 IsNewFTy = true;
2653 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2654 }
2655 ArgTys.push_back(ArgTy);
2656 }
2657 return IsNewFTy
2658 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2659 : FTy;
2660}
2661
2662bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2663 SmallVector<Function *> Worklist;
2664 for (auto &F : M) {
2665 if (F.isIntrinsic())
2666 continue;
2667 if (F.isDeclaration()) {
2668 for (User *U : F.users()) {
2669 CallInst *CI = dyn_cast<CallInst>(U);
2670 if (!CI || CI->getCalledFunction() != &F) {
2671 Worklist.push_back(&F);
2672 break;
2673 }
2674 }
2675 } else {
2676 if (F.user_empty())
2677 continue;
2678 Type *FPElemTy = GR->findDeducedElementType(&F);
2679 if (!FPElemTy)
2680 FPElemTy = getFunctionPointerElemType(&F, GR);
2681 for (User *U : F.users()) {
2682 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2683 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2684 continue;
2685 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2686 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2687 GR->updateAssignType(II, &F, getNormalizedPoisonValue(FPElemTy));
2688 break;
2689 }
2690 }
2691 }
2692 }
2693 if (Worklist.empty())
2694 return false;
2695
2696 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2697 if (!getVacantFunctionName(M, ServiceFunName))
2699 "cannot allocate a name for the internal service function");
2700 LLVMContext &Ctx = M.getContext();
2701 Function *SF =
2702 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2703 GlobalValue::PrivateLinkage, ServiceFunName, M);
2704 SF->addFnAttr(SPIRV_BACKEND_SERVICE_FUN_NAME, "");
2705 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2706 IRBuilder<> IRB(BB);
2707
2708 for (Function *F : Worklist) {
2709 SmallVector<Value *> Args;
2710 for (const auto &Arg : F->args())
2711 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2712 IRB.CreateCall(F, Args);
2713 }
2714 IRB.CreateRetVoid();
2715
2716 return true;
2717}
2718
2719// Apply types parsed from demangled function declarations.
2720void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2721 DenseMap<Function *, CallInst *> Ptrcasts;
2722 for (auto It : FDeclPtrTys) {
2723 Function *F = It.first;
2724 for (auto *U : F->users()) {
2725 CallInst *CI = dyn_cast<CallInst>(U);
2726 if (!CI || CI->getCalledFunction() != F)
2727 continue;
2728 unsigned Sz = CI->arg_size();
2729 for (auto [Idx, ElemTy] : It.second) {
2730 if (Idx >= Sz)
2731 continue;
2732 Value *Param = CI->getArgOperand(Idx);
2733 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2734 continue;
2735 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2736 if (!hasPointeeTypeAttr(Arg)) {
2737 B.SetInsertPointPastAllocas(Arg->getParent());
2738 B.SetCurrentDebugLocation(DebugLoc());
2739 GR->buildAssignPtr(B, ElemTy, Arg);
2740 }
2741 } else if (isa<GetElementPtrInst>(Param)) {
2742 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2743 Ptrcasts);
2744 } else if (isa<Instruction>(Param)) {
2745 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2746 // insertAssignTypeIntrs() will complete buildAssignPtr()
2747 } else {
2748 B.SetInsertPoint(CI->getParent()
2749 ->getParent()
2750 ->getEntryBlock()
2751 .getFirstNonPHIOrDbgOrAlloca());
2752 GR->buildAssignPtr(B, ElemTy, Param);
2753 }
2754 CallInst *Ref = dyn_cast<CallInst>(Param);
2755 if (!Ref)
2756 continue;
2757 Function *RefF = Ref->getCalledFunction();
2758 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2759 GR->findDeducedElementType(RefF))
2760 continue;
2761 ElemTy = normalizeType(ElemTy);
2762 GR->addDeducedElementType(RefF, ElemTy);
2763 GR->addReturnType(
2764 RefF, TypedPointerType::get(
2765 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2766 }
2767 }
2768 }
2769}
2770
2771GetElementPtrInst *
2772SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2773 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2774 // If type is 0-length array and first index is 0 (zero), drop both the
2775 // 0-length array type and the first index. This is a common pattern in
2776 // the IR, e.g. when using a zero-length array as a placeholder for a
2777 // flexible array such as unbound arrays.
2778 assert(GEP && "GEP is null");
2779 Type *SrcTy = GEP->getSourceElementType();
2780 SmallVector<Value *, 8> Indices(GEP->indices());
2781 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2782 if (ArrTy && ArrTy->getNumElements() == 0 &&
2783 isa<ConstantInt>(Indices[0]) && cast<ConstantInt>(Indices[0])->isZero()) {
2784 IRBuilder<> Builder(GEP);
2785 Indices.erase(Indices.begin());
2786 SrcTy = ArrTy->getElementType();
2787 Value *NewGEP = Builder.CreateGEP(SrcTy, GEP->getPointerOperand(), Indices,
2788 "", GEP->getNoWrapFlags());
2789 assert(llvm::isa<GetElementPtrInst>(NewGEP) && "NewGEP should be a GEP");
2790 return cast<GetElementPtrInst>(NewGEP);
2791 }
2792 return nullptr;
2793}
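// Illustrative sketch of the simplification above:
//   %q = getelementptr [0 x i32], ptr %p, i64 0, i64 %i
// is replaced by
//   %q = getelementptr i32, ptr %p, i64 %i
// with the no-wrap flags of the original GEP preserved.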
2794
2795bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2796 if (Func.isDeclaration())
2797 return false;
2798
2799 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2800 GR = ST.getSPIRVGlobalRegistry();
2801
2802 if (!CurrF)
2803 HaveFunPtrs =
2804 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2805
2806 CurrF = &Func;
2807 IRBuilder<> B(Func.getContext());
2808 AggrConsts.clear();
2809 AggrConstTypes.clear();
2810 AggrStores.clear();
2811
2812 // Fix GEP result types ahead of inference, and simplify if possible.
2813 // Data structure for dead instructions that were simplified and replaced.
2814 SmallPtrSet<Instruction *, 4> DeadInsts;
2815 for (auto &I : instructions(Func)) {
2816 auto *Ref = dyn_cast<GetElementPtrInst>(&I);
2817 if (!Ref || GR->findDeducedElementType(Ref))
2818 continue;
2819
2820 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2821 if (NewGEP) {
2822 Ref->replaceAllUsesWith(NewGEP);
2823 DeadInsts.insert(Ref);
2824 Ref = NewGEP;
2825 }
2826 if (Type *GepTy = getGEPType(Ref))
2827 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2828 }
2829 // Remove dead instructions that were simplified and replaced.
2830 for (auto *I : DeadInsts) {
2831 assert(I->use_empty() && "Dead instruction should not have any uses left");
2832 I->eraseFromParent();
2833 }
2834
2835 processParamTypesByFunHeader(CurrF, B);
2836
2837 // StoreInst's operand type can be changed during the next
2838 // transformations, so we need to store it in the set. Also store already
2839 // transformed types.
2840 for (auto &I : instructions(Func)) {
2841 StoreInst *SI = dyn_cast<StoreInst>(&I);
2842 if (!SI)
2843 continue;
2844 Type *ElTy = SI->getValueOperand()->getType();
2845 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2846 AggrStores.insert(&I);
2847 }
2848
2849 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2850 for (auto &GV : Func.getParent()->globals())
2851 processGlobalValue(GV, B);
2852
2853 preprocessUndefs(B);
2854 preprocessCompositeConstants(B);
2855 SmallVector<Instruction *> Worklist(
2856 llvm::make_pointer_range(instructions(Func)));
2857
2858 applyDemangledPtrArgTypes(B);
2859
2860 // Pass forward: use operand to deduce instructions result.
2861 for (auto &I : Worklist) {
2862 // Don't emit intrinsics for convergence intrinsics.
2863 if (isConvergenceIntrinsic(I))
2864 continue;
2865
2866 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2867 // if Postpone is true, we can't decide on pointee type yet
2868 insertAssignTypeIntrs(I, B);
2869 insertPtrCastOrAssignTypeInstr(I, B);
2870 insertSpirvDecorations(I, B);
2871 // if instruction requires a pointee type set, let's check if we know it
2872 // already, and force it to be i8 if not
2873 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2874 insertAssignPtrTypeIntrs(I, B, true);
2875
2876 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2877 useRoundingMode(FPI, B);
2878 }
2879
2880 // Pass backward: use instructions results to specify/update/cast operands
2881 // where needed.
2882 SmallPtrSet<Instruction *, 4> IncompleteRets;
2883 for (auto &I : llvm::reverse(instructions(Func)))
2884 deduceOperandElementType(&I, &IncompleteRets);
2885
2886 // Pass forward for PHIs only: their operands do not precede the
2887 // instruction in the order defined by `instructions(Func)`.
2888 for (BasicBlock &BB : Func)
2889 for (PHINode &Phi : BB.phis())
2890 if (isPointerTy(Phi.getType()))
2891 deduceOperandElementType(&Phi, nullptr);
2892
2893 for (auto *I : Worklist) {
2894 TrackConstants = true;
2895 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2896 setInsertPointAfterDef(B, I);
2897 // Visitors return either the original/newly created instruction for
2898 // further processing, nullptr otherwise.
2899 I = visit(*I);
2900 if (!I)
2901 continue;
2902
2903 // Don't emit intrinsics for convergence operations.
2904 if (isConvergenceIntrinsic(I))
2905 continue;
2906
2908 processInstrAfterVisit(I, B);
2909 }
2910
2911 return true;
2912}
2913
2914// Try to deduce a better type for pointers to untyped ptr.
2915bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2916 if (!GR || TodoTypeSz == 0)
2917 return false;
2918
2919 unsigned SzTodo = TodoTypeSz;
2920 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2921 for (auto [Op, Enabled] : TodoType) {
2922 // TODO: add isa<CallInst>(Op) to continue
2923 if (!Enabled || isa<GetElementPtrInst>(Op))
2924 continue;
2925 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2926 Type *KnownTy = GR->findDeducedElementType(Op);
2927 if (!KnownTy || !AssignCI)
2928 continue;
2929 assert(Op == AssignCI->getArgOperand(0));
2930 // Try to improve the type deduced after all Functions are processed.
2931 if (auto *CI = dyn_cast<Instruction>(Op)) {
2932 CurrF = CI->getParent()->getParent();
2933 std::unordered_set<Value *> Visited;
2934 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2935 if (ElemTy != KnownTy) {
2936 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2937 propagateElemType(CI, ElemTy, VisitedSubst);
2938 eraseTodoType(Op);
2939 continue;
2940 }
2941 }
2942 }
2943
2944 if (Op->hasUseList()) {
2945 for (User *U : Op->users()) {
2946 Instruction *Inst = dyn_cast<Instruction>(U);
2947 if (Inst && !isa<IntrinsicInst>(Inst))
2948 ToProcess[Inst].insert(Op);
2949 }
2950 }
2951 }
2952 if (TodoTypeSz == 0)
2953 return true;
2954
2955 for (auto &F : M) {
2956 CurrF = &F;
2957 SmallPtrSet<Instruction *, 4> IncompleteRets;
2958 for (auto &I : llvm::reverse(instructions(F))) {
2959 auto It = ToProcess.find(&I);
2960 if (It == ToProcess.end())
2961 continue;
2962 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
2963 if (It->second.size() == 0)
2964 continue;
2965 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
2966 if (TodoTypeSz == 0)
2967 return true;
2968 }
2969 }
2970
2971 return SzTodo > TodoTypeSz;
2972}
2973
2974// Parse and store argument types of function declarations where needed.
2975void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
2976 for (auto &F : M) {
2977 if (!F.isDeclaration() || F.isIntrinsic())
2978 continue;
2979 // get the demangled name
2980 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
2981 if (DemangledName.empty())
2982 continue;
2983 // allow only OpGroupAsyncCopy use case at the moment
2984 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
2985 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
2986 DemangledName, ST.getPreferredInstructionSet());
2987 if (Opcode != SPIRV::OpGroupAsyncCopy)
2988 continue;
2989 // find pointer arguments
2990 SmallVector<unsigned> Idxs;
2991 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
2992 Argument *Arg = F.getArg(OpIdx);
2993 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
2994 Idxs.push_back(OpIdx);
2995 }
2996 if (!Idxs.size())
2997 continue;
2998 // parse function arguments
2999 LLVMContext &Ctx = F.getContext();
3000 SmallVector<StringRef, 10> TypeStrs;
3001 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3002 if (!TypeStrs.size())
3003 continue;
3004 // find type info for pointer arguments
3005 for (unsigned Idx : Idxs) {
3006 if (Idx >= TypeStrs.size())
3007 continue;
3008 if (Type *ElemTy =
3009 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3010 if (TypedPointerType::isValidElementType(ElemTy) &&
3011 !ElemTy->isTargetExtTy())
3012 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3013 }
3014 }
3015}
3016
3017bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3018 bool Changed = false;
3019
3020 parseFunDeclarations(M);
3021 insertConstantsForFPFastMathDefault(M);
3022
3023 TodoType.clear();
3024 for (auto &F : M)
3025 Changed |= runOnFunction(F);
3026
3027 // Specify function parameters after all functions were processed.
3028 for (auto &F : M) {
3029 // check if function parameter types are set
3030 CurrF = &F;
3031 if (!F.isDeclaration() && !F.isIntrinsic()) {
3032 IRBuilder<> B(F.getContext());
3033 processParamTypes(&F, B);
3034 }
3035 }
3036
3037 CanTodoType = false;
3038 Changed |= postprocessTypes(M);
3039
3040 if (HaveFunPtrs)
3041 Changed |= processFunctionPointers(M);
3042
3043 return Changed;
3044}
3045
3046ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3047 return new SPIRVEmitIntrinsics(TM);
3048}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:503
static bool Enabled
Definition Statistic.cpp:46
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:248
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:233
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:637
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:363
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intrinsic if the function is not an intrinsic, or if the pointer is null.
Definition Function.h:244
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
size_t arg_size() const
Definition Function.h:899
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Argument * getArg(unsigned i) const
Definition Function.h:884
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
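A one-liner showing the IRBuilder pattern used throughout this pass: construct the builder at an insertion point, then Create* calls emit there. The value name "sum" is illustrative:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Position the builder immediately before I and emit an add there.
static Value *emitAddBefore(Instruction *I, Value *LHS, Value *RHS) {
  IRBuilder<> B(I);
  return B.CreateAdd(LHS, RHS, "sum");
}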
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
Metadata * getMetadata() const
Definition Metadata.h:201
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses.
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
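The SPIRVGlobalRegistry methods above are the pass's channel for recording deduced pointee types. A hedged sketch of the calling pattern, built only from the signatures listed here (the element type choice is illustrative; GR and B are assumed to be the pass's registry and builder):

#include "SPIRVGlobalRegistry.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Record a deduced pointee type for Ptr and, if no spv.assign.ptr.type
// bookkeeping exists yet, emit it through the registry helper.
static void recordPointee(SPIRVGlobalRegistry *GR, IRBuilder<> &B, Value *Ptr) {
  Type *ElemTy = Type::getInt32Ty(B.getContext());
  GR->addDeducedElementType(Ptr, ElemTy);
  if (!GR->findAssignPtrTypeInstr(Ptr))
    GR->buildAssignPtr(B, ElemTy, Ptr);
}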
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
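A small sketch of the string utilities above, mirroring how demangled builtin names are screened (the prefix and set contents are illustrative):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"

using namespace llvm;

// Accept a name if it carries the prefix or is already registered.
static bool isKnown(StringRef Name, const StringSet<> &Known) {
  return Name.starts_with("llvm.") || Known.contains(Name);
}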
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:620
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:908
Type * getTypeParameter(unsigned i) const
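TargetExtType::get and getTypeParameter together model SPIR-V opaque types. A hedged sketch; note the real spirv.Image type carries more integer parameters than shown here:

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Build an image-like target extension type with one type parameter and
// two integer parameters, then read the type parameter back.
static Type *imageLikeType(LLVMContext &Ctx) {
  TargetExtType *Ty = TargetExtType::get(Ctx, "spirv.Image",
                                         {Type::getFloatTy(Ctx)}, {1, 0});
  return Ty->getTypeParameter(0); // the sampled type, float here
}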
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:283
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
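TypedPointerType is the backend's way of retaining a pointee type that an opaque ptr drops. A minimal sketch combining it with the Type factories and predicates above (address space 1 is arbitrary):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/TypedPointerType.h"

using namespace llvm;

// Pair an i8 pointee with address space 1; the typed wrapper keeps the
// element type that a plain opaque pointer would lose.
static bool sketchTypes(LLVMContext &Ctx) {
  Type *I8 = Type::getInt8Ty(Ctx);
  TypedPointerType *TPT = TypedPointerType::get(I8, /*AddressSpace=*/1);
  return TPT->getElementType()->isIntegerTy(8) && !I8->isAggregateType();
}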
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:292
void setOperand(unsigned i, Value *Val)
Definition User.h:237
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:24
Value * getOperand(unsigned i) const
Definition User.h:232
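getOperand/setOperand allow positional rewriting of a User's operands; replaceUsesOfWith (above) does the same in one call. A sketch of the manual form:

#include "llvm/IR/User.h"

using namespace llvm;

// Replace every operand of U that equals From with To, returning the
// number of slots rewritten; replaceUsesOfWith bundles this into one call.
static unsigned rewriteOperands(User *U, Value *From, Value *To) {
  unsigned N = 0;
  for (unsigned I = 0, E = U->getNumOperands(); I != E; ++I)
    if (U->getOperand(I) == From) {
      U->setOperand(I, To);
      ++N;
    }
  return N;
}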
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
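A small sketch of the Value queries above, in the spirit of this pass's name tracking (the fallback name "tmp" is illustrative):

#include "llvm/IR/Value.h"

using namespace llvm;

// Name a value only if something actually uses it; unnamed-but-used
// values get a placeholder so later name emission has a string.
static bool nameIfUsed(Value *V) {
  if (V->use_empty())
    return false;
  if (!V->hasName())
    V->setName("tmp");
  return true;
}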
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:477
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:381
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:345
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
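buildIntrWithMD is the utility this pass uses to emit its bookkeeping intrinsics. A hedged sketch of one call; the intrinsic choice and the convention of passing the value twice (plain and metadata-wrapped) are inferred from the signature above, not confirmed:

#include "SPIRVUtils.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsSPIRV.h"

using namespace llvm;

// Emit a constant-tracking intrinsic typed on V: V is passed both as the
// plain operand and as the metadata-wrapped operand, with no immediates.
static CallInst *trackValue(Value *V, IRBuilder<> &B) {
  return buildIntrWithMD(Intrinsic::spv_track_constant,
                         {V->getType(), V->getType()}, V, V, {}, B);
}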
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
FPDecorationId
Definition SPIRVUtils.h:527
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:491
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:376
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:469
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:339
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:358
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:353
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:431
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:324
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:477
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
Definition SPIRVUtils.h:408
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:487
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:334
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146