1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for
10// some basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verification;
13// instead, it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify, for example, that shifts & logicals only happen on integrals.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks must end with a terminator and must not contain one elsewhere (see the example below)
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
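//
// For example, IR along these lines (an illustrative sketch) violates two of
// the rules above: the only basic block has no terminator, and the non-PHI
// 'add' references its own result:
//
//   define i32 @bad() {
//   entry:
//     %x = add i32 %x, 1
//   }
//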
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139struct VerifierSupport {
140 raw_ostream *OS;
141 const Module &M;
142 ModuleSlotTracker MST;
143 const Triple &TT;
144 const DataLayout &DL;
145 LLVMContext &Context;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
152 bool TreatBrokenDebugInfoAsError = true;
153
154 explicit VerifierSupport(raw_ostream *OS, const Module &M)
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "declare_value";
195 break;
197 *OS << "assign";
198 break;
200 *OS << "end";
201 break;
203 *OS << "any";
204 break;
205 };
206 }
207
208 void Write(const Metadata *MD) {
209 if (!MD)
210 return;
211 MD->print(*OS, MST, &M);
212 *OS << '\n';
213 }
214
215 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
216 Write(MD.get());
217 }
218
219 void Write(const NamedMDNode *NMD) {
220 if (!NMD)
221 return;
222 NMD->print(*OS, MST);
223 *OS << '\n';
224 }
225
226 void Write(Type *T) {
227 if (!T)
228 return;
229 *OS << ' ' << *T;
230 }
231
232 void Write(const Comdat *C) {
233 if (!C)
234 return;
235 *OS << *C;
236 }
237
238 void Write(const APInt *AI) {
239 if (!AI)
240 return;
241 *OS << *AI << '\n';
242 }
243
244 void Write(const unsigned i) { *OS << i << '\n'; }
245
246 // NOLINTNEXTLINE(readability-identifier-naming)
247 void Write(const Attribute *A) {
248 if (!A)
249 return;
250 *OS << A->getAsString() << '\n';
251 }
252
253 // NOLINTNEXTLINE(readability-identifier-naming)
254 void Write(const AttributeSet *AS) {
255 if (!AS)
256 return;
257 *OS << AS->getAsString() << '\n';
258 }
259
260 // NOLINTNEXTLINE(readability-identifier-naming)
261 void Write(const AttributeList *AL) {
262 if (!AL)
263 return;
264 AL->print(*OS);
265 }
266
267 void Write(Printable P) { *OS << P << '\n'; }
268
269 template <typename T> void Write(ArrayRef<T> Vs) {
270 for (const T &V : Vs)
271 Write(V);
272 }
273
274 template <typename T1, typename... Ts>
275 void WriteTs(const T1 &V1, const Ts &... Vs) {
276 Write(V1);
277 WriteTs(Vs...);
278 }
279
280 template <typename... Ts> void WriteTs() {}
281
282public:
283 /// A check failed, so print out the condition and the message.
284 ///
285 /// This provides a nice place to put a breakpoint if you want to see why
286 /// something is not correct.
287 void CheckFailed(const Twine &Message) {
288 if (OS)
289 *OS << Message << '\n';
290 Broken = true;
291 }
292
293 /// A check failed (with values to print).
294 ///
295 /// This calls the Message-only version so that the above is easier to set a
296 /// breakpoint on.
297 template <typename T1, typename... Ts>
298 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
299 CheckFailed(Message);
300 if (OS)
301 WriteTs(V1, Vs...);
302 }
303
304 /// A debug info check failed.
305 void DebugInfoCheckFailed(const Twine &Message) {
306 if (OS)
307 *OS << Message << '\n';
308 Broken |= TreatBrokenDebugInfoAsError;
309 BrokenDebugInfo = true;
310 }
311
312 /// A debug info check failed (with values to print).
313 template <typename T1, typename... Ts>
314 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
315 const Ts &... Vs) {
316 DebugInfoCheckFailed(Message);
317 if (OS)
318 WriteTs(V1, Vs...);
319 }
320};
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
336 SmallPtrSet<const Metadata *, 32> MDNodes;
337
338 /// Keep track of which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice if it has multiple operands. In particular, for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 llvm::TimeTraceScope timeScope("Verifier");
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482 visitModuleErrnoTBAA();
483
484 verifyCompileUnits();
485
486 verifyDeoptimizeCallingConvs();
487 DISubprogramAttachments.clear();
488 return !Broken;
489 }
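
  // A minimal sketch of how this is typically driven from client code, via
  // the free functions declared in llvm/IR/Verifier.h (illustrative only;
  // both functions return true when the IR is broken):
  //
  //   bool BrokenDebugInfo = false;
  //   if (verifyModule(M, &errs(), &BrokenDebugInfo))
  //     report_fatal_error("module failed verification");
  //   if (verifyFunction(F, &errs()))
  //     report_fatal_error("function failed verification");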
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleErrnoTBAA();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 RangeLikeMetadataKind Kind);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
532 void visitNofreeMetadata(Instruction &I, MDNode *MD);
533 void visitProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallStackMetadata(MDNode *MD);
535 void visitMemProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
537 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
538 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
539 void visitMMRAMetadata(Instruction &I, MDNode *MD);
540 void visitAnnotationMetadata(MDNode *Annotation);
541 void visitAliasScopeMetadata(const MDNode *MD);
542 void visitAliasScopeListMetadata(const MDNode *MD);
543 void visitAccessGroupMetadata(const MDNode *MD);
544 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
545 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
648 void verifyFragmentExpression(const DIVariable &V,
649 DIExpression::FragmentInfo Fragment,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
670/// We know that cond should be true; if not, print an error message.
671#define Check(C, ...) \
672 do { \
673 if (!(C)) { \
674 CheckFailed(__VA_ARGS__); \
675 return; \
676 } \
677 } while (false)
678
679/// We know that a debug info condition should be true; if not, print
680/// an error message.
681#define CheckDI(C, ...) \
682 do { \
683 if (!(C)) { \
684 DebugInfoCheckFailed(__VA_ARGS__); \
685 return; \
686 } \
687 } while (false)
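
// For example, the use in Verifier::visit(Instruction &) below,
//
//   Check(I.getOperand(i) != nullptr, "Operand is null", &I);
//
// prints the message (and the offending instruction, via the Write()
// overloads in VerifierSupport) when the condition is false and then
// returns from the current visitor method, so verification keeps going
// instead of aborting.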
688
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
699 if (auto *Loc =
700 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
718 InstVisitor<Verifier>::visit(I);
719}
720
721// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
722static void forEachUser(const Value *User,
723 SmallPtrSetImpl<const Value *> &Visited,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
728 SmallVector<const Value *> WorkList(User->materialized_users());
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
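
// For example, visitGlobalValue() below walks all transitive users of a
// global roughly like this (a sketch of the call that appears later in
// this file):
//
//   forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
//     // ...diagnose bad users...
//     return true; // true = also follow this user's own users
//   });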
737
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772 }
773
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 Type *GVType = GV.getValueType();
831
832 if (MaybeAlign A = GV.getAlign()) {
833 Check(A->value() <= Value::MaximumAlignment,
834 "huge alignment values are unsupported", &GV);
835 }
836
837 if (GV.hasInitializer()) {
838 Check(GV.getInitializer()->getType() == GVType,
839 "Global variable initializer type does not match global "
840 "variable type!",
841 &GV);
843 "Global variable initializer must be sized", &GV);
844 visitConstantExprsRecursively(GV.getInitializer());
845 // If the global has common linkage, it must have a zero initializer and
846 // cannot be constant.
847 if (GV.hasCommonLinkage()) {
849 "'common' global must have a zero initializer!", &GV);
850 Check(!GV.isConstant(), "'common' global may not be marked constant!",
851 &GV);
852 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
853 }
854 }
855
856 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
857 GV.getName() == "llvm.global_dtors")) {
859 "invalid linkage for intrinsic global variable", &GV);
861 "invalid uses of intrinsic global variable", &GV);
862
863 // Don't worry about emitting an error for it not being an array,
864 // visitGlobalValue will complain on appending non-array.
865 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
866 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
867 PointerType *FuncPtrTy =
868 PointerType::get(Context, DL.getProgramAddressSpace());
869 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
870 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
871 STy->getTypeAtIndex(1) == FuncPtrTy,
872 "wrong type for intrinsic global variable", &GV);
873 Check(STy->getNumElements() == 3,
874 "the third field of the element type is mandatory, "
875 "specify ptr null to migrate from the obsoleted 2-field form");
876 Type *ETy = STy->getTypeAtIndex(2);
877 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
878 &GV);
879 }
880 }
881
882 if (GV.hasName() && (GV.getName() == "llvm.used" ||
883 GV.getName() == "llvm.compiler.used")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
890 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
891 Check(PTy, "wrong type for intrinsic global variable", &GV);
892 if (GV.hasInitializer()) {
893 const Constant *Init = GV.getInitializer();
894 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
895 Check(InitArray, "wrong initializer for intrinsic global variable",
896 Init);
897 for (Value *Op : InitArray->operands()) {
898 Value *V = Op->stripPointerCasts();
901 Twine("invalid ") + GV.getName() + " member", V);
902 Check(V->hasName(),
903 Twine("members of ") + GV.getName() + " must be named", V);
904 }
905 }
906 }
907 }
908
909 // Visit any debug info attachments.
911 GV.getMetadata(LLVMContext::MD_dbg, MDs);
912 for (auto *MD : MDs) {
913 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
914 visitDIGlobalVariableExpression(*GVE);
915 else
916 CheckDI(false, "!dbg attachment of global variable must be a "
917 "DIGlobalVariableExpression");
918 }
919
920 // Scalable vectors cannot be global variables, since we don't know
921 // the runtime size.
922 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
923
924 // Check if it is or contains a target extension type that disallows being
925 // used as a global.
927 "Global @" + GV.getName() + " has illegal target extension type",
928 GVType);
929
930 if (!GV.hasInitializer()) {
931 visitGlobalValue(GV);
932 return;
933 }
934
935 // Walk any aggregate initializers looking for bitcasts between address spaces
936 visitConstantExprsRecursively(GV.getInitializer());
937
938 visitGlobalValue(GV);
939}
940
941void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
942 SmallPtrSet<const GlobalAlias*, 4> Visited;
943 Visited.insert(&GA);
944 visitAliaseeSubExpr(Visited, GA, C);
945}
946
947void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
948 const GlobalAlias &GA, const Constant &C) {
949 if (GA.hasAvailableExternallyLinkage()) {
950 Check(isa<GlobalValue>(C) &&
951 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
952 "available_externally alias must point to available_externally "
953 "global value",
954 &GA);
955 }
956 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
958 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
959 &GA);
960 }
961
962 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
963 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
964
965 Check(!GA2->isInterposable(),
966 "Alias cannot point to an interposable alias", &GA);
967 } else {
968 // Only continue verifying subexpressions of GlobalAliases.
969 // Do not recurse into global initializers.
970 return;
971 }
972 }
973
974 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
975 visitConstantExprsRecursively(CE);
976
977 for (const Use &U : C.operands()) {
978 Value *V = &*U;
979 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
980 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
981 else if (const auto *C2 = dyn_cast<Constant>(V))
982 visitAliaseeSubExpr(Visited, GA, *C2);
983 }
984}
985
986void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
988 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
989 "weak_odr, external, or available_externally linkage!",
990 &GA);
991 const Constant *Aliasee = GA.getAliasee();
992 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
993 Check(GA.getType() == Aliasee->getType(),
994 "Alias and aliasee types should match!", &GA);
995
996 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
997 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
998
999 visitAliaseeSubExpr(GA, *Aliasee);
1000
1001 visitGlobalValue(GA);
1002}
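
// For reference, a well-formed alias in textual IR looks roughly like this
// (an illustrative sketch):
//
//   @target = global i32 0
//   @an_alias = alias i32, ptr @target
//
// The checks above reject, for example, an alias whose aliasee is only a
// declaration, or a chain of aliases that forms a cycle.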
1003
1004void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1005 visitGlobalValue(GI);
1006
1008 GI.getAllMetadata(MDs);
1009 for (const auto &I : MDs) {
1010 CheckDI(I.first != LLVMContext::MD_dbg,
1011 "an ifunc may not have a !dbg attachment", &GI);
1012 Check(I.first != LLVMContext::MD_prof,
1013 "an ifunc may not have a !prof attachment", &GI);
1014 visitMDNode(*I.second, AreDebugLocsAllowed::No);
1015 }
1016
1018 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1019 "weak_odr, or external linkage!",
1020 &GI);
1021 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1022 // is a Function definition.
1023 const Function *Resolver = GI.getResolverFunction();
1024 Check(Resolver, "IFunc must have a Function resolver", &GI);
1025 Check(!Resolver->isDeclarationForLinker(),
1026 "IFunc resolver must be a definition", &GI);
1027
1028 // Check that the immediate resolver operand (prior to any bitcasts) has the
1029 // correct type.
1030 const Type *ResolverTy = GI.getResolver()->getType();
1031
1033 "IFunc resolver must return a pointer", &GI);
1034
1035 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1036 "IFunc resolver has incorrect type", &GI);
1037}
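
// For reference, a well-formed ifunc in textual IR looks roughly like this
// (an illustrative sketch; @foo_impl stands for some hypothetical i32 (i32)
// definition elsewhere in the module):
//
//   define ptr @foo_resolver() {
//     ret ptr @foo_impl
//   }
//   @foo = ifunc i32 (i32), ptr @foo_resolver
//
// The resolver must be a definition and must return a pointer.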
1038
1039void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1040 // There used to be various other llvm.dbg.* nodes, but we don't support
1041 // upgrading them and we want to reserve the namespace for future uses.
1042 if (NMD.getName().starts_with("llvm.dbg."))
1043 CheckDI(NMD.getName() == "llvm.dbg.cu",
1044 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1045 for (const MDNode *MD : NMD.operands()) {
1046 if (NMD.getName() == "llvm.dbg.cu")
1047 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1048
1049 if (!MD)
1050 continue;
1051
1052 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1053 }
1054}
1055
1056void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1057 // Only visit each node once. Metadata can be mutually recursive, so this
1058 // avoids infinite recursion here, as well as being an optimization.
1059 if (!MDNodes.insert(&MD).second)
1060 return;
1061
1062 Check(&MD.getContext() == &Context,
1063 "MDNode context does not match Module context!", &MD);
1064
1065 switch (MD.getMetadataID()) {
1066 default:
1067 llvm_unreachable("Invalid MDNode subclass");
1068 case Metadata::MDTupleKind:
1069 break;
1070#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1071 case Metadata::CLASS##Kind: \
1072 visit##CLASS(cast<CLASS>(MD)); \
1073 break;
1074#include "llvm/IR/Metadata.def"
1075 }
1076
1077 for (const Metadata *Op : MD.operands()) {
1078 if (!Op)
1079 continue;
1080 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1081 &MD, Op);
1082 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1083 "DILocation not allowed within this metadata node", &MD, Op);
1084 if (auto *N = dyn_cast<MDNode>(Op)) {
1085 visitMDNode(*N, AllowLocs);
1086 continue;
1087 }
1088 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1089 visitValueAsMetadata(*V, nullptr);
1090 continue;
1091 }
1092 }
1093
1094 // Check llvm.loop.estimated_trip_count.
1095 if (MD.getNumOperands() > 0 &&
1097 Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
1099 Check(Count && Count->getType()->isIntegerTy() &&
1100 cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
1101 "Expected second operand to be an integer constant of type i32 or "
1102 "smaller",
1103 &MD);
1104 }
1105
1106 // Check these last, so we diagnose problems in operands first.
1107 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1108 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1109}
1110
1111void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1112 Check(MD.getValue(), "Expected valid value", &MD);
1113 Check(!MD.getValue()->getType()->isMetadataTy(),
1114 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1115
1116 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1117 if (!L)
1118 return;
1119
1120 Check(F, "function-local metadata used outside a function", L);
1121
1122 // If this was an instruction, bb, or argument, verify that it is in the
1123 // function that we expect.
1124 Function *ActualF = nullptr;
1125 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1126 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1127 ActualF = I->getParent()->getParent();
1128 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1129 ActualF = BB->getParent();
1130 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1131 ActualF = A->getParent();
1132 assert(ActualF && "Unimplemented function local metadata case!");
1133
1134 Check(ActualF == F, "function-local metadata used in wrong function", L);
1135}
1136
1137void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1138 for (const ValueAsMetadata *VAM : AL.getArgs())
1139 visitValueAsMetadata(*VAM, F);
1140}
1141
1142void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1143 Metadata *MD = MDV.getMetadata();
1144 if (auto *N = dyn_cast<MDNode>(MD)) {
1145 visitMDNode(*N, AreDebugLocsAllowed::No);
1146 return;
1147 }
1148
1149 // Only visit each node once. Metadata can be mutually recursive, so this
1150 // avoids infinite recursion here, as well as being an optimization.
1151 if (!MDNodes.insert(MD).second)
1152 return;
1153
1154 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1155 visitValueAsMetadata(*V, F);
1156
1157 if (auto *AL = dyn_cast<DIArgList>(MD))
1158 visitDIArgList(*AL, F);
1159}
1160
1161static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1162static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1163static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1164static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1165
1166void Verifier::visitDILocation(const DILocation &N) {
1167 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1168 "location requires a valid scope", &N, N.getRawScope());
1169 if (auto *IA = N.getRawInlinedAt())
1170 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1171 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1172 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1173}
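
// A DILocation accepted by these checks looks like, for example:
//
//   !10 = !DILocation(line: 2, column: 7, scope: !9, inlinedAt: !11)
//
// where !9 must be a DILocalScope (e.g. a DISubprogram definition or a
// DILexicalBlock) and the optional !11 must itself be a DILocation
// (metadata numbers here are illustrative).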
1174
1175void Verifier::visitGenericDINode(const GenericDINode &N) {
1176 CheckDI(N.getTag(), "invalid tag", &N);
1177}
1178
1179void Verifier::visitDIScope(const DIScope &N) {
1180 if (auto *F = N.getRawFile())
1181 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1182}
1183
1184void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1185 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1186 auto *BaseType = N.getRawBaseType();
1187 CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1188 auto *LBound = N.getRawLowerBound();
1189 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1190 isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
1191 isa<DIDerivedType>(LBound),
1192 "LowerBound must be signed constant or DIVariable or DIExpression or "
1193 "DIDerivedType",
1194 &N);
1195 auto *UBound = N.getRawUpperBound();
1196 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1197 isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
1198 isa<DIDerivedType>(UBound),
1199 "UpperBound must be signed constant or DIVariable or DIExpression or "
1200 "DIDerivedType",
1201 &N);
1202 auto *Stride = N.getRawStride();
1203 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1204 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1205 "Stride must be signed constant or DIVariable or DIExpression", &N);
1206 auto *Bias = N.getRawBias();
1207 CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1208 isa<DIExpression>(Bias),
1209 "Bias must be signed constant or DIVariable or DIExpression", &N);
1210 // Subrange types currently only support constant size.
1211 auto *Size = N.getRawSizeInBits();
1213 "SizeInBits must be a constant");
1214}
1215
1216void Verifier::visitDISubrange(const DISubrange &N) {
1217 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1218 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1219 "Subrange can have any one of count or upperBound", &N);
1220 auto *CBound = N.getRawCountNode();
1221 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1222 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1223 "Count must be signed constant or DIVariable or DIExpression", &N);
1224 auto Count = N.getCount();
1226 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1227 "invalid subrange count", &N);
1228 auto *LBound = N.getRawLowerBound();
1229 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1230 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1231 "LowerBound must be signed constant or DIVariable or DIExpression",
1232 &N);
1233 auto *UBound = N.getRawUpperBound();
1234 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1235 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1236 "UpperBound must be signed constant or DIVariable or DIExpression",
1237 &N);
1238 auto *Stride = N.getRawStride();
1239 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1240 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1241 "Stride must be signed constant or DIVariable or DIExpression", &N);
1242}
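
// For example (illustrative):
//
//   !7 = !DISubrange(count: 16, lowerBound: 0)
//
// describes a fixed 16-element array dimension. The count could instead be
// a DIVariable or DIExpression for a runtime-sized dimension, but count and
// upperBound may not both be present.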
1243
1244void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1245 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1246 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1247 "GenericSubrange can have any one of count or upperBound", &N);
1248 auto *CBound = N.getRawCountNode();
1249 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1250 "Count must be signed constant or DIVariable or DIExpression", &N);
1251 auto *LBound = N.getRawLowerBound();
1252 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1253 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1254 "LowerBound must be signed constant or DIVariable or DIExpression",
1255 &N);
1256 auto *UBound = N.getRawUpperBound();
1257 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1258 "UpperBound must be signed constant or DIVariable or DIExpression",
1259 &N);
1260 auto *Stride = N.getRawStride();
1261 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1262 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1263 "Stride must be signed constant or DIVariable or DIExpression", &N);
1264}
1265
1266void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1267 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1268}
1269
1270void Verifier::visitDIBasicType(const DIBasicType &N) {
1271 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1272 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1273 N.getTag() == dwarf::DW_TAG_string_type,
1274 "invalid tag", &N);
1275 // Basic types currently only support constant size.
1276 auto *Size = N.getRawSizeInBits();
1278 "SizeInBits must be a constant");
1279}
1280
1281void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1282 visitDIBasicType(N);
1283
1284 CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1285 CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1286 N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1287 "invalid encoding", &N);
1291 "invalid kind", &N);
1293 N.getFactorRaw() == 0,
1294 "factor should be 0 for rationals", &N);
1296 (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1297 "numerator and denominator should be 0 for non-rationals", &N);
1298}
1299
1300void Verifier::visitDIStringType(const DIStringType &N) {
1301 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1302 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1303 &N);
1304}
1305
1306void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1307 // Common scope checks.
1308 visitDIScope(N);
1309
1310 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1311 N.getTag() == dwarf::DW_TAG_pointer_type ||
1312 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1313 N.getTag() == dwarf::DW_TAG_reference_type ||
1314 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1315 N.getTag() == dwarf::DW_TAG_const_type ||
1316 N.getTag() == dwarf::DW_TAG_immutable_type ||
1317 N.getTag() == dwarf::DW_TAG_volatile_type ||
1318 N.getTag() == dwarf::DW_TAG_restrict_type ||
1319 N.getTag() == dwarf::DW_TAG_atomic_type ||
1320 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1321 N.getTag() == dwarf::DW_TAG_member ||
1322 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1323 N.getTag() == dwarf::DW_TAG_inheritance ||
1324 N.getTag() == dwarf::DW_TAG_friend ||
1325 N.getTag() == dwarf::DW_TAG_set_type ||
1326 N.getTag() == dwarf::DW_TAG_template_alias,
1327 "invalid tag", &N);
1328 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1329 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1330 N.getRawExtraData());
1331 } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
1332 CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
1333 N.getRawExtraData());
1334 } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
1335 N.getTag() == dwarf::DW_TAG_member ||
1336 N.getTag() == dwarf::DW_TAG_variable) {
1337 auto *ExtraData = N.getRawExtraData();
1338 auto IsValidExtraData = [&]() {
1339 if (ExtraData == nullptr)
1340 return true;
1341 if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
1342 isa<DIObjCProperty>(ExtraData))
1343 return true;
1344 if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
1345 if (Tuple->getNumOperands() != 1)
1346 return false;
1347 return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
1348 }
1349 return false;
1350 };
1351 CheckDI(IsValidExtraData(),
1352 "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
1353 "or MDTuple with single ConstantAsMetadata operand",
1354 &N, ExtraData);
1355 }
1356
1357 if (N.getTag() == dwarf::DW_TAG_set_type) {
1358 if (auto *T = N.getRawBaseType()) {
1362 CheckDI(
1363 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1364 (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1365 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1366 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1367 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1368 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1369 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1370 "invalid set base type", &N, T);
1371 }
1372 }
1373
1374 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1375 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1376 N.getRawBaseType());
1377
1378 if (N.getDWARFAddressSpace()) {
1379 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1380 N.getTag() == dwarf::DW_TAG_reference_type ||
1381 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1382 "DWARF address space only applies to pointer or reference types",
1383 &N);
1384 }
1385
1386 auto *Size = N.getRawSizeInBits();
1389 "SizeInBits must be a constant or DIVariable or DIExpression");
1390}
1391
1392/// Detect mutually exclusive flags.
1393static bool hasConflictingReferenceFlags(unsigned Flags) {
1394 return ((Flags & DINode::FlagLValueReference) &&
1395 (Flags & DINode::FlagRValueReference)) ||
1396 ((Flags & DINode::FlagTypePassByValue) &&
1397 (Flags & DINode::FlagTypePassByReference));
1398}
1399
1400void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1401 auto *Params = dyn_cast<MDTuple>(&RawParams);
1402 CheckDI(Params, "invalid template params", &N, &RawParams);
1403 for (Metadata *Op : Params->operands()) {
1404 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1405 &N, Params, Op);
1406 }
1407}
1408
1409void Verifier::visitDICompositeType(const DICompositeType &N) {
1410 // Common scope checks.
1411 visitDIScope(N);
1412
1413 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1414 N.getTag() == dwarf::DW_TAG_structure_type ||
1415 N.getTag() == dwarf::DW_TAG_union_type ||
1416 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1417 N.getTag() == dwarf::DW_TAG_class_type ||
1418 N.getTag() == dwarf::DW_TAG_variant_part ||
1419 N.getTag() == dwarf::DW_TAG_variant ||
1420 N.getTag() == dwarf::DW_TAG_namelist,
1421 "invalid tag", &N);
1422
1423 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1424 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1425 N.getRawBaseType());
1426
1427 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1428 "invalid composite elements", &N, N.getRawElements());
1429 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1430 N.getRawVTableHolder());
1432 "invalid reference flags", &N);
1433 unsigned DIBlockByRefStruct = 1 << 4;
1434 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1435 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1436 CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1437 "DISubprogram contains null entry in `elements` field", &N);
1438
1439 if (N.isVector()) {
1440 const DINodeArray Elements = N.getElements();
1441 CheckDI(Elements.size() == 1 &&
1442 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1443 "invalid vector, expected one element of type subrange", &N);
1444 }
1445
1446 if (auto *Params = N.getRawTemplateParams())
1447 visitTemplateParams(N, *Params);
1448
1449 if (auto *D = N.getRawDiscriminator()) {
1450 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1451 "discriminator can only appear on variant part");
1452 }
1453
1454 if (N.getRawDataLocation()) {
1455 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1456 "dataLocation can only appear in array type");
1457 }
1458
1459 if (N.getRawAssociated()) {
1460 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1461 "associated can only appear in array type");
1462 }
1463
1464 if (N.getRawAllocated()) {
1465 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1466 "allocated can only appear in array type");
1467 }
1468
1469 if (N.getRawRank()) {
1470 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1471 "rank can only appear in array type");
1472 }
1473
1474 if (N.getTag() == dwarf::DW_TAG_array_type) {
1475 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1476 }
1477
1478 auto *Size = N.getRawSizeInBits();
1481 "SizeInBits must be a constant or DIVariable or DIExpression");
1482}
1483
1484void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1485 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1486 if (auto *Types = N.getRawTypeArray()) {
1487 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1488 for (Metadata *Ty : N.getTypeArray()->operands()) {
1489 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1490 }
1491 }
1493 "invalid reference flags", &N);
1494}
1495
1496void Verifier::visitDIFile(const DIFile &N) {
1497 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1498 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1499 if (Checksum) {
1500 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1501 "invalid checksum kind", &N);
1502 size_t Size;
1503 switch (Checksum->Kind) {
1504 case DIFile::CSK_MD5:
1505 Size = 32;
1506 break;
1507 case DIFile::CSK_SHA1:
1508 Size = 40;
1509 break;
1510 case DIFile::CSK_SHA256:
1511 Size = 64;
1512 break;
1513 }
1514 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1515 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1516 "invalid checksum", &N);
1517 }
1518}
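
// For example (illustrative; the checksum shown is the MD5 of an empty
// file):
//
//   !2 = !DIFile(filename: "a.c", directory: "/tmp",
//                checksumkind: CSK_MD5,
//                checksum: "d41d8cd98f00b204e9800998ecf8427e")
//
// MD5, SHA1 and SHA256 checksums must be exactly 32, 40 and 64 hex digits,
// respectively.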
1519
1520void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1521 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1522 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1523
1524 // Don't bother verifying the compilation directory or producer string
1525 // as those could be empty.
1526 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1527 N.getRawFile());
1528 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1529 N.getFile());
1530
1531 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1532 "invalid emission kind", &N);
1533
1534 if (auto *Array = N.getRawEnumTypes()) {
1535 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1536 for (Metadata *Op : N.getEnumTypes()->operands()) {
1537 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1538 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1539 "invalid enum type", &N, N.getEnumTypes(), Op);
1540 }
1541 }
1542 if (auto *Array = N.getRawRetainedTypes()) {
1543 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1544 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1545 CheckDI(
1546 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1547 !cast<DISubprogram>(Op)->isDefinition())),
1548 "invalid retained type", &N, Op);
1549 }
1550 }
1551 if (auto *Array = N.getRawGlobalVariables()) {
1552 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1553 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1555 "invalid global variable ref", &N, Op);
1556 }
1557 }
1558 if (auto *Array = N.getRawImportedEntities()) {
1559 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1560 for (Metadata *Op : N.getImportedEntities()->operands()) {
1561 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1562 &N, Op);
1563 }
1564 }
1565 if (auto *Array = N.getRawMacros()) {
1566 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1567 for (Metadata *Op : N.getMacros()->operands()) {
1568 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1569 }
1570 }
1571 CUVisited.insert(&N);
1572}
1573
1574void Verifier::visitDISubprogram(const DISubprogram &N) {
1575 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1576 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1577 if (auto *F = N.getRawFile())
1578 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1579 else
1580 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1581 if (auto *T = N.getRawType())
1582 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1583 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1584 N.getRawContainingType());
1585 if (auto *Params = N.getRawTemplateParams())
1586 visitTemplateParams(N, *Params);
1587 if (auto *S = N.getRawDeclaration())
1588 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1589 "invalid subprogram declaration", &N, S);
1590 if (auto *RawNode = N.getRawRetainedNodes()) {
1591 auto *Node = dyn_cast<MDTuple>(RawNode);
1592 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1593 for (Metadata *Op : Node->operands()) {
1594 CheckDI(Op, "nullptr in retained nodes", &N, Node);
1595
1596 auto True = [](const Metadata *) { return true; };
1597 auto False = [](const Metadata *) { return false; };
1598 bool IsTypeCorrect =
1599 DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
1600 CheckDI(IsTypeCorrect,
1601 "invalid retained nodes, expected DILocalVariable, DILabel or "
1602 "DIImportedEntity",
1603 &N, Node, Op);
1604
1605 auto *RetainedNode = cast<DINode>(Op);
1606 auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
1608 CheckDI(RetainedNodeScope,
1609 "invalid retained nodes, retained node is not local", &N, Node,
1610 RetainedNode);
1611 CheckDI(
1612 RetainedNodeScope->getSubprogram() == &N,
1613 "invalid retained nodes, retained node does not belong to subprogram",
1614 &N, Node, RetainedNode, RetainedNodeScope);
1615 }
1616 }
1617  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1618          "invalid reference flags", &N);
1619
1620 auto *Unit = N.getRawUnit();
1621 if (N.isDefinition()) {
1622 // Subprogram definitions (not part of the type hierarchy).
1623 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1624 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1625 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1626 // There's no good way to cross the CU boundary to insert a nested
1627 // DISubprogram definition in one CU into a type defined in another CU.
1628 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1629 if (CT && CT->getRawIdentifier() &&
1630 M.getContext().isODRUniquingDebugTypes())
1631 CheckDI(N.getDeclaration(),
1632 "definition subprograms cannot be nested within DICompositeType "
1633 "when enabling ODR",
1634 &N);
1635 } else {
1636 // Subprogram declarations (part of the type hierarchy).
1637 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1638 CheckDI(!N.getRawDeclaration(),
1639 "subprogram declaration must not have a declaration field");
1640 }
1641
1642 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1643 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1644 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1645 for (Metadata *Op : ThrownTypes->operands())
1646 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1647 Op);
1648 }
1649
1650 if (N.areAllCallsDescribed())
1651 CheckDI(N.isDefinition(),
1652 "DIFlagAllCallsDescribed must be attached to a definition");
1653}
1654
1655void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1656 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1657 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1658 "invalid local scope", &N, N.getRawScope());
1659 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1660 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1661}
1662
1663void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1664 visitDILexicalBlockBase(N);
1665
1666 CheckDI(N.getLine() || !N.getColumn(),
1667 "cannot have column info without line info", &N);
1668}
1669
1670void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1671 visitDILexicalBlockBase(N);
1672}
1673
1674void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1675 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1676 if (auto *S = N.getRawScope())
1677 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1678 if (auto *S = N.getRawDecl())
1679 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1680}
1681
1682void Verifier::visitDINamespace(const DINamespace &N) {
1683 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1684 if (auto *S = N.getRawScope())
1685 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1686}
1687
1688void Verifier::visitDIMacro(const DIMacro &N) {
1689 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1690 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1691 "invalid macinfo type", &N);
1692 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1693 if (!N.getValue().empty()) {
1694 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1695 }
1696}
1697
1698void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1699 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1700 "invalid macinfo type", &N);
1701 if (auto *F = N.getRawFile())
1702 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1703
1704 if (auto *Array = N.getRawElements()) {
1705 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1706 for (Metadata *Op : N.getElements()->operands()) {
1707 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1708 }
1709 }
1710}
1711
1712void Verifier::visitDIModule(const DIModule &N) {
1713 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1714 CheckDI(!N.getName().empty(), "anonymous module", &N);
1715}
1716
1717void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1718 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1719}
1720
1721void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1722 visitDITemplateParameter(N);
1723
1724 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1725 &N);
1726}
1727
1728void Verifier::visitDITemplateValueParameter(
1729 const DITemplateValueParameter &N) {
1730 visitDITemplateParameter(N);
1731
1732 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1733 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1734 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1735 "invalid tag", &N);
1736}
1737
1738void Verifier::visitDIVariable(const DIVariable &N) {
1739 if (auto *S = N.getRawScope())
1740 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1741 if (auto *F = N.getRawFile())
1742 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1743}
1744
1745void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1746 // Checks common to all variables.
1747 visitDIVariable(N);
1748
1749 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1750 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1751 // Check only if the global variable is not an extern
1752 if (N.isDefinition())
1753 CheckDI(N.getType(), "missing global variable type", &N);
1754 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1755    CheckDI(isa<DIDerivedType>(Member),
1756            "invalid static data member declaration", &N, Member);
1757 }
1758}
1759
1760void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1761 // Checks common to all variables.
1762 visitDIVariable(N);
1763
1764 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1765 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1766 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1767 "local variable requires a valid scope", &N, N.getRawScope());
1768 if (auto Ty = N.getType())
1769 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1770}
1771
1772void Verifier::visitDIAssignID(const DIAssignID &N) {
1773 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1774 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1775}
1776
1777void Verifier::visitDILabel(const DILabel &N) {
1778 if (auto *S = N.getRawScope())
1779 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1780 if (auto *F = N.getRawFile())
1781 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1782
1783 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1784 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1785 "label requires a valid scope", &N, N.getRawScope());
1786}
1787
1788void Verifier::visitDIExpression(const DIExpression &N) {
1789 CheckDI(N.isValid(), "invalid expression", &N);
1790}
1791
1792void Verifier::visitDIGlobalVariableExpression(
1793 const DIGlobalVariableExpression &GVE) {
1794 CheckDI(GVE.getVariable(), "missing variable");
1795 if (auto *Var = GVE.getVariable())
1796 visitDIGlobalVariable(*Var);
1797 if (auto *Expr = GVE.getExpression()) {
1798 visitDIExpression(*Expr);
1799 if (auto Fragment = Expr->getFragmentInfo())
1800 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1801 }
1802}
1803
1804void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1805 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1806 if (auto *T = N.getRawType())
1807 CheckDI(isType(T), "invalid type ref", &N, T);
1808 if (auto *F = N.getRawFile())
1809 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1810}
1811
1812void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1813 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1814 N.getTag() == dwarf::DW_TAG_imported_declaration,
1815 "invalid tag", &N);
1816 if (auto *S = N.getRawScope())
1817 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1818 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1819 N.getRawEntity());
1820}
1821
1822void Verifier::visitComdat(const Comdat &C) {
1823 // In COFF the Module is invalid if the GlobalValue has private linkage.
1824 // Entities with private linkage don't have entries in the symbol table.
1825 if (TT.isOSBinFormatCOFF())
1826 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1827 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1828 GV);
1829}
1830
1831void Verifier::visitModuleIdents() {
1832 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1833 if (!Idents)
1834 return;
1835
1836  // llvm.ident takes a list of metadata entries. Each entry has exactly one string.
1837  // Scan each llvm.ident entry and make sure that this requirement is met.
1838 for (const MDNode *N : Idents->operands()) {
1839 Check(N->getNumOperands() == 1,
1840 "incorrect number of operands in llvm.ident metadata", N);
1841 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1842 ("invalid value for llvm.ident metadata entry operand"
1843 "(the operand should be a string)"),
1844 N->getOperand(0));
1845 }
1846}
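// A well-formed llvm.ident attachment, for illustration (the string content is
// arbitrary):
//
//   !llvm.ident = !{!0}
//   !0 = !{!"example compiler 1.0"}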
1847
1848void Verifier::visitModuleCommandLines() {
1849 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1850 if (!CommandLines)
1851 return;
1852
1853  // llvm.commandline takes a list of metadata entries. Each entry has exactly
1854  // one string. Scan each llvm.commandline entry and make sure that this
1855  // requirement is met.
1856 for (const MDNode *N : CommandLines->operands()) {
1857 Check(N->getNumOperands() == 1,
1858 "incorrect number of operands in llvm.commandline metadata", N);
1859 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1860 ("invalid value for llvm.commandline metadata entry operand"
1861 "(the operand should be a string)"),
1862 N->getOperand(0));
1863 }
1864}
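// Likewise, an illustrative well-formed llvm.commandline attachment, one
// string per entry:
//
//   !llvm.commandline = !{!0}
//   !0 = !{!"-cc1 -O2 example.c"}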
1865
1866void Verifier::visitModuleErrnoTBAA() {
1867 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1868 if (!ErrnoTBAA)
1869 return;
1870
1871 Check(ErrnoTBAA->getNumOperands() >= 1,
1872 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1873
1874 for (const MDNode *N : ErrnoTBAA->operands())
1875 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1876}
1877
1878void Verifier::visitModuleFlags() {
1879 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1880 if (!Flags) return;
1881
1882 // Scan each flag, and track the flags and requirements.
1883 DenseMap<const MDString*, const MDNode*> SeenIDs;
1884 SmallVector<const MDNode*, 16> Requirements;
1885 uint64_t PAuthABIPlatform = -1;
1886 uint64_t PAuthABIVersion = -1;
1887 for (const MDNode *MDN : Flags->operands()) {
1888 visitModuleFlag(MDN, SeenIDs, Requirements);
1889 if (MDN->getNumOperands() != 3)
1890 continue;
1891 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1892 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1893 if (const auto *PAP =
1894                mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1895          PAuthABIPlatform = PAP->getZExtValue();
1896 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1897 if (const auto *PAV =
1898                mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1899          PAuthABIVersion = PAV->getZExtValue();
1900 }
1901 }
1902 }
1903
1904 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1905 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1906 "'aarch64-elf-pauthabi-version' module flags must be present");
1907
1908 // Validate that the requirements in the module are valid.
1909 for (const MDNode *Requirement : Requirements) {
1910 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1911 const Metadata *ReqValue = Requirement->getOperand(1);
1912
1913 const MDNode *Op = SeenIDs.lookup(Flag);
1914 if (!Op) {
1915 CheckFailed("invalid requirement on flag, flag is not present in module",
1916 Flag);
1917 continue;
1918 }
1919
1920 if (Op->getOperand(2) != ReqValue) {
1921 CheckFailed(("invalid requirement on flag, "
1922 "flag does not have the required value"),
1923 Flag);
1924 continue;
1925 }
1926 }
1927}
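// As a sketch, module flags that satisfy the checks above could look like the
// following (the pauthabi values are illustrative; note that the two pauthabi
// flags must be present together):
//
//   !llvm.module.flags = !{!0, !1, !2}
//   !0 = !{i32 1, !"wchar_size", i32 4}
//   !1 = !{i32 1, !"aarch64-elf-pauthabi-platform", i32 2}
//   !2 = !{i32 1, !"aarch64-elf-pauthabi-version", i32 85}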
1928
1929void
1930Verifier::visitModuleFlag(const MDNode *Op,
1931 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1932 SmallVectorImpl<const MDNode *> &Requirements) {
1933 // Each module flag should have three arguments, the merge behavior (a
1934 // constant int), the flag ID (an MDString), and the value.
1935 Check(Op->getNumOperands() == 3,
1936 "incorrect number of operands in module flag", Op);
1937 Module::ModFlagBehavior MFB;
1938 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1939    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1940          "invalid behavior operand in module flag (expected constant integer)",
1941 Op->getOperand(0));
1942 Check(false,
1943 "invalid behavior operand in module flag (unexpected constant)",
1944 Op->getOperand(0));
1945 }
1946 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1947 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1948 Op->getOperand(1));
1949
1950 // Check the values for behaviors with additional requirements.
1951 switch (MFB) {
1952 case Module::Error:
1953 case Module::Warning:
1954 case Module::Override:
1955 // These behavior types accept any value.
1956 break;
1957
1958 case Module::Min: {
1959 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1960 Check(V && V->getValue().isNonNegative(),
1961 "invalid value for 'min' module flag (expected constant non-negative "
1962 "integer)",
1963 Op->getOperand(2));
1964 break;
1965 }
1966
1967 case Module::Max: {
1968    Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1969          "invalid value for 'max' module flag (expected constant integer)",
1970 Op->getOperand(2));
1971 break;
1972 }
1973
1974 case Module::Require: {
1975 // The value should itself be an MDNode with two operands, a flag ID (an
1976 // MDString), and a value.
1977 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1978 Check(Value && Value->getNumOperands() == 2,
1979 "invalid value for 'require' module flag (expected metadata pair)",
1980 Op->getOperand(2));
1981 Check(isa<MDString>(Value->getOperand(0)),
1982 ("invalid value for 'require' module flag "
1983 "(first value operand should be a string)"),
1984 Value->getOperand(0));
1985
1986 // Append it to the list of requirements, to check once all module flags are
1987 // scanned.
1988 Requirements.push_back(Value);
1989 break;
1990 }
1991
1992 case Module::Append:
1993 case Module::AppendUnique: {
1994 // These behavior types require the operand be an MDNode.
1995 Check(isa<MDNode>(Op->getOperand(2)),
1996 "invalid value for 'append'-type module flag "
1997 "(expected a metadata node)",
1998 Op->getOperand(2));
1999 break;
2000 }
2001 }
2002
2003 // Unless this is a "requires" flag, check the ID is unique.
2004 if (MFB != Module::Require) {
2005 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2006 Check(Inserted,
2007 "module flag identifiers must be unique (or of 'require' type)", ID);
2008 }
2009
2010 if (ID->getString() == "wchar_size") {
2011 ConstantInt *Value
2012        = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2013    Check(Value, "wchar_size metadata requires constant integer argument");
2014 }
2015
2016 if (ID->getString() == "Linker Options") {
2017 // If the llvm.linker.options named metadata exists, we assume that the
2018 // bitcode reader has upgraded the module flag. Otherwise the flag might
2019 // have been created by a client directly.
2020 Check(M.getNamedMetadata("llvm.linker.options"),
2021 "'Linker Options' named metadata no longer supported");
2022 }
2023
2024 if (ID->getString() == "SemanticInterposition") {
2025 ConstantInt *Value =
2026        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2027    Check(Value,
2028 "SemanticInterposition metadata requires constant integer argument");
2029 }
2030
2031 if (ID->getString() == "CG Profile") {
2032 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2033 visitModuleFlagCGProfileEntry(MDO);
2034 }
2035}
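// For illustration, a 'require' flag (behavior 3) whose requirement is
// satisfied by another flag in the same module might look like:
//
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"foo", i32 37}
//   !1 = !{i32 3, !"bar", !2}
//   !2 = !{!"foo", i32 37}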
2036
2037void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2038 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2039 if (!FuncMDO)
2040 return;
2041 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2042 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2043 "expected a Function or null", FuncMDO);
2044 };
2045 auto Node = dyn_cast_or_null<MDNode>(MDO);
2046 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2047 CheckFunction(Node->getOperand(0));
2048 CheckFunction(Node->getOperand(1));
2049 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2050 Check(Count && Count->getType()->isIntegerTy(),
2051 "expected an integer constant", Node->getOperand(2));
2052}
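// An illustrative "CG Profile" entry of the shape accepted above: each operand
// of the flag value is a (caller, callee, count) triple, and either function
// may be null:
//
//   !0 = !{i32 5, !"CG Profile", !1}
//   !1 = !{!2}
//   !2 = !{ptr @caller, ptr @callee, i64 42}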
2053
2054void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
2055 for (Attribute A : Attrs) {
2056
2057 if (A.isStringAttribute()) {
2058#define GET_ATTR_NAMES
2059#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
2060#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
2061 if (A.getKindAsString() == #DISPLAY_NAME) { \
2062 auto V = A.getValueAsString(); \
2063 if (!(V.empty() || V == "true" || V == "false")) \
2064 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
2065 ""); \
2066 }
2067
2068#include "llvm/IR/Attributes.inc"
2069 continue;
2070 }
2071
2072 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
2073 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
2074 V);
2075 return;
2076 }
2077 }
2078}
2079
2080// VerifyParameterAttrs - Check the given attributes for an argument or return
2081// value of the specified type. The value V is printed in error messages.
2082void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2083 const Value *V) {
2084 if (!Attrs.hasAttributes())
2085 return;
2086
2087 verifyAttributeTypes(Attrs, V);
2088
2089 for (Attribute Attr : Attrs)
2090 Check(Attr.isStringAttribute() ||
2091 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2092 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2093 V);
2094
2095 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2096 unsigned AttrCount =
2097 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2098 Check(AttrCount == 1,
2099 "Attribute 'immarg' is incompatible with other attributes except the "
2100 "'range' attribute",
2101 V);
2102 }
2103
2104 // Check for mutually incompatible attributes. Only inreg is compatible with
2105 // sret.
2106 unsigned AttrCount = 0;
2107 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2108 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2109 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2110 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2111 Attrs.hasAttribute(Attribute::InReg);
2112 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2113 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2114 Check(AttrCount <= 1,
2115 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2116 "'byref', and 'sret' are incompatible!",
2117 V);
2118
2119 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2120 Attrs.hasAttribute(Attribute::ReadOnly)),
2121 "Attributes "
2122 "'inalloca and readonly' are incompatible!",
2123 V);
2124
2125 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2126 Attrs.hasAttribute(Attribute::Returned)),
2127 "Attributes "
2128 "'sret and returned' are incompatible!",
2129 V);
2130
2131 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2132 Attrs.hasAttribute(Attribute::SExt)),
2133 "Attributes "
2134 "'zeroext and signext' are incompatible!",
2135 V);
2136
2137 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2138 Attrs.hasAttribute(Attribute::ReadOnly)),
2139 "Attributes "
2140 "'readnone and readonly' are incompatible!",
2141 V);
2142
2143 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2144 Attrs.hasAttribute(Attribute::WriteOnly)),
2145 "Attributes "
2146 "'readnone and writeonly' are incompatible!",
2147 V);
2148
2149 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2150 Attrs.hasAttribute(Attribute::WriteOnly)),
2151 "Attributes "
2152 "'readonly and writeonly' are incompatible!",
2153 V);
2154
2155 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2156 Attrs.hasAttribute(Attribute::AlwaysInline)),
2157 "Attributes "
2158 "'noinline and alwaysinline' are incompatible!",
2159 V);
2160
2161 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2162 Attrs.hasAttribute(Attribute::ReadNone)),
2163 "Attributes writable and readnone are incompatible!", V);
2164
2165 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2166 Attrs.hasAttribute(Attribute::ReadOnly)),
2167 "Attributes writable and readonly are incompatible!", V);
2168
2169 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2170 for (Attribute Attr : Attrs) {
2171 if (!Attr.isStringAttribute() &&
2172 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2173 CheckFailed("Attribute '" + Attr.getAsString() +
2174 "' applied to incompatible type!", V);
2175 return;
2176 }
2177 }
2178
2179 if (isa<PointerType>(Ty)) {
2180 if (Attrs.hasAttribute(Attribute::Alignment)) {
2181 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2182 Check(AttrAlign.value() <= Value::MaximumAlignment,
2183 "huge alignment values are unsupported", V);
2184 }
2185 if (Attrs.hasAttribute(Attribute::ByVal)) {
2186 Type *ByValTy = Attrs.getByValType();
2187 SmallPtrSet<Type *, 4> Visited;
2188 Check(ByValTy->isSized(&Visited),
2189 "Attribute 'byval' does not support unsized types!", V);
2190 // Check if it is or contains a target extension type that disallows being
2191 // used on the stack.
2193 "'byval' argument has illegal target extension type", V);
2194 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2195 "huge 'byval' arguments are unsupported", V);
2196 }
2197 if (Attrs.hasAttribute(Attribute::ByRef)) {
2198 SmallPtrSet<Type *, 4> Visited;
2199 Check(Attrs.getByRefType()->isSized(&Visited),
2200 "Attribute 'byref' does not support unsized types!", V);
2201 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2202 (1ULL << 32),
2203 "huge 'byref' arguments are unsupported", V);
2204 }
2205 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2206 SmallPtrSet<Type *, 4> Visited;
2207 Check(Attrs.getInAllocaType()->isSized(&Visited),
2208 "Attribute 'inalloca' does not support unsized types!", V);
2209 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2210 (1ULL << 32),
2211 "huge 'inalloca' arguments are unsupported", V);
2212 }
2213 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2214 SmallPtrSet<Type *, 4> Visited;
2215 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2216 "Attribute 'preallocated' does not support unsized types!", V);
2217 Check(
2218 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2219 (1ULL << 32),
2220 "huge 'preallocated' arguments are unsupported", V);
2221 }
2222 }
2223
2224 if (Attrs.hasAttribute(Attribute::Initializes)) {
2225 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2226 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2227 V);
2229 "Attribute 'initializes' does not support unordered ranges", V);
2230 }
2231
2232 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2233 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2234 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2235 V);
2236 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2237 "Invalid value for 'nofpclass' test mask", V);
2238 }
2239 if (Attrs.hasAttribute(Attribute::Range)) {
2240 const ConstantRange &CR =
2241 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2242    Check(Ty->isIntOrIntVectorTy(CR.getBitWidth()),
2243          "Range bit width must match type bit width!", V);
2244 }
2245}
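// For example, the mutual-exclusion checks above accept
//
//   declare void @f(ptr sret(i32) inreg)
//
// since only 'inreg' may be combined with 'sret', but reject a parameter that
// carries, say, both 'byval' and 'inalloca', or both 'zeroext' and 'signext'.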
2246
2247void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2248 const Value *V) {
2249 if (Attrs.hasFnAttr(Attr)) {
2250 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2251 unsigned N;
2252 if (S.getAsInteger(10, N))
2253 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2254 }
2255}
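// e.g. the following string attributes pass this check; the values are
// arbitrary base-10 unsigned integers:
//
//   attributes #0 = { "patchable-function-entry"="2" "warn-stack-size"="80" }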
2256
2257// Check parameter attributes against a function type.
2258// The value V is printed in error messages.
2259void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2260 const Value *V, bool IsIntrinsic,
2261 bool IsInlineAsm) {
2262 if (Attrs.isEmpty())
2263 return;
2264
2265 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2266 Check(Attrs.hasParentContext(Context),
2267 "Attribute list does not match Module context!", &Attrs, V);
2268 for (const auto &AttrSet : Attrs) {
2269 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2270 "Attribute set does not match Module context!", &AttrSet, V);
2271 for (const auto &A : AttrSet) {
2272 Check(A.hasParentContext(Context),
2273 "Attribute does not match Module context!", &A, V);
2274 }
2275 }
2276 }
2277
2278 bool SawNest = false;
2279 bool SawReturned = false;
2280 bool SawSRet = false;
2281 bool SawSwiftSelf = false;
2282 bool SawSwiftAsync = false;
2283 bool SawSwiftError = false;
2284
2285 // Verify return value attributes.
2286 AttributeSet RetAttrs = Attrs.getRetAttrs();
2287 for (Attribute RetAttr : RetAttrs)
2288 Check(RetAttr.isStringAttribute() ||
2289 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2290 "Attribute '" + RetAttr.getAsString() +
2291 "' does not apply to function return values",
2292 V);
2293
2294 unsigned MaxParameterWidth = 0;
2295 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2296 if (Ty->isVectorTy()) {
2297 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2298 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2299 if (Size > MaxParameterWidth)
2300 MaxParameterWidth = Size;
2301 }
2302 }
2303 };
2304 GetMaxParameterWidth(FT->getReturnType());
2305 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2306
2307 // Verify parameter attributes.
2308 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2309 Type *Ty = FT->getParamType(i);
2310 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2311
2312 if (!IsIntrinsic) {
2313 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2314 "immarg attribute only applies to intrinsics", V);
2315 if (!IsInlineAsm)
2316 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2317 "Attribute 'elementtype' can only be applied to intrinsics"
2318 " and inline asm.",
2319 V);
2320 }
2321
2322 verifyParameterAttrs(ArgAttrs, Ty, V);
2323 GetMaxParameterWidth(Ty);
2324
2325 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2326 Check(!SawNest, "More than one parameter has attribute nest!", V);
2327 SawNest = true;
2328 }
2329
2330 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2331 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2332 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2333 "Incompatible argument and return types for 'returned' attribute",
2334 V);
2335 SawReturned = true;
2336 }
2337
2338 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2339 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2340 Check(i == 0 || i == 1,
2341 "Attribute 'sret' is not on first or second parameter!", V);
2342 SawSRet = true;
2343 }
2344
2345 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2346 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2347 SawSwiftSelf = true;
2348 }
2349
2350 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2351 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2352 SawSwiftAsync = true;
2353 }
2354
2355 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2356 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2357 SawSwiftError = true;
2358 }
2359
2360 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2361 Check(i == FT->getNumParams() - 1,
2362 "inalloca isn't on the last parameter!", V);
2363 }
2364 }
2365
2366 if (!Attrs.hasFnAttrs())
2367 return;
2368
2369 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2370 for (Attribute FnAttr : Attrs.getFnAttrs())
2371 Check(FnAttr.isStringAttribute() ||
2372 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2373 "Attribute '" + FnAttr.getAsString() +
2374 "' does not apply to functions!",
2375 V);
2376
2377 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2378 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2379 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2380
2381 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2382 Check(Attrs.hasFnAttr(Attribute::NoInline),
2383 "Attribute 'optnone' requires 'noinline'!", V);
2384
2385 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2386 "Attributes 'optsize and optnone' are incompatible!", V);
2387
2388 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2389 "Attributes 'minsize and optnone' are incompatible!", V);
2390
2391 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2392 "Attributes 'optdebug and optnone' are incompatible!", V);
2393 }
2394
2395 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2396 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2397 "Attributes "
2398 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2399 V);
2400
2401 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2402 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2403 "Attributes 'optsize and optdebug' are incompatible!", V);
2404
2405 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2406 "Attributes 'minsize and optdebug' are incompatible!", V);
2407 }
2408
2409 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2410 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2411 "Attribute writable and memory without argmem: write are incompatible!",
2412 V);
2413
2414 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2415 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2416 "Attributes 'aarch64_pstate_sm_enabled and "
2417 "aarch64_pstate_sm_compatible' are incompatible!",
2418 V);
2419 }
2420
2421 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2422 Attrs.hasFnAttr("aarch64_inout_za") +
2423 Attrs.hasFnAttr("aarch64_out_za") +
2424 Attrs.hasFnAttr("aarch64_preserves_za") +
2425 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2426 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2427 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2428 "'aarch64_za_state_agnostic' are mutually exclusive",
2429 V);
2430
2431 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2432 Attrs.hasFnAttr("aarch64_in_zt0") +
2433 Attrs.hasFnAttr("aarch64_inout_zt0") +
2434 Attrs.hasFnAttr("aarch64_out_zt0") +
2435 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2436 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2437 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2438 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2439 "'aarch64_za_state_agnostic' are mutually exclusive",
2440 V);
2441
2442 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2443 const GlobalValue *GV = cast<GlobalValue>(V);
2444    Check(GV->hasGlobalUnnamedAddr(),
2445          "Attribute 'jumptable' requires 'unnamed_addr'", V);
2446 }
2447
2448 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2449 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2450 if (ParamNo >= FT->getNumParams()) {
2451 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2452 return false;
2453 }
2454
2455 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2456 CheckFailed("'allocsize' " + Name +
2457 " argument must refer to an integer parameter",
2458 V);
2459 return false;
2460 }
2461
2462 return true;
2463 };
2464
2465 if (!CheckParam("element size", Args->first))
2466 return;
2467
2468 if (Args->second && !CheckParam("number of elements", *Args->second))
2469 return;
2470 }
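  // For illustration, declarations that satisfy the 'allocsize' index checks
  // above (function names are hypothetical):
  //
  //   declare ptr @my_malloc(i64) allocsize(0)
  //   declare ptr @my_calloc(i64, i64) allocsize(0, 1)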
2471
2472 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2473 AllocFnKind K = Attrs.getAllocKind();
2474    AllocFnKind Type =
2475        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2476 if (!is_contained(
2477 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2478 Type))
2479 CheckFailed(
2480 "'allockind()' requires exactly one of alloc, realloc, and free");
2481 if ((Type == AllocFnKind::Free) &&
2482 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2483 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2484 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2485 "or aligned modifiers.");
2486 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2487 if ((K & ZeroedUninit) == ZeroedUninit)
2488 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2489 }
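  // A sketch of declarations consistent with the allockind checks above
  // (function names are hypothetical):
  //
  //   declare ptr @my_alloc(i64) allockind("alloc,uninitialized")
  //   declare void @my_free(ptr) allockind("free")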
2490
2491 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2492 StringRef S = A.getValueAsString();
2493 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2494 Function *Variant = M.getFunction(S);
2495 if (Variant) {
2496 Attribute Family = Attrs.getFnAttr("alloc-family");
2497 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2498 if (Family.isValid())
2499 Check(VariantFamily.isValid() &&
2500 VariantFamily.getValueAsString() == Family.getValueAsString(),
2501 "'alloc-variant-zeroed' must name a function belonging to the "
2502 "same 'alloc-family'");
2503
2504 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2505 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2506 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2507 "'alloc-variant-zeroed' must name a function with "
2508 "'allockind(\"zeroed\")'");
2509
2510 Check(FT == Variant->getFunctionType(),
2511 "'alloc-variant-zeroed' must name a function with the same "
2512 "signature");
2513 }
2514 }
2515
2516 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2517 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2518 if (VScaleMin == 0)
2519 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2520 else if (!isPowerOf2_32(VScaleMin))
2521 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2522 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2523 if (VScaleMax && VScaleMin > VScaleMax)
2524 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2525 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2526 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2527 }
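  // e.g. an attribute such as
  //
  //   attributes #0 = { vscale_range(1,16) }
  //
  // satisfies these checks; a minimum of 0 or a non-power-of-two bound is
  // reported as invalid.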
2528
2529 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2530 StringRef FP = FPAttr.getValueAsString();
2531 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2532 FP != "non-leaf-no-reserve")
2533 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2534 }
2535
2536 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2537 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2538 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2539 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2540 .getValueAsString()
2541 .empty(),
2542 "\"patchable-function-entry-section\" must not be empty");
2543 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2544
2545 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2546 StringRef S = A.getValueAsString();
2547 if (S != "none" && S != "all" && S != "non-leaf")
2548 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2549 }
2550
2551 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2552 StringRef S = A.getValueAsString();
2553 if (S != "a_key" && S != "b_key")
2554 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2555 V);
2556 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2557 CheckFailed(
2558 "'sign-return-address-key' present without `sign-return-address`");
2559 }
2560 }
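  // For illustration, a consistent pairing that passes both checks above:
  //
  //   attributes #0 = { "sign-return-address"="non-leaf"
  //                     "sign-return-address-key"="a_key" }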
2561
2562 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2563 StringRef S = A.getValueAsString();
2564 if (S != "" && S != "true" && S != "false")
2565 CheckFailed(
2566 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2567 }
2568
2569 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2570 StringRef S = A.getValueAsString();
2571 if (S != "" && S != "true" && S != "false")
2572 CheckFailed(
2573 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2574 }
2575
2576 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2577 StringRef S = A.getValueAsString();
2578 if (S != "" && S != "true" && S != "false")
2579 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2580 V);
2581 }
2582
2583 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2584 StringRef S = A.getValueAsString();
2585 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2586 if (!Info)
2587 CheckFailed("invalid name for a VFABI variant: " + S, V);
2588 }
2589
2590 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2591 StringRef S = A.getValueAsString();
2592    if (!parseDenormalFPAttribute(S).isValid())
2593      CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2594 }
2595
2596 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2597 StringRef S = A.getValueAsString();
2598    if (!parseDenormalFPAttribute(S).isValid())
2599      CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2600 V);
2601 }
2602
2603 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2604 StringRef S = A.getValueAsString();
2605    SmallVector<StringRef> Args;
2606    S.split(Args, ',');
2607 Check(Args.size() >= 5,
2608 "modular-format attribute requires at least 5 arguments", V);
2609 unsigned FirstArgIdx;
2610 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2611 "modular-format attribute first arg index is not an integer", V);
2612 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2613 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2614 "modular-format attribute first arg index is out of bounds", V);
2615 }
2616
2617 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2618 StringRef S = A.getValueAsString();
2619 if (!S.empty()) {
2620 for (auto FeatureFlag : split(S, ',')) {
2621 if (FeatureFlag.empty())
2622 CheckFailed(
2623 "target-features attribute should not contain an empty string");
2624 else
2625 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2626 "target feature '" + FeatureFlag +
2627 "' must start with a '+' or '-'",
2628 V);
2629 }
2630 }
2631 }
2632}
2633void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2634 Check(MD->getNumOperands() == 2,
2635 "'unknown' !prof should have a single additional operand", MD);
2636 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2637 Check(PassName != nullptr,
2638 "'unknown' !prof should have an additional operand of type "
2639 "string");
2640 Check(!PassName->getString().empty(),
2641 "the 'unknown' !prof operand should not be an empty string");
2642}
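// Based on the checks above, a well-formed 'unknown' function-level !prof
// attachment would look like (the pass name string is illustrative):
//
//   define void @f() !prof !0 { ... }
//   !0 = !{!"unknown", !"sample-profile"}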
2643
2644void Verifier::verifyFunctionMetadata(
2645 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2646 for (const auto &Pair : MDs) {
2647 if (Pair.first == LLVMContext::MD_prof) {
2648 MDNode *MD = Pair.second;
2649 Check(MD->getNumOperands() >= 2,
2650 "!prof annotations should have no less than 2 operands", MD);
2651      // We may have functions that are synthesized by the compiler, e.g. in
2652      // WPD, for which we can't currently determine the entry count.
2653 if (MD->getOperand(0).equalsStr(
2655 verifyUnknownProfileMetadata(MD);
2656 continue;
2657 }
2658
2659 // Check first operand.
2660 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2661 MD);
2662      Check(isa<MDString>(MD->getOperand(0)),
2663            "expected string with name of the !prof annotation", MD);
2664 MDString *MDS = cast<MDString>(MD->getOperand(0));
2665 StringRef ProfName = MDS->getString();
2668 "first operand should be 'function_entry_count'"
2669 " or 'synthetic_function_entry_count'",
2670 MD);
2671
2672 // Check second operand.
2673 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2674 MD);
2675      Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2676            "expected integer argument to function_entry_count", MD);
2677 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2678 MDNode *MD = Pair.second;
2679 Check(MD->getNumOperands() == 1,
2680 "!kcfi_type must have exactly one operand", MD);
2681 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2682 MD);
2683      Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2684            "expected a constant operand for !kcfi_type", MD);
2685 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2686 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2687 "expected a constant integer operand for !kcfi_type", MD);
2688      Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2689            "expected a 32-bit integer constant operand for !kcfi_type", MD);
2690 }
2691 }
2692}
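// Illustrative function attachments of the shape checked above (the entry
// count and the kcfi hash value are arbitrary):
//
//   define void @f() !prof !0 !kcfi_type !1 { ... }
//   !0 = !{!"function_entry_count", i64 100}
//   !1 = !{i32 12345678}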
2693
2694void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2695 if (EntryC->getNumOperands() == 0)
2696 return;
2697
2698 if (!ConstantExprVisited.insert(EntryC).second)
2699 return;
2700
2701  SmallVector<const Constant *, 16> Stack;
2702  Stack.push_back(EntryC);
2703
2704 while (!Stack.empty()) {
2705 const Constant *C = Stack.pop_back_val();
2706
2707 // Check this constant expression.
2708 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2709 visitConstantExpr(CE);
2710
2711 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2712 visitConstantPtrAuth(CPA);
2713
2714 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2715 // Global Values get visited separately, but we do need to make sure
2716 // that the global value is in the correct module
2717 Check(GV->getParent() == &M, "Referencing global in another module!",
2718 EntryC, &M, GV, GV->getParent());
2719 continue;
2720 }
2721
2722 // Visit all sub-expressions.
2723 for (const Use &U : C->operands()) {
2724 const auto *OpC = dyn_cast<Constant>(U);
2725 if (!OpC)
2726 continue;
2727 if (!ConstantExprVisited.insert(OpC).second)
2728 continue;
2729 Stack.push_back(OpC);
2730 }
2731 }
2732}
2733
2734void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2735 if (CE->getOpcode() == Instruction::BitCast)
2736 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2737 CE->getType()),
2738 "Invalid bitcast", CE);
2739 else if (CE->getOpcode() == Instruction::PtrToAddr)
2740 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2741}
2742
2743void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2744 Check(CPA->getPointer()->getType()->isPointerTy(),
2745 "signed ptrauth constant base pointer must have pointer type");
2746
2747 Check(CPA->getType() == CPA->getPointer()->getType(),
2748 "signed ptrauth constant must have same type as its base pointer");
2749
2750 Check(CPA->getKey()->getBitWidth() == 32,
2751 "signed ptrauth constant key must be i32 constant integer");
2752
2753  Check(CPA->getAddrDiscriminator()->getType()->isPointerTy(),
2754        "signed ptrauth constant address discriminator must be a pointer");
2755
2756 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2757 "signed ptrauth constant discriminator must be i64 constant integer");
2758
2760 "signed ptrauth constant deactivation symbol must be a pointer");
2761
2764 "signed ptrauth constant deactivation symbol must be a global value "
2765 "or null");
2766}
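// A minimal sketch of a signed pointer constant of the kind validated above,
// assuming the optional discriminator operands may be omitted (the key value
// is arbitrary):
//
//   @g = global i32 0
//   @g.signed = global ptr ptrauth (ptr @g, i32 2)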
2767
2768bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2769 // There shouldn't be more attribute sets than there are parameters plus the
2770 // function and return value.
2771 return Attrs.getNumAttrSets() <= Params + 2;
2772}
2773
2774void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2775 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2776 unsigned ArgNo = 0;
2777 unsigned LabelNo = 0;
2778 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2779 if (CI.Type == InlineAsm::isLabel) {
2780 ++LabelNo;
2781 continue;
2782 }
2783
2784 // Only deal with constraints that correspond to call arguments.
2785 if (!CI.hasArg())
2786 continue;
2787
2788 if (CI.isIndirect) {
2789 const Value *Arg = Call.getArgOperand(ArgNo);
2790 Check(Arg->getType()->isPointerTy(),
2791 "Operand for indirect constraint must have pointer type", &Call);
2792
2793      Check(Call.paramHasAttr(ArgNo, Attribute::ElementType),
2794            "Operand for indirect constraint must have elementtype attribute",
2795 &Call);
2796 } else {
2797 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2798 "Elementtype attribute can only be applied for indirect "
2799 "constraints",
2800 &Call);
2801 }
2802
2803 ArgNo++;
2804 }
2805
2806 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2807 Check(LabelNo == CallBr->getNumIndirectDests(),
2808 "Number of label constraints does not match number of callbr dests",
2809 &Call);
2810 } else {
2811 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2812 &Call);
2813 }
2814}
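// For example, an indirect ("*m") constraint must carry an elementtype
// attribute on the corresponding pointer argument (the empty asm string is
// just a placeholder):
//
//   call void asm sideeffect "", "=*m"(ptr elementtype(i32) %p)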
2815
2816/// Verify that statepoint intrinsic is well formed.
2817void Verifier::verifyStatepoint(const CallBase &Call) {
2818 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2819
2820  Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2821            !Call.onlyAccessesArgMemory(),
2822        "gc.statepoint must read and write all memory to preserve "
2823 "reordering restrictions required by safepoint semantics",
2824 Call);
2825
2826 const int64_t NumPatchBytes =
2827 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2828 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2829 Check(NumPatchBytes >= 0,
2830 "gc.statepoint number of patchable bytes must be "
2831 "positive",
2832 Call);
2833
2834 Type *TargetElemType = Call.getParamElementType(2);
2835 Check(TargetElemType,
2836 "gc.statepoint callee argument must have elementtype attribute", Call);
2837 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2838 Check(TargetFuncType,
2839 "gc.statepoint callee elementtype must be function type", Call);
2840
2841 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2842 Check(NumCallArgs >= 0,
2843 "gc.statepoint number of arguments to underlying call "
2844 "must be positive",
2845 Call);
2846 const int NumParams = (int)TargetFuncType->getNumParams();
2847 if (TargetFuncType->isVarArg()) {
2848 Check(NumCallArgs >= NumParams,
2849 "gc.statepoint mismatch in number of vararg call args", Call);
2850
2851 // TODO: Remove this limitation
2852 Check(TargetFuncType->getReturnType()->isVoidTy(),
2853 "gc.statepoint doesn't support wrapping non-void "
2854 "vararg functions yet",
2855 Call);
2856 } else
2857 Check(NumCallArgs == NumParams,
2858 "gc.statepoint mismatch in number of call args", Call);
2859
2860 const uint64_t Flags
2861 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2862 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2863 "unknown flag used in gc.statepoint flags argument", Call);
2864
2865 // Verify that the types of the call parameter arguments match
2866 // the type of the wrapped callee.
2867 AttributeList Attrs = Call.getAttributes();
2868 for (int i = 0; i < NumParams; i++) {
2869 Type *ParamType = TargetFuncType->getParamType(i);
2870 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2871 Check(ArgType == ParamType,
2872 "gc.statepoint call argument does not match wrapped "
2873 "function type",
2874 Call);
2875
2876 if (TargetFuncType->isVarArg()) {
2877 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2878 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2879 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2880 }
2881 }
2882
2883 const int EndCallArgsInx = 4 + NumCallArgs;
2884
2885 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2886 Check(isa<ConstantInt>(NumTransitionArgsV),
2887 "gc.statepoint number of transition arguments "
2888 "must be constant integer",
2889 Call);
2890 const int NumTransitionArgs =
2891 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2892 Check(NumTransitionArgs == 0,
2893 "gc.statepoint w/inline transition bundle is deprecated", Call);
2894 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2895
2896 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2897 Check(isa<ConstantInt>(NumDeoptArgsV),
2898 "gc.statepoint number of deoptimization arguments "
2899 "must be constant integer",
2900 Call);
2901 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2902 Check(NumDeoptArgs == 0,
2903 "gc.statepoint w/inline deopt operands is deprecated", Call);
2904
2905 const int ExpectedNumArgs = 7 + NumCallArgs;
2906 Check(ExpectedNumArgs == (int)Call.arg_size(),
2907 "gc.statepoint too many arguments", Call);
2908
2909 // Check that the only uses of this gc.statepoint are gc.result or
2910 // gc.relocate calls which are tied to this statepoint and thus part
2911 // of the same statepoint sequence
2912 for (const User *U : Call.users()) {
2913 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2914 Check(UserCall, "illegal use of statepoint token", Call, U);
2915 if (!UserCall)
2916 continue;
2917 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2918 "gc.result or gc.relocate are the only value uses "
2919 "of a gc.statepoint",
2920 Call, U);
2921 if (isa<GCResultInst>(UserCall)) {
2922 Check(UserCall->getArgOperand(0) == &Call,
2923 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2924 } else if (isa<GCRelocateInst>(Call)) {
2925 Check(UserCall->getArgOperand(0) == &Call,
2926 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2927 }
2928 }
2929
2930 // Note: It is legal for a single derived pointer to be listed multiple
2931 // times. It's non-optimal, but it is legal. It can also happen after
2932 // insertion if we strip a bitcast away.
2933 // Note: It is really tempting to check that each base is relocated and
2934 // that a derived pointer is never reused as a base pointer. This turns
2935 // out to be problematic since optimizations run after safepoint insertion
2936 // can recognize equality properties that the insertion logic doesn't know
2937 // about. See example statepoint.ll in the verifier subdirectory
2938}
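// A minimal sketch of a statepoint that satisfies the operand-count checks
// above: a void() callee, no call/transition/deopt arguments, and arbitrary ID
// and flag values, giving the expected 7 operands.
//
//   %tok = call token (i64, i32, ptr, i32, i32, ...)
//       @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0,
//           ptr elementtype(void ()) @callee, i32 0, i32 0, i32 0, i32 0)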
2939
2940void Verifier::verifyFrameRecoverIndices() {
2941 for (auto &Counts : FrameEscapeInfo) {
2942 Function *F = Counts.first;
2943 unsigned EscapedObjectCount = Counts.second.first;
2944 unsigned MaxRecoveredIndex = Counts.second.second;
2945 Check(MaxRecoveredIndex <= EscapedObjectCount,
2946 "all indices passed to llvm.localrecover must be less than the "
2947 "number of arguments passed to llvm.localescape in the parent "
2948 "function",
2949 F);
2950 }
2951}
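// As a sketch, the index passed to llvm.localrecover must stay below the
// number of allocas escaped in the parent function (names are hypothetical):
//
//   define void @parent() {
//     %a = alloca i32
//     call void (...) @llvm.localescape(ptr %a)
//     ret void
//   }
//   ; elsewhere: call ptr @llvm.localrecover(ptr @parent, ptr %fp, i32 0)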
2952
2953static Instruction *getSuccPad(Instruction *Terminator) {
2954 BasicBlock *UnwindDest;
2955 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2956 UnwindDest = II->getUnwindDest();
2957 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2958 UnwindDest = CSI->getUnwindDest();
2959 else
2960 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2961 return &*UnwindDest->getFirstNonPHIIt();
2962}
2963
2964void Verifier::verifySiblingFuncletUnwinds() {
2965 llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
2966 SmallPtrSet<Instruction *, 8> Visited;
2967 SmallPtrSet<Instruction *, 8> Active;
2968 for (const auto &Pair : SiblingFuncletInfo) {
2969 Instruction *PredPad = Pair.first;
2970 if (Visited.count(PredPad))
2971 continue;
2972 Active.insert(PredPad);
2973 Instruction *Terminator = Pair.second;
2974 do {
2975 Instruction *SuccPad = getSuccPad(Terminator);
2976 if (Active.count(SuccPad)) {
2977 // Found a cycle; report error
2978 Instruction *CyclePad = SuccPad;
2979 SmallVector<Instruction *, 8> CycleNodes;
2980 do {
2981 CycleNodes.push_back(CyclePad);
2982 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2983 if (CycleTerminator != CyclePad)
2984 CycleNodes.push_back(CycleTerminator);
2985 CyclePad = getSuccPad(CycleTerminator);
2986 } while (CyclePad != SuccPad);
2987 Check(false, "EH pads can't handle each other's exceptions",
2988 ArrayRef<Instruction *>(CycleNodes));
2989 }
2990 // Don't re-walk a node we've already checked
2991 if (!Visited.insert(SuccPad).second)
2992 break;
2993 // Walk to this successor if it has a map entry.
2994 PredPad = SuccPad;
2995 auto TermI = SiblingFuncletInfo.find(PredPad);
2996 if (TermI == SiblingFuncletInfo.end())
2997 break;
2998 Terminator = TermI->second;
2999 Active.insert(PredPad);
3000 } while (true);
3001 // Each node only has one successor, so we've walked all the active
3002 // nodes' successors.
3003 Active.clear();
3004 }
3005}
3006
3007// visitFunction - Verify that a function is ok.
3008//
3009void Verifier::visitFunction(const Function &F) {
3010 visitGlobalValue(F);
3011
3012 // Check function arguments.
3013 FunctionType *FT = F.getFunctionType();
3014 unsigned NumArgs = F.arg_size();
3015
3016 Check(&Context == &F.getContext(),
3017 "Function context does not match Module context!", &F);
3018
3019 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
3020 Check(FT->getNumParams() == NumArgs,
3021 "# formal arguments must match # of arguments for function type!", &F,
3022 FT);
3023 Check(F.getReturnType()->isFirstClassType() ||
3024 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
3025 "Functions cannot return aggregate values!", &F);
3026
3027 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
3028 "Invalid struct return type!", &F);
3029
3030 if (MaybeAlign A = F.getAlign()) {
3031 Check(A->value() <= Value::MaximumAlignment,
3032 "huge alignment values are unsupported", &F);
3033 }
3034
3035 AttributeList Attrs = F.getAttributes();
3036
3037 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
3038 "Attribute after last parameter!", &F);
3039
3040 bool IsIntrinsic = F.isIntrinsic();
3041
3042 // Check function attributes.
3043 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
3044
3045  // On function declarations/definitions, we do not support the builtin
3046  // attribute. We do not check this in VerifyFunctionAttrs since that is
3047  // checking for attributes that can or cannot ever be on functions.
3048 Check(!Attrs.hasFnAttr(Attribute::Builtin),
3049 "Attribute 'builtin' can only be applied to a callsite.", &F);
3050
3051 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
3052 "Attribute 'elementtype' can only be applied to a callsite.", &F);
3053
3054 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
3055 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
3056
3057 if (Attrs.hasFnAttr(Attribute::Naked))
3058 for (const Argument &Arg : F.args())
3059 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
3060
3061 // Check that this function meets the restrictions on this calling convention.
3062 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
3063 // restrictions can be lifted.
3064 switch (F.getCallingConv()) {
3065 default:
3066 case CallingConv::C:
3067 break;
3068 case CallingConv::X86_INTR: {
3069 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
3070 "Calling convention parameter requires byval", &F);
3071 break;
3072 }
3073 case CallingConv::AMDGPU_KERNEL:
3074 case CallingConv::SPIR_KERNEL:
3075 case CallingConv::AMDGPU_CS_Chain:
3076 case CallingConv::AMDGPU_CS_ChainPreserve:
3077 Check(F.getReturnType()->isVoidTy(),
3078 "Calling convention requires void return type", &F);
3079 [[fallthrough]];
3080 case CallingConv::AMDGPU_VS:
3081 case CallingConv::AMDGPU_HS:
3082 case CallingConv::AMDGPU_GS:
3083 case CallingConv::AMDGPU_PS:
3084 case CallingConv::AMDGPU_CS:
3085 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
3086 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3087 const unsigned StackAS = DL.getAllocaAddrSpace();
3088 unsigned i = 0;
3089 for (const Argument &Arg : F.args()) {
3090 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3091 "Calling convention disallows byval", &F);
3092 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3093 "Calling convention disallows preallocated", &F);
3094 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3095 "Calling convention disallows inalloca", &F);
3096
3097 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3098 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3099 // value here.
3100 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3101 "Calling convention disallows stack byref", &F);
3102 }
3103
3104 ++i;
3105 }
3106 }
3107
3108 [[fallthrough]];
3109 case CallingConv::Fast:
3110 case CallingConv::Cold:
3111 case CallingConv::Intel_OCL_BI:
3112 case CallingConv::PTX_Kernel:
3113 case CallingConv::PTX_Device:
3114 Check(!F.isVarArg(),
3115 "Calling convention does not support varargs or "
3116 "perfect forwarding!",
3117 &F);
3118 break;
3119 case CallingConv::AMDGPU_Gfx_WholeWave:
3120 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3121 "Calling convention requires first argument to be i1", &F);
3122 Check(!F.arg_begin()->hasInRegAttr(),
3123 "Calling convention requires first argument to not be inreg", &F);
3124 Check(!F.isVarArg(),
3125 "Calling convention does not support varargs or "
3126 "perfect forwarding!",
3127 &F);
3128 break;
3129 }
3130
3131 // Check that the argument values match the function type for this function...
3132 unsigned i = 0;
3133 for (const Argument &Arg : F.args()) {
3134 Check(Arg.getType() == FT->getParamType(i),
3135 "Argument value does not match function argument type!", &Arg,
3136 FT->getParamType(i));
3137 Check(Arg.getType()->isFirstClassType(),
3138 "Function arguments must have first-class types!", &Arg);
3139 if (!IsIntrinsic) {
3140 Check(!Arg.getType()->isMetadataTy(),
3141 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3142 Check(!Arg.getType()->isTokenLikeTy(),
3143 "Function takes token but isn't an intrinsic", &Arg, &F);
3144 Check(!Arg.getType()->isX86_AMXTy(),
3145 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3146 }
3147
3148 // Check that swifterror argument is only used by loads and stores.
3149 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3150 verifySwiftErrorValue(&Arg);
3151 }
3152 ++i;
3153 }
3154
3155 if (!IsIntrinsic) {
3156 Check(!F.getReturnType()->isTokenLikeTy(),
3157 "Function returns a token but isn't an intrinsic", &F);
3158 Check(!F.getReturnType()->isX86_AMXTy(),
3159 "Function returns a x86_amx but isn't an intrinsic", &F);
3160 }
3161
3162 // Get the function metadata attachments.
3163 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
3164 F.getAllMetadata(MDs);
3165 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3166 verifyFunctionMetadata(MDs);
3167
3168 // Check validity of the personality function
3169 if (F.hasPersonalityFn()) {
3170 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3171 if (Per)
3172 Check(Per->getParent() == F.getParent(),
3173 "Referencing personality function in another module!", &F,
3174 F.getParent(), Per, Per->getParent());
3175 }
3176
3177 // EH funclet coloring can be expensive, recompute on-demand
3178 BlockEHFuncletColors.clear();
3179
3180 if (F.isMaterializable()) {
3181 // Function has a body somewhere we can't see.
3182 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3183 MDs.empty() ? nullptr : MDs.front().second);
3184 } else if (F.isDeclaration()) {
3185 for (const auto &I : MDs) {
3186 // This is used for call site debug information.
3187 CheckDI(I.first != LLVMContext::MD_dbg ||
3188 !cast<DISubprogram>(I.second)->isDistinct(),
3189 "function declaration may only have a unique !dbg attachment",
3190 &F);
3191 Check(I.first != LLVMContext::MD_prof,
3192 "function declaration may not have a !prof attachment", &F);
3193
3194 // Verify the metadata itself.
3195 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3196 }
3197 Check(!F.hasPersonalityFn(),
3198 "Function declaration shouldn't have a personality routine", &F);
3199 } else {
3200 // Verify that this function (which has a body) is not named "llvm.*". It
3201 // is not legal to define intrinsics.
3202 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3203
3204 // Check the entry node
3205 const BasicBlock *Entry = &F.getEntryBlock();
3206 Check(pred_empty(Entry),
3207 "Entry block to function must not have predecessors!", Entry);
3208
3209 // The address of the entry block cannot be taken, unless it is dead.
3210 if (Entry->hasAddressTaken()) {
3211 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3212 "blockaddress may not be used with the entry block!", Entry);
3213 }
3214
3215 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3216 NumKCFIAttachments = 0;
3217 // Visit metadata attachments.
3218 for (const auto &I : MDs) {
3219 // Verify that the attachment is legal.
3220 auto AllowLocs = AreDebugLocsAllowed::No;
3221 switch (I.first) {
3222 default:
3223 break;
3224 case LLVMContext::MD_dbg: {
3225 ++NumDebugAttachments;
3226 CheckDI(NumDebugAttachments == 1,
3227 "function must have a single !dbg attachment", &F, I.second);
3228 CheckDI(isa<DISubprogram>(I.second),
3229 "function !dbg attachment must be a subprogram", &F, I.second);
3230 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3231 "function definition may only have a distinct !dbg attachment",
3232 &F);
3233
3234 auto *SP = cast<DISubprogram>(I.second);
3235 const Function *&AttachedTo = DISubprogramAttachments[SP];
3236 CheckDI(!AttachedTo || AttachedTo == &F,
3237 "DISubprogram attached to more than one function", SP, &F);
3238 AttachedTo = &F;
3239 AllowLocs = AreDebugLocsAllowed::Yes;
3240 break;
3241 }
3242 case LLVMContext::MD_prof:
3243 ++NumProfAttachments;
3244 Check(NumProfAttachments == 1,
3245 "function must have a single !prof attachment", &F, I.second);
3246 break;
3247 case LLVMContext::MD_kcfi_type:
3248 ++NumKCFIAttachments;
3249 Check(NumKCFIAttachments == 1,
3250 "function must have a single !kcfi_type attachment", &F,
3251 I.second);
3252 break;
3253 }
3254
3255 // Verify the metadata itself.
3256 visitMDNode(*I.second, AllowLocs);
3257 }
3258 }
3259
3260 // If this function is actually an intrinsic, verify that it is only used in
3261 // direct call/invokes, never having its "address taken".
3262 // Only do this if the module is materialized, otherwise we don't have all the
3263 // uses.
3264 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3265 const User *U;
3266 if (F.hasAddressTaken(&U, false, true, false,
3267 /*IgnoreARCAttachedCall=*/true))
3268 Check(false, "Invalid user of intrinsic instruction!", U);
3269 }
3270
3271 // Check intrinsics' signatures.
3272 switch (F.getIntrinsicID()) {
3273 case Intrinsic::experimental_gc_get_pointer_base: {
3274 FunctionType *FT = F.getFunctionType();
3275 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3276 Check(isa<PointerType>(F.getReturnType()),
3277 "gc.get.pointer.base must return a pointer", F);
3278 Check(FT->getParamType(0) == F.getReturnType(),
3279 "gc.get.pointer.base operand and result must be of the same type", F);
3280 break;
3281 }
3282 case Intrinsic::experimental_gc_get_pointer_offset: {
3283 FunctionType *FT = F.getFunctionType();
3284 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3285 Check(isa<PointerType>(FT->getParamType(0)),
3286 "gc.get.pointer.offset operand must be a pointer", F);
3287 Check(F.getReturnType()->isIntegerTy(),
3288 "gc.get.pointer.offset must return integer", F);
3289 break;
3290 }
3291 }
3292
3293 auto *N = F.getSubprogram();
3294 HasDebugInfo = (N != nullptr);
3295 if (!HasDebugInfo)
3296 return;
3297
3298 // Check that all !dbg attachments lead back to N.
3299 //
3300 // FIXME: Check this incrementally while visiting !dbg attachments.
3301 // FIXME: Only check when N is the canonical subprogram for F.
3302 SmallPtrSet<const MDNode *, 32> Seen;
3303 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3304 // Be careful about using DILocation here since we might be dealing with
3305 // broken code (this is the Verifier after all).
3306 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3307 if (!DL)
3308 return;
3309 if (!Seen.insert(DL).second)
3310 return;
3311
3312 Metadata *Parent = DL->getRawScope();
3313 CheckDI(Parent && isa<DILocalScope>(Parent),
3314 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3315
3316 DILocalScope *Scope = DL->getInlinedAtScope();
3317 Check(Scope, "Failed to find DILocalScope", DL);
3318
3319 if (!Seen.insert(Scope).second)
3320 return;
3321
3322 DISubprogram *SP = Scope->getSubprogram();
3323
3324 // Scope and SP could be the same MDNode and we don't want to skip
3325 // validation in that case
3326 if ((Scope != SP) && !Seen.insert(SP).second)
3327 return;
3328
3329 CheckDI(SP->describes(&F),
3330 "!dbg attachment points at wrong subprogram for function", N, &F,
3331 &I, DL, Scope, SP);
3332 };
3333 for (auto &BB : F)
3334 for (auto &I : BB) {
3335 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3336 // The llvm.loop annotations also contain two DILocations.
3337 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3338 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3339 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3340 if (BrokenDebugInfo)
3341 return;
3342 }
3343}
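// Illustrative usage (a sketch, not part of the verifier itself): clients
// normally reach these per-function checks through the public entry points in
// llvm/IR/Verifier.h, which return true when broken IR is found, e.g.
//   if (verifyFunction(*F, &llvm::errs()))
//     report_fatal_error("front end emitted invalid IR");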
3344
3345// visitBasicBlock - Verify that a basic block is well formed...
3346//
3347void Verifier::visitBasicBlock(BasicBlock &BB) {
3348 InstsInThisBlock.clear();
3349 ConvergenceVerifyHelper.visit(BB);
3350
3351 // Ensure that basic blocks have terminators!
3352 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3353
3354 // Check constraints that this basic block imposes on all of the PHI nodes in
3355 // it.
3356 if (isa<PHINode>(BB.front())) {
3357 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3358 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3359 llvm::sort(Preds);
3360 for (const PHINode &PN : BB.phis()) {
3361 Check(PN.getNumIncomingValues() == Preds.size(),
3362 "PHINode should have one entry for each predecessor of its "
3363 "parent basic block!",
3364 &PN);
3365
3366 // Get and sort all incoming values in the PHI node...
3367 Values.clear();
3368 Values.reserve(PN.getNumIncomingValues());
3369 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3370 Values.push_back(
3371 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3372 llvm::sort(Values);
3373
3374 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3375 // Check to make sure that if there is more than one entry for a
3376 // particular basic block in this PHI node, that the incoming values are
3377 // all identical.
3378 //
3379 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3380 Values[i].second == Values[i - 1].second,
3381 "PHI node has multiple entries for the same basic block with "
3382 "different incoming values!",
3383 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3384
3385 // Check to make sure that the predecessors and PHI node entries are
3386 // matched up.
3387 Check(Values[i].first == Preds[i],
3388 "PHI node entries do not match predecessors!", &PN,
3389 Values[i].first, Preds[i]);
3390 }
3391 }
3392 }
3393
3394 // Check that all instructions have their parent pointers set up correctly.
3395 for (auto &I : BB)
3396 {
3397 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3398 }
3399
3400 // Confirm that no issues arise from the debug program.
3401 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3402 &BB);
3403}
3404
3405void Verifier::visitTerminator(Instruction &I) {
3406 // Ensure that terminators only exist at the end of the basic block.
3407 Check(&I == I.getParent()->getTerminator(),
3408 "Terminator found in the middle of a basic block!", I.getParent());
3409 visitInstruction(I);
3410}
3411
3412void Verifier::visitBranchInst(BranchInst &BI) {
3413 if (BI.isConditional()) {
3414 Check(BI.getCondition()->getType()->isIntegerTy(1),
3415 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3416 }
3417 visitTerminator(BI);
3418}
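// For illustration, the conditional-branch check above requires an i1
// condition; a fragment such as
//   br i32 %x, label %then, label %else
// is rejected, while the canonical form is
//   %c = icmp ne i32 %x, 0
//   br i1 %c, label %then, label %else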
3419
3420void Verifier::visitReturnInst(ReturnInst &RI) {
3421 Function *F = RI.getParent()->getParent();
3422 unsigned N = RI.getNumOperands();
3423 if (F->getReturnType()->isVoidTy())
3424 Check(N == 0,
3425 "Found return instr that returns non-void in Function of void "
3426 "return type!",
3427 &RI, F->getReturnType());
3428 else
3429 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3430 "Function return type does not match operand "
3431 "type of return inst!",
3432 &RI, F->getReturnType());
3433
3434 // Check to make sure that the return value has necessary properties for
3435 // terminators...
3436 visitTerminator(RI);
3437}
3438
3439void Verifier::visitSwitchInst(SwitchInst &SI) {
3440 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3441 // Check to make sure that all of the constants in the switch instruction
3442 // have the same type as the switched-on value.
3443 Type *SwitchTy = SI.getCondition()->getType();
3444 SmallPtrSet<ConstantInt*, 32> Constants;
3445 for (auto &Case : SI.cases()) {
3446 Check(isa<ConstantInt>(Case.getCaseValue()),
3447 "Case value is not a constant integer.", &SI);
3448 Check(Case.getCaseValue()->getType() == SwitchTy,
3449 "Switch constants must all be same type as switch value!", &SI);
3450 Check(Constants.insert(Case.getCaseValue()).second,
3451 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3452 }
3453
3454 visitTerminator(SI);
3455}
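// For illustration, the switch checks above require every case value to be a
// constant integer of the switched-on type and to be unique, so
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 0, label %b ]
// is rejected as a duplicate case, and an i64 case value on an i32 switch is
// rejected as a type mismatch.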
3456
3457void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3458 Check(BI.getAddress()->getType()->isPointerTy(),
3459 "Indirectbr operand must have pointer type!", &BI);
3460 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3461 Check(BI.getDestination(i)->getType()->isLabelTy(),
3462 "Indirectbr destinations must all have pointer type!", &BI);
3463
3464 visitTerminator(BI);
3465}
3466
3467void Verifier::visitCallBrInst(CallBrInst &CBI) {
3468 if (!CBI.isInlineAsm()) {
3469 Check(CBI.getIntrinsicID() != Intrinsic::not_intrinsic,
3470 "Callbr: indirect function / invalid signature");
3471 Check(!CBI.hasOperandBundles(),
3472 "Callbr for intrinsics currently doesn't support operand bundles");
3473
3474 switch (CBI.getIntrinsicID()) {
3475 case Intrinsic::amdgcn_kill: {
3476 Check(CBI.getNumIndirectDests() == 1,
3477 "Callbr amdgcn_kill only supports one indirect dest");
3478 bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
3479 CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
3480 Check(Unreachable || (Call && Call->getIntrinsicID() ==
3481 Intrinsic::amdgcn_unreachable),
3482 "Callbr amdgcn_kill indirect dest needs to be unreachable");
3483 break;
3484 }
3485 default:
3486 CheckFailed(
3487 "Callbr currently only supports asm-goto and selected intrinsics");
3488 }
3489 visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
3490 } else {
3491 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3492 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3493
3494 verifyInlineAsmCall(CBI);
3495 }
3496 visitTerminator(CBI);
3497}
3498
3499void Verifier::visitSelectInst(SelectInst &SI) {
3500 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3501 SI.getOperand(2)),
3502 "Invalid operands for select instruction!", &SI);
3503
3504 Check(SI.getTrueValue()->getType() == SI.getType(),
3505 "Select values must have same type as select instruction!", &SI);
3506 visitInstruction(SI);
3507}
3508
3509/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3510/// a pass; if any exist, it's an error.
3511///
3512void Verifier::visitUserOp1(Instruction &I) {
3513 Check(false, "User-defined operators should not live outside of a pass!", &I);
3514}
3515
3516void Verifier::visitTruncInst(TruncInst &I) {
3517 // Get the source and destination types
3518 Type *SrcTy = I.getOperand(0)->getType();
3519 Type *DestTy = I.getType();
3520
3521 // Get the size of the types in bits, we'll need this later
3522 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3523 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3524
3525 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3526 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3527 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3528 "trunc source and destination must both be a vector or neither", &I);
3529 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3530
3531 visitInstruction(I);
3532}
3533
3534void Verifier::visitZExtInst(ZExtInst &I) {
3535 // Get the source and destination types
3536 Type *SrcTy = I.getOperand(0)->getType();
3537 Type *DestTy = I.getType();
3538
3539 // Get the size of the types in bits, we'll need this later
3540 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3541 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3542 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3543 "zext source and destination must both be a vector or neither", &I);
3544 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3545 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3546
3547 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3548
3549 visitInstruction(I);
3550}
3551
3552void Verifier::visitSExtInst(SExtInst &I) {
3553 // Get the source and destination types
3554 Type *SrcTy = I.getOperand(0)->getType();
3555 Type *DestTy = I.getType();
3556
3557 // Get the size of the types in bits, we'll need this later
3558 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3559 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3560
3561 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3562 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3563 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3564 "sext source and destination must both be a vector or neither", &I);
3565 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3566
3567 visitInstruction(I);
3568}
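// For illustration, the integer cast checks above require trunc to strictly
// narrow and zext/sext to strictly widen the scalar width, e.g.
//   %a = trunc i32 %x to i16     ; accepted
//   %b = trunc i16 %x to i32     ; rejected: destination wider than source
//   %c = zext i32 %y to i32      ; rejected: destination not wider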
3569
3570void Verifier::visitFPTruncInst(FPTruncInst &I) {
3571 // Get the source and destination types
3572 Type *SrcTy = I.getOperand(0)->getType();
3573 Type *DestTy = I.getType();
3574 // Get the size of the types in bits, we'll need this later
3575 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3576 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3577
3578 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3579 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3580 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3581 "fptrunc source and destination must both be a vector or neither", &I);
3582 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3583
3584 visitInstruction(I);
3585}
3586
3587void Verifier::visitFPExtInst(FPExtInst &I) {
3588 // Get the source and destination types
3589 Type *SrcTy = I.getOperand(0)->getType();
3590 Type *DestTy = I.getType();
3591
3592 // Get the size of the types in bits, we'll need this later
3593 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3594 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3595
3596 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3597 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3598 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3599 "fpext source and destination must both be a vector or neither", &I);
3600 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3601
3602 visitInstruction(I);
3603}
3604
3605void Verifier::visitUIToFPInst(UIToFPInst &I) {
3606 // Get the source and destination types
3607 Type *SrcTy = I.getOperand(0)->getType();
3608 Type *DestTy = I.getType();
3609
3610 bool SrcVec = SrcTy->isVectorTy();
3611 bool DstVec = DestTy->isVectorTy();
3612
3613 Check(SrcVec == DstVec,
3614 "UIToFP source and dest must both be vector or scalar", &I);
3615 Check(SrcTy->isIntOrIntVectorTy(),
3616 "UIToFP source must be integer or integer vector", &I);
3617 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3618 &I);
3619
3620 if (SrcVec && DstVec)
3621 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3622 cast<VectorType>(DestTy)->getElementCount(),
3623 "UIToFP source and dest vector length mismatch", &I);
3624
3625 visitInstruction(I);
3626}
3627
3628void Verifier::visitSIToFPInst(SIToFPInst &I) {
3629 // Get the source and destination types
3630 Type *SrcTy = I.getOperand(0)->getType();
3631 Type *DestTy = I.getType();
3632
3633 bool SrcVec = SrcTy->isVectorTy();
3634 bool DstVec = DestTy->isVectorTy();
3635
3636 Check(SrcVec == DstVec,
3637 "SIToFP source and dest must both be vector or scalar", &I);
3638 Check(SrcTy->isIntOrIntVectorTy(),
3639 "SIToFP source must be integer or integer vector", &I);
3640 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3641 &I);
3642
3643 if (SrcVec && DstVec)
3644 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3645 cast<VectorType>(DestTy)->getElementCount(),
3646 "SIToFP source and dest vector length mismatch", &I);
3647
3648 visitInstruction(I);
3649}
3650
3651void Verifier::visitFPToUIInst(FPToUIInst &I) {
3652 // Get the source and destination types
3653 Type *SrcTy = I.getOperand(0)->getType();
3654 Type *DestTy = I.getType();
3655
3656 bool SrcVec = SrcTy->isVectorTy();
3657 bool DstVec = DestTy->isVectorTy();
3658
3659 Check(SrcVec == DstVec,
3660 "FPToUI source and dest must both be vector or scalar", &I);
3661 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3662 Check(DestTy->isIntOrIntVectorTy(),
3663 "FPToUI result must be integer or integer vector", &I);
3664
3665 if (SrcVec && DstVec)
3666 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3667 cast<VectorType>(DestTy)->getElementCount(),
3668 "FPToUI source and dest vector length mismatch", &I);
3669
3670 visitInstruction(I);
3671}
3672
3673void Verifier::visitFPToSIInst(FPToSIInst &I) {
3674 // Get the source and destination types
3675 Type *SrcTy = I.getOperand(0)->getType();
3676 Type *DestTy = I.getType();
3677
3678 bool SrcVec = SrcTy->isVectorTy();
3679 bool DstVec = DestTy->isVectorTy();
3680
3681 Check(SrcVec == DstVec,
3682 "FPToSI source and dest must both be vector or scalar", &I);
3683 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3684 Check(DestTy->isIntOrIntVectorTy(),
3685 "FPToSI result must be integer or integer vector", &I);
3686
3687 if (SrcVec && DstVec)
3688 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3689 cast<VectorType>(DestTy)->getElementCount(),
3690 "FPToSI source and dest vector length mismatch", &I);
3691
3692 visitInstruction(I);
3693}
3694
3695void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3696 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3697 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3698 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3699 V);
3700
3701 if (SrcTy->isVectorTy()) {
3702 auto *VSrc = cast<VectorType>(SrcTy);
3703 auto *VDest = cast<VectorType>(DestTy);
3704 Check(VSrc->getElementCount() == VDest->getElementCount(),
3705 "PtrToAddr vector length mismatch", V);
3706 }
3707
3708 Type *AddrTy = DL.getAddressType(SrcTy);
3709 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3710}
3711
3712void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3713 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3714 visitInstruction(I);
3715}
3716
3717void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3718 // Get the source and destination types
3719 Type *SrcTy = I.getOperand(0)->getType();
3720 Type *DestTy = I.getType();
3721
3722 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3723
3724 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3725 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3726 &I);
3727
3728 if (SrcTy->isVectorTy()) {
3729 auto *VSrc = cast<VectorType>(SrcTy);
3730 auto *VDest = cast<VectorType>(DestTy);
3731 Check(VSrc->getElementCount() == VDest->getElementCount(),
3732 "PtrToInt Vector length mismatch", &I);
3733 }
3734
3735 visitInstruction(I);
3736}
3737
3738void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3739 // Get the source and destination types
3740 Type *SrcTy = I.getOperand(0)->getType();
3741 Type *DestTy = I.getType();
3742
3743 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3744 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3745
3746 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3747 &I);
3748 if (SrcTy->isVectorTy()) {
3749 auto *VSrc = cast<VectorType>(SrcTy);
3750 auto *VDest = cast<VectorType>(DestTy);
3751 Check(VSrc->getElementCount() == VDest->getElementCount(),
3752 "IntToPtr Vector length mismatch", &I);
3753 }
3754 visitInstruction(I);
3755}
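// For illustration, the ptrtoint/inttoptr checks above require scalar-to-scalar
// or vector-to-vector casts with matching element counts, e.g.
//   %p = inttoptr <4 x i64> %v to <4 x ptr>     ; accepted
//   %q = inttoptr <4 x i64> %v to <2 x ptr>     ; rejected: length mismatch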
3756
3757void Verifier::visitBitCastInst(BitCastInst &I) {
3758 Check(
3759 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3760 "Invalid bitcast", &I);
3761 visitInstruction(I);
3762}
3763
3764void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3765 Type *SrcTy = I.getOperand(0)->getType();
3766 Type *DestTy = I.getType();
3767
3768 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3769 &I);
3770 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3771 &I);
3772 Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
3773 "AddrSpaceCast must be between different address spaces", &I);
3774 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3775 Check(SrcVTy->getElementCount() ==
3776 cast<VectorType>(DestTy)->getElementCount(),
3777 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3778 visitInstruction(I);
3779}
3780
3781/// visitPHINode - Ensure that a PHI node is well formed.
3782///
3783void Verifier::visitPHINode(PHINode &PN) {
3784 // Ensure that the PHI nodes are all grouped together at the top of the block.
3785 // This can be tested by checking whether the instruction before this is
3786 // either nonexistent (because this is begin()) or is a PHI node. If not,
3787 // then there is some other instruction before a PHI.
3788 Check(&PN == &PN.getParent()->front() ||
3789 isa<PHINode>(--BasicBlock::iterator(&PN)),
3790 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3791
3792 // Check that a PHI doesn't yield a Token.
3793 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3794
3795 // Check that all of the values of the PHI node have the same type as the
3796 // result.
3797 for (Value *IncValue : PN.incoming_values()) {
3798 Check(PN.getType() == IncValue->getType(),
3799 "PHI node operands are not the same type as the result!", &PN);
3800 }
3801
3802 // All other PHI node constraints are checked in the visitBasicBlock method.
3803
3804 visitInstruction(PN);
3805}
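// For illustration, together with the per-block checks in visitBasicBlock, a
// phi must lead its block, match its result type, and carry exactly one entry
// per predecessor; in a block reached from %a and %b,
//   %r = phi i32 [ 0, %a ]
// is rejected because the entry for %b is missing.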
3806
3807void Verifier::visitCallBase(CallBase &Call) {
3808 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3809 "Called function must be a pointer!", Call);
3810 FunctionType *FTy = Call.getFunctionType();
3811
3812 // Verify that the correct number of arguments are being passed
3813 if (FTy->isVarArg())
3814 Check(Call.arg_size() >= FTy->getNumParams(),
3815 "Called function requires more parameters than were provided!", Call);
3816 else
3817 Check(Call.arg_size() == FTy->getNumParams(),
3818 "Incorrect number of arguments passed to called function!", Call);
3819
3820 // Verify that all arguments to the call match the function type.
3821 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3822 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3823 "Call parameter type does not match function signature!",
3824 Call.getArgOperand(i), FTy->getParamType(i), Call);
3825
3826 AttributeList Attrs = Call.getAttributes();
3827
3828 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3829 "Attribute after last parameter!", Call);
3830
3831 Function *Callee =
3832 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3833 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3834 if (IsIntrinsic)
3835 Check(Callee->getValueType() == FTy,
3836 "Intrinsic called with incompatible signature", Call);
3837
3838 // Verify if the calling convention of the callee is callable.
3839 Check(isCallableCC(Call.getCallingConv()),
3840 "calling convention does not permit calls", Call);
3841
3842 // Disallow passing/returning values with alignment higher than we can
3843 // represent.
3844 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3845 // necessary.
3846 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3847 if (!Ty->isSized())
3848 return;
3849 Align ABIAlign = DL.getABITypeAlign(Ty);
3850 Check(ABIAlign.value() <= Value::MaximumAlignment,
3851 "Incorrect alignment of " + Message + " to called function!", Call);
3852 };
3853
3854 if (!IsIntrinsic) {
3855 VerifyTypeAlign(FTy->getReturnType(), "return type");
3856 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3857 Type *Ty = FTy->getParamType(i);
3858 VerifyTypeAlign(Ty, "argument passed");
3859 }
3860 }
3861
3862 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3863 // Don't allow speculatable on call sites, unless the underlying function
3864 // declaration is also speculatable.
3865 Check(Callee && Callee->isSpeculatable(),
3866 "speculatable attribute may not apply to call sites", Call);
3867 }
3868
3869 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3870 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3871 "preallocated as a call site attribute can only be on "
3872 "llvm.call.preallocated.arg");
3873 }
3874
3875 // Verify call attributes.
3876 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3877
3878 // Conservatively check the inalloca argument.
3879 // We have a bug if we can find that there is an underlying alloca without
3880 // inalloca.
3881 if (Call.hasInAllocaArgument()) {
3882 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3883 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3884 Check(AI->isUsedWithInAlloca(),
3885 "inalloca argument for call has mismatched alloca", AI, Call);
3886 }
3887
3888 // For each argument of the callsite, if it has the swifterror argument,
3889 // make sure the underlying alloca/parameter it comes from has a swifterror as
3890 // well.
3891 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3892 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3893 Value *SwiftErrorArg = Call.getArgOperand(i);
3894 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3895 Check(AI->isSwiftError(),
3896 "swifterror argument for call has mismatched alloca", AI, Call);
3897 continue;
3898 }
3899 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3900 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3901 SwiftErrorArg, Call);
3902 Check(ArgI->hasSwiftErrorAttr(),
3903 "swifterror argument for call has mismatched parameter", ArgI,
3904 Call);
3905 }
3906
3907 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3908 // Don't allow immarg on call sites, unless the underlying declaration
3909 // also has the matching immarg.
3910 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3911 "immarg may not apply only to call sites", Call.getArgOperand(i),
3912 Call);
3913 }
3914
3915 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3916 Value *ArgVal = Call.getArgOperand(i);
3917 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3918 "immarg operand has non-immediate parameter", ArgVal, Call);
3919
3920 // If the imm-arg is an integer and also has a range attached,
3921 // check if the given value is within the range.
3922 if (Call.paramHasAttr(i, Attribute::Range)) {
3923 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3924 const ConstantRange &CR =
3925 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3926 Check(CR.contains(CI->getValue()),
3927 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3928 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3929 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3930 Call);
3931 }
3932 }
3933 }
3934
3935 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3936 Value *ArgVal = Call.getArgOperand(i);
3937 bool hasOB =
3938 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3939 bool isMustTail = Call.isMustTailCall();
3940 Check(hasOB != isMustTail,
3941 "preallocated operand either requires a preallocated bundle or "
3942 "the call to be musttail (but not both)",
3943 ArgVal, Call);
3944 }
3945 }
3946
3947 if (FTy->isVarArg()) {
3948 // FIXME? is 'nest' even legal here?
3949 bool SawNest = false;
3950 bool SawReturned = false;
3951
3952 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3953 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3954 SawNest = true;
3955 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3956 SawReturned = true;
3957 }
3958
3959 // Check attributes on the varargs part.
3960 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3961 Type *Ty = Call.getArgOperand(Idx)->getType();
3962 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3963 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3964
3965 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3966 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3967 SawNest = true;
3968 }
3969
3970 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3971 Check(!SawReturned, "More than one parameter has attribute returned!",
3972 Call);
3973 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3974 "Incompatible argument and return types for 'returned' "
3975 "attribute",
3976 Call);
3977 SawReturned = true;
3978 }
3979
3980 // Statepoint intrinsic is vararg but the wrapped function may be not.
3981 // Allow sret here and check the wrapped function in verifyStatepoint.
3982 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3983 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3984 "Attribute 'sret' cannot be used for vararg call arguments!",
3985 Call);
3986
3987 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3988 Check(Idx == Call.arg_size() - 1,
3989 "inalloca isn't on the last argument!", Call);
3990 }
3991 }
3992
3993 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3994 if (!IsIntrinsic) {
3995 for (Type *ParamTy : FTy->params()) {
3996 Check(!ParamTy->isMetadataTy(),
3997 "Function has metadata parameter but isn't an intrinsic", Call);
3998 Check(!ParamTy->isTokenLikeTy(),
3999 "Function has token parameter but isn't an intrinsic", Call);
4000 }
4001 }
4002
4003 // Verify that indirect calls don't return tokens.
4004 if (!Call.getCalledFunction()) {
4005 Check(!FTy->getReturnType()->isTokenLikeTy(),
4006 "Return type cannot be token for indirect call!");
4007 Check(!FTy->getReturnType()->isX86_AMXTy(),
4008 "Return type cannot be x86_amx for indirect call!");
4009 }
4010
4011 if (Intrinsic::ID ID = Call.getIntrinsicID())
4012 visitIntrinsicCall(ID, Call);
4013
4014 // Verify that a callsite has at most one "deopt", at most one "funclet", at
4015 // most one "gc-transition", at most one "cfguardtarget", at most one
4016 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
4017 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
4018 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
4019 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
4020 FoundPtrauthBundle = false, FoundKCFIBundle = false,
4021 FoundAttachedCallBundle = false;
4022 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
4023 OperandBundleUse BU = Call.getOperandBundleAt(i);
4024 uint32_t Tag = BU.getTagID();
4025 if (Tag == LLVMContext::OB_deopt) {
4026 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
4027 FoundDeoptBundle = true;
4028 } else if (Tag == LLVMContext::OB_gc_transition) {
4029 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
4030 Call);
4031 FoundGCTransitionBundle = true;
4032 } else if (Tag == LLVMContext::OB_funclet) {
4033 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
4034 FoundFuncletBundle = true;
4035 Check(BU.Inputs.size() == 1,
4036 "Expected exactly one funclet bundle operand", Call);
4037 Check(isa<FuncletPadInst>(BU.Inputs.front()),
4038 "Funclet bundle operands should correspond to a FuncletPadInst",
4039 Call);
4040 } else if (Tag == LLVMContext::OB_cfguardtarget) {
4041 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
4042 Call);
4043 FoundCFGuardTargetBundle = true;
4044 Check(BU.Inputs.size() == 1,
4045 "Expected exactly one cfguardtarget bundle operand", Call);
4046 } else if (Tag == LLVMContext::OB_ptrauth) {
4047 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
4048 FoundPtrauthBundle = true;
4049 Check(BU.Inputs.size() == 2,
4050 "Expected exactly two ptrauth bundle operands", Call);
4051 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4052 BU.Inputs[0]->getType()->isIntegerTy(32),
4053 "Ptrauth bundle key operand must be an i32 constant", Call);
4054 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
4055 "Ptrauth bundle discriminator operand must be an i64", Call);
4056 } else if (Tag == LLVMContext::OB_kcfi) {
4057 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
4058 FoundKCFIBundle = true;
4059 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
4060 Call);
4061 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4062 BU.Inputs[0]->getType()->isIntegerTy(32),
4063 "Kcfi bundle operand must be an i32 constant", Call);
4064 } else if (Tag == LLVMContext::OB_preallocated) {
4065 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
4066 Call);
4067 FoundPreallocatedBundle = true;
4068 Check(BU.Inputs.size() == 1,
4069 "Expected exactly one preallocated bundle operand", Call);
4070 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
4071 Check(Input &&
4072 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
4073 "\"preallocated\" argument must be a token from "
4074 "llvm.call.preallocated.setup",
4075 Call);
4076 } else if (Tag == LLVMContext::OB_gc_live) {
4077 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
4078 FoundGCLiveBundle = true;
4079 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
4080 Check(!FoundAttachedCallBundle,
4081 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
4082 FoundAttachedCallBundle = true;
4083 verifyAttachedCallBundle(Call, BU);
4084 }
4085 }
4086
4087 // Verify that callee and callsite agree on whether to use pointer auth.
4088 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
4089 "Direct call cannot have a ptrauth bundle", Call);
4090
4091 // Verify that each inlinable callsite of a debug-info-bearing function in a
4092 // debug-info-bearing function has a debug location attached to it. Failure to
4093 // do so causes assertion failures when the inliner sets up inline scope info
4094 // (Interposable functions are not inlinable, neither are functions without
4095 // definitions.)
4096 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
4097 Call.getCalledFunction()->getSubprogram() &&
4098 !Call.getCalledFunction()->isDeclaration() &&
4099 !Call.getCalledFunction()->isInterposable())
4100 CheckDI(Call.getDebugLoc(),
4101 "inlinable function call in a function with "
4102 "debug info must have a !dbg location",
4103 Call);
4104
4105 if (Call.isInlineAsm())
4106 verifyInlineAsmCall(Call);
4107
4108 ConvergenceVerifyHelper.visit(Call);
4109
4110 visitInstruction(Call);
4111}
4112
4113void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4114 StringRef Context) {
4115 Check(!Attrs.contains(Attribute::InAlloca),
4116 Twine("inalloca attribute not allowed in ") + Context);
4117 Check(!Attrs.contains(Attribute::InReg),
4118 Twine("inreg attribute not allowed in ") + Context);
4119 Check(!Attrs.contains(Attribute::SwiftError),
4120 Twine("swifterror attribute not allowed in ") + Context);
4121 Check(!Attrs.contains(Attribute::Preallocated),
4122 Twine("preallocated attribute not allowed in ") + Context);
4123 Check(!Attrs.contains(Attribute::ByRef),
4124 Twine("byref attribute not allowed in ") + Context);
4125}
4126
4127/// Two types are "congruent" if they are identical, or if they are both pointer
4128/// types with different pointee types and the same address space.
4129static bool isTypeCongruent(Type *L, Type *R) {
4130 if (L == R)
4131 return true;
4132 PointerType *PL = dyn_cast<PointerType>(L);
4133 PointerType *PR = dyn_cast<PointerType>(R);
4134 if (!PL || !PR)
4135 return false;
4136 return PL->getAddressSpace() == PR->getAddressSpace();
4137}
4138
4139static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4140 static const Attribute::AttrKind ABIAttrs[] = {
4141 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4142 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4143 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4144 Attribute::ByRef};
4145 AttrBuilder Copy(C);
4146 for (auto AK : ABIAttrs) {
4147 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4148 if (Attr.isValid())
4149 Copy.addAttribute(Attr);
4150 }
4151
4152 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4153 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4154 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4155 Attrs.hasParamAttr(I, Attribute::ByRef)))
4156 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4157 return Copy;
4158}
4159
4160void Verifier::verifyMustTailCall(CallInst &CI) {
4161 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4162
4163 Function *F = CI.getParent()->getParent();
4164 FunctionType *CallerTy = F->getFunctionType();
4165 FunctionType *CalleeTy = CI.getFunctionType();
4166 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4167 "cannot guarantee tail call due to mismatched varargs", &CI);
4168 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4169 "cannot guarantee tail call due to mismatched return types", &CI);
4170
4171 // - The calling conventions of the caller and callee must match.
4172 Check(F->getCallingConv() == CI.getCallingConv(),
4173 "cannot guarantee tail call due to mismatched calling conv", &CI);
4174
4175 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4176 // or a pointer bitcast followed by a ret instruction.
4177 // - The ret instruction must return the (possibly bitcasted) value
4178 // produced by the call or void.
4179 Value *RetVal = &CI;
4181
4182 // Handle the optional bitcast.
4183 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4184 Check(BI->getOperand(0) == RetVal,
4185 "bitcast following musttail call must use the call", BI);
4186 RetVal = BI;
4187 Next = BI->getNextNode();
4188 }
4189
4190 // Check the return.
4191 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4192 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4193 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4194 isa<UndefValue>(Ret->getReturnValue()),
4195 "musttail call result must be returned", Ret);
4196
4197 AttributeList CallerAttrs = F->getAttributes();
4198 AttributeList CalleeAttrs = CI.getAttributes();
4199 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4200 CI.getCallingConv() == CallingConv::Tail) {
4201 StringRef CCName =
4202 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4203
4204 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4205 // are allowed in swifttailcc call
4206 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4207 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4208 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4209 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4210 }
4211 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4212 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4213 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4214 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4215 }
4216 // - Varargs functions are not allowed
4217 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4218 " tail call for varargs function");
4219 return;
4220 }
4221
4222 // - The caller and callee prototypes must match. Pointer types of
4223 // parameters or return types may differ in pointee type, but not
4224 // address space.
4225 if (!CI.getIntrinsicID()) {
4226 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4227 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4228 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4229 Check(
4230 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4231 "cannot guarantee tail call due to mismatched parameter types", &CI);
4232 }
4233 }
4234
4235 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4236 // returned, preallocated, and inalloca, must match.
4237 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4238 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4239 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4240 Check(CallerABIAttrs == CalleeABIAttrs,
4241 "cannot guarantee tail call due to mismatched ABI impacting "
4242 "function attributes",
4243 &CI, CI.getOperand(I));
4244 }
4245}
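// For illustration, a well-formed musttail site looks like
//   %r = musttail call i32 @callee(i32 %x)
//   ret i32 %r
// Any other instruction between the call and the ret, a mismatched calling
// convention or prototype, or returning a different value fails the checks
// above.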
4246
4247void Verifier::visitCallInst(CallInst &CI) {
4248 visitCallBase(CI);
4249
4250 if (CI.isMustTailCall())
4251 verifyMustTailCall(CI);
4252}
4253
4254void Verifier::visitInvokeInst(InvokeInst &II) {
4255 visitCallBase(II);
4256
4257 // Verify that the first non-PHI instruction of the unwind destination is an
4258 // exception handling instruction.
4259 Check(
4260 II.getUnwindDest()->isEHPad(),
4261 "The unwind destination does not have an exception handling instruction!",
4262 &II);
4263
4264 visitTerminator(II);
4265}
4266
4267/// visitUnaryOperator - Check the argument to the unary operator.
4268///
4269void Verifier::visitUnaryOperator(UnaryOperator &U) {
4270 Check(U.getType() == U.getOperand(0)->getType(),
4271 "Unary operators must have same type for "
4272 "operands and result!",
4273 &U);
4274
4275 switch (U.getOpcode()) {
4276 // Check that floating-point arithmetic operators are only used with
4277 // floating-point operands.
4278 case Instruction::FNeg:
4279 Check(U.getType()->isFPOrFPVectorTy(),
4280 "FNeg operator only works with float types!", &U);
4281 break;
4282 default:
4283 llvm_unreachable("Unknown UnaryOperator opcode!");
4284 }
4285
4286 visitInstruction(U);
4287}
4288
4289/// visitBinaryOperator - Check that both arguments to the binary operator are
4290/// of the same type!
4291///
4292void Verifier::visitBinaryOperator(BinaryOperator &B) {
4293 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4294 "Both operands to a binary operator are not of the same type!", &B);
4295
4296 switch (B.getOpcode()) {
4297 // Check that integer arithmetic operators are only used with
4298 // integral operands.
4299 case Instruction::Add:
4300 case Instruction::Sub:
4301 case Instruction::Mul:
4302 case Instruction::SDiv:
4303 case Instruction::UDiv:
4304 case Instruction::SRem:
4305 case Instruction::URem:
4306 Check(B.getType()->isIntOrIntVectorTy(),
4307 "Integer arithmetic operators only work with integral types!", &B);
4308 Check(B.getType() == B.getOperand(0)->getType(),
4309 "Integer arithmetic operators must have same type "
4310 "for operands and result!",
4311 &B);
4312 break;
4313 // Check that floating-point arithmetic operators are only used with
4314 // floating-point operands.
4315 case Instruction::FAdd:
4316 case Instruction::FSub:
4317 case Instruction::FMul:
4318 case Instruction::FDiv:
4319 case Instruction::FRem:
4320 Check(B.getType()->isFPOrFPVectorTy(),
4321 "Floating-point arithmetic operators only work with "
4322 "floating-point types!",
4323 &B);
4324 Check(B.getType() == B.getOperand(0)->getType(),
4325 "Floating-point arithmetic operators must have same type "
4326 "for operands and result!",
4327 &B);
4328 break;
4329 // Check that logical operators are only used with integral operands.
4330 case Instruction::And:
4331 case Instruction::Or:
4332 case Instruction::Xor:
4333 Check(B.getType()->isIntOrIntVectorTy(),
4334 "Logical operators only work with integral types!", &B);
4335 Check(B.getType() == B.getOperand(0)->getType(),
4336 "Logical operators must have same type for operands and result!", &B);
4337 break;
4338 case Instruction::Shl:
4339 case Instruction::LShr:
4340 case Instruction::AShr:
4341 Check(B.getType()->isIntOrIntVectorTy(),
4342 "Shifts only work with integral types!", &B);
4343 Check(B.getType() == B.getOperand(0)->getType(),
4344 "Shift return type must be same as operands!", &B);
4345 break;
4346 default:
4347 llvm_unreachable("Unknown BinaryOperator opcode!");
4348 }
4349
4350 visitInstruction(B);
4351}
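// For illustration, both operands and the result of a binary operator share
// one type, and the integer/floating-point split is enforced, e.g.
//   %s = add i32 %a, %b              ; accepted
//   %t = fadd <2 x float> %u, %v     ; accepted (FP vector)
// whereas an fadd on i32 operands, or an add whose operands differ in type,
// is rejected.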
4352
4353void Verifier::visitICmpInst(ICmpInst &IC) {
4354 // Check that the operands are the same type
4355 Type *Op0Ty = IC.getOperand(0)->getType();
4356 Type *Op1Ty = IC.getOperand(1)->getType();
4357 Check(Op0Ty == Op1Ty,
4358 "Both operands to ICmp instruction are not of the same type!", &IC);
4359 // Check that the operands are the right type
4360 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4361 "Invalid operand types for ICmp instruction", &IC);
4362 // Check that the predicate is valid.
4363 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4364
4365 visitInstruction(IC);
4366}
4367
4368void Verifier::visitFCmpInst(FCmpInst &FC) {
4369 // Check that the operands are the same type
4370 Type *Op0Ty = FC.getOperand(0)->getType();
4371 Type *Op1Ty = FC.getOperand(1)->getType();
4372 Check(Op0Ty == Op1Ty,
4373 "Both operands to FCmp instruction are not of the same type!", &FC);
4374 // Check that the operands are the right type
4375 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4376 &FC);
4377 // Check that the predicate is valid.
4378 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4379
4380 visitInstruction(FC);
4381}
4382
4383void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4384 Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
4385 "Invalid extractelement operands!", &EI);
4386 visitInstruction(EI);
4387}
4388
4389void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4390 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4391 IE.getOperand(2)),
4392 "Invalid insertelement operands!", &IE);
4393 visitInstruction(IE);
4394}
4395
4396void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4397 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4398 SV.getShuffleMask()),
4399 "Invalid shufflevector operands!", &SV);
4400 visitInstruction(SV);
4401}
4402
4403void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4404 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4405
4406 Check(isa<PointerType>(TargetTy),
4407 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4408 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4409
4410 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4411 Check(!STy->isScalableTy(),
4412 "getelementptr cannot target structure that contains scalable vector "
4413 "type",
4414 &GEP);
4415 }
4416
4417 SmallVector<Value *, 16> Idxs(GEP.indices());
4418 Check(
4419 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4420 "GEP indexes must be integers", &GEP);
4421 Type *ElTy =
4422 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4423 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4424
4425 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4426
4427 Check(PtrTy && GEP.getResultElementType() == ElTy,
4428 "GEP is not of right type for indices!", &GEP, ElTy);
4429
4430 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4431 // Additional checks for vector GEPs.
4432 ElementCount GEPWidth = GEPVTy->getElementCount();
4433 if (GEP.getPointerOperandType()->isVectorTy())
4434 Check(
4435 GEPWidth ==
4436 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4437 "Vector GEP result width doesn't match operand's", &GEP);
4438 for (Value *Idx : Idxs) {
4439 Type *IndexTy = Idx->getType();
4440 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4441 ElementCount IndexWidth = IndexVTy->getElementCount();
4442 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4443 }
4444 Check(IndexTy->isIntOrIntVectorTy(),
4445 "All GEP indices should be of integer type");
4446 }
4447 }
4448
4449 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4450 "GEP address space doesn't match type", &GEP);
4451
4452 visitInstruction(GEP);
4453}
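// For illustration, GEP indices must be integers (or integer vectors matching
// the GEP's vector width) and the source element type must be sized, e.g.
//   %p = getelementptr i32, ptr %base, i64 %i          ; accepted
//   %q = getelementptr i32, ptr %base, double 1.0      ; rejected: non-integer index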
4454
4455static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4456 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4457}
4458
4459/// Verify !range and !absolute_symbol metadata. These have the same
4460/// restrictions, except !absolute_symbol allows the full set.
4461void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4462 Type *Ty, RangeLikeMetadataKind Kind) {
4463 unsigned NumOperands = Range->getNumOperands();
4464 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4465 unsigned NumRanges = NumOperands / 2;
4466 Check(NumRanges >= 1, "It should have at least one range!", Range);
4467
4468 ConstantRange LastRange(1, true); // Dummy initial value
4469 for (unsigned i = 0; i < NumRanges; ++i) {
4470 ConstantInt *Low =
4471 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4472 Check(Low, "The lower limit must be an integer!", Low);
4473 ConstantInt *High =
4474 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4475 Check(High, "The upper limit must be an integer!", High);
4476
4477 Check(High->getType() == Low->getType(), "Range pair types must match!",
4478 &I);
4479
4480 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4481 Check(High->getType()->isIntegerTy(32),
4482 "noalias.addrspace type must be i32!", &I);
4483 } else {
4484 Check(High->getType() == Ty->getScalarType(),
4485 "Range types must match instruction type!", &I);
4486 }
4487
4488 APInt HighV = High->getValue();
4489 APInt LowV = Low->getValue();
4490
4491 // ConstantRange asserts if the ranges are the same except for the min/max
4492 // value. Leave the cases it tolerates for the empty range error below.
4493 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4494 "The upper and lower limits cannot be the same value", &I);
4495
4496 ConstantRange CurRange(LowV, HighV);
4497 Check(!CurRange.isEmptySet() &&
4498 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4499 !CurRange.isFullSet()),
4500 "Range must not be empty!", Range);
4501 if (i != 0) {
4502 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4503 "Intervals are overlapping", Range);
4504 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4505 Range);
4506 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4507 Range);
4508 }
4509 LastRange = ConstantRange(LowV, HighV);
4510 }
4511 if (NumRanges > 2) {
4512 APInt FirstLow =
4513 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4514 APInt FirstHigh =
4515 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4516 ConstantRange FirstRange(FirstLow, FirstHigh);
4517 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4518 "Intervals are overlapping", Range);
4519 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4520 Range);
4521 }
4522}
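// For illustration, !range metadata is a list of [Low, High) pairs of the
// value's type that must be non-empty, in order, non-overlapping and
// non-contiguous, e.g.
//   %v = load i8, ptr %p, !range !0
//   !0 = !{i8 0, i8 2, i8 10, i8 20}   ; two disjoint ranges [0,2) and [10,20)
// A pair like "i8 2, i8 2" (empty) or adjacent pairs such as [0,2) and [2,4)
// are rejected by the checks above.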
4523
4524void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4525 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4526 "precondition violation");
4527 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4528}
4529
4530void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4531 Type *Ty) {
4532 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4533 "precondition violation");
4534 verifyRangeLikeMetadata(I, Range, Ty,
4535 RangeLikeMetadataKind::NoaliasAddrspace);
4536}
4537
4538void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4539 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4540 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4541 Check(!(Size & (Size - 1)),
4542 "atomic memory access' operand must have a power-of-two size", Ty, I);
4543}
4544
4545void Verifier::visitLoadInst(LoadInst &LI) {
4546 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4547 Check(PTy, "Load operand must be a pointer.", &LI);
4548 Type *ElTy = LI.getType();
4549 if (MaybeAlign A = LI.getAlign()) {
4550 Check(A->value() <= Value::MaximumAlignment,
4551 "huge alignment values are unsupported", &LI);
4552 }
4553 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4554 if (LI.isAtomic()) {
4555 Check(LI.getOrdering() != AtomicOrdering::Release &&
4556 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4557 "Load cannot have Release ordering", &LI);
4558 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4559 ElTy->getScalarType()->isFloatingPointTy(),
4560 "atomic load operand must have integer, pointer, floating point, "
4561 "or vector type!",
4562 ElTy, &LI);
4563
4564 checkAtomicMemAccessSize(ElTy, &LI);
4565 } else {
4566 Check(LI.getSyncScopeID() == SyncScope::System,
4567 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4568 }
4569
4570 visitInstruction(LI);
4571}
4572
4573void Verifier::visitStoreInst(StoreInst &SI) {
4574 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4575 Check(PTy, "Store operand must be a pointer.", &SI);
4576 Type *ElTy = SI.getOperand(0)->getType();
4577 if (MaybeAlign A = SI.getAlign()) {
4578 Check(A->value() <= Value::MaximumAlignment,
4579 "huge alignment values are unsupported", &SI);
4580 }
4581 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4582 if (SI.isAtomic()) {
4583 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4584 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4585 "Store cannot have Acquire ordering", &SI);
4586 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4588 "atomic store operand must have integer, pointer, floating point, "
4589 "or vector type!",
4590 ElTy, &SI);
4591 checkAtomicMemAccessSize(ElTy, &SI);
4592 } else {
4593 Check(SI.getSyncScopeID() == SyncScope::System,
4594 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4595 }
4596 visitInstruction(SI);
4597}
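// Illustrative note (editor's sketch, not upstream code) for the atomic
// ordering rules enforced above:
//   %v = load atomic i32, ptr %p acquire, align 4   ; ok
//   store atomic i32 %v, ptr %q release, align 4    ; ok
//   %w = load atomic i32, ptr %p release, align 4   ; rejected: loads cannot be release
//   store atomic i32 %v, ptr %q acquire, align 4    ; rejected: stores cannot be acquire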
4598
4599/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4600void Verifier::verifySwiftErrorCall(CallBase &Call,
4601 const Value *SwiftErrorVal) {
4602 for (const auto &I : llvm::enumerate(Call.args())) {
4603 if (I.value() == SwiftErrorVal) {
4604 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4605 "swifterror value when used in a callsite should be marked "
4606 "with swifterror attribute",
4607 SwiftErrorVal, Call);
4608 }
4609 }
4610}
4611
4612void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4613 // Check that swifterror value is only used by loads, stores, or as
4614 // a swifterror argument.
4615 for (const User *U : SwiftErrorVal->users()) {
4616 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4617 isa<InvokeInst>(U),
4618 "swifterror value can only be loaded and stored from, or "
4619 "as a swifterror argument!",
4620 SwiftErrorVal, U);
4621 // If it is used by a store, check it is the second operand.
4622 if (auto StoreI = dyn_cast<StoreInst>(U))
4623 Check(StoreI->getOperand(1) == SwiftErrorVal,
4624 "swifterror value should be the second operand when used "
4625 "by stores",
4626 SwiftErrorVal, U);
4627 if (auto *Call = dyn_cast<CallBase>(U))
4628 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4629 }
4630}
4631
4632void Verifier::visitAllocaInst(AllocaInst &AI) {
4633 Type *Ty = AI.getAllocatedType();
4634 SmallPtrSet<Type*, 4> Visited;
4635 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4636 // Check if it's a target extension type that disallows being used on the
4637 // stack.
4639 "Alloca has illegal target extension type", &AI);
4641 "Alloca array size must have integer type", &AI);
4642 if (MaybeAlign A = AI.getAlign()) {
4643 Check(A->value() <= Value::MaximumAlignment,
4644 "huge alignment values are unsupported", &AI);
4645 }
4646
4647 if (AI.isSwiftError()) {
4648 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4650 "swifterror alloca must not be array allocation", &AI);
4651 verifySwiftErrorValue(&AI);
4652 }
4653
4654 if (TT.isAMDGPU()) {
4656 "alloca on amdgpu must be in addrspace(5)", &AI);
4657 }
4658
4659 visitInstruction(AI);
4660}
4661
4662void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4663 Type *ElTy = CXI.getOperand(1)->getType();
4664 Check(ElTy->isIntOrPtrTy(),
4665 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4666 checkAtomicMemAccessSize(ElTy, &CXI);
4667 visitInstruction(CXI);
4668}
4669
4670void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4671 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4672 "atomicrmw instructions cannot be unordered.", &RMWI);
4673 auto Op = RMWI.getOperation();
4674 Type *ElTy = RMWI.getOperand(1)->getType();
4675 if (Op == AtomicRMWInst::Xchg) {
4676 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4677 ElTy->isPointerTy(),
4678 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4679 " operand must have integer or floating point type!",
4680 &RMWI, ElTy);
4681 } else if (AtomicRMWInst::isFPOperation(Op)) {
4683 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4684 " operand must have floating-point or fixed vector of floating-point "
4685 "type!",
4686 &RMWI, ElTy);
4687 } else {
4688 Check(ElTy->isIntegerTy(),
4689 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4690 " operand must have integer type!",
4691 &RMWI, ElTy);
4692 }
4693 checkAtomicMemAccessSize(ElTy, &RMWI);
4695 "Invalid binary operation!", &RMWI);
4696 visitInstruction(RMWI);
4697}
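// Illustrative note (editor's sketch, not upstream code) of the operand-type
// rules checked above:
//   atomicrmw xchg ptr %p, i32 1 seq_cst               ; ok: xchg allows int, FP, or pointer
//   atomicrmw fadd ptr %p, float 1.000000e+00 seq_cst  ; ok: FP operation with FP operand
//   atomicrmw add ptr %p, float 1.000000e+00 seq_cst   ; rejected: add requires an integer operand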
4698
4699void Verifier::visitFenceInst(FenceInst &FI) {
4700 const AtomicOrdering Ordering = FI.getOrdering();
4701 Check(Ordering == AtomicOrdering::Acquire ||
4702 Ordering == AtomicOrdering::Release ||
4703 Ordering == AtomicOrdering::AcquireRelease ||
4704 Ordering == AtomicOrdering::SequentiallyConsistent,
4705 "fence instructions may only have acquire, release, acq_rel, or "
4706 "seq_cst ordering.",
4707 &FI);
4708 visitInstruction(FI);
4709}
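// Illustrative note (editor's sketch, not upstream code): `fence acquire`,
// `fence release`, `fence acq_rel` and `fence seq_cst` are accepted; a
// monotonic or unordered fence has no defined meaning and is rejected above.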
4710
4711void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4712 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4713 EVI.getIndices()) == EVI.getType(),
4714 "Invalid ExtractValueInst operands!", &EVI);
4715
4716 visitInstruction(EVI);
4717}
4718
4719void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4720 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4721 IVI.getIndices()) ==
4722 IVI.getOperand(1)->getType(),
4723 "Invalid InsertValueInst operands!", &IVI);
4724
4725 visitInstruction(IVI);
4726}
4727
4728static Value *getParentPad(Value *EHPad) {
4729 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4730 return FPI->getParentPad();
4731
4732 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4733}
4734
4735void Verifier::visitEHPadPredecessors(Instruction &I) {
4736 assert(I.isEHPad());
4737
4738 BasicBlock *BB = I.getParent();
4739 Function *F = BB->getParent();
4740
4741 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4742
4743 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4744 // The landingpad instruction defines its parent as a landing pad block. The
4745 // landing pad block may be branched to only by the unwind edge of an
4746 // invoke.
4747 for (BasicBlock *PredBB : predecessors(BB)) {
4748 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4749 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4750 "Block containing LandingPadInst must be jumped to "
4751 "only by the unwind edge of an invoke.",
4752 LPI);
4753 }
4754 return;
4755 }
4756 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4757 if (!pred_empty(BB))
4758 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4759 "Block containg CatchPadInst must be jumped to "
4760 "only by its catchswitch.",
4761 CPI);
4762 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4763 "Catchswitch cannot unwind to one of its catchpads",
4764 CPI->getCatchSwitch(), CPI);
4765 return;
4766 }
4767
4768 // Verify that each pred has a legal terminator with a legal to/from EH
4769 // pad relationship.
4770 Instruction *ToPad = &I;
4771 Value *ToPadParent = getParentPad(ToPad);
4772 for (BasicBlock *PredBB : predecessors(BB)) {
4773 Instruction *TI = PredBB->getTerminator();
4774 Value *FromPad;
4775 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4776 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4777 "EH pad must be jumped to via an unwind edge", ToPad, II);
4778 auto *CalledFn =
4779 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4780 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4781 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4782 continue;
4783 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4784 FromPad = Bundle->Inputs[0];
4785 else
4786 FromPad = ConstantTokenNone::get(II->getContext());
4787 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4788 FromPad = CRI->getOperand(0);
4789 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4790 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4791 FromPad = CSI;
4792 } else {
4793 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4794 }
4795
4796 // The edge may exit from zero or more nested pads.
4797 SmallPtrSet<Value *, 8> Seen;
4798 for (;; FromPad = getParentPad(FromPad)) {
4799 Check(FromPad != ToPad,
4800 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4801 if (FromPad == ToPadParent) {
4802 // This is a legal unwind edge.
4803 break;
4804 }
4805 Check(!isa<ConstantTokenNone>(FromPad),
4806 "A single unwind edge may only enter one EH pad", TI);
4807 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4808 FromPad);
4809
4810 // This will be diagnosed on the corresponding instruction already. We
4811 // need the extra check here to make sure getParentPad() works.
4812 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4813 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4814 }
4815 }
4816}
4817
4818void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4819 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4820 // isn't a cleanup.
4821 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4822 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4823
4824 visitEHPadPredecessors(LPI);
4825
4826 if (!LandingPadResultTy)
4827 LandingPadResultTy = LPI.getType();
4828 else
4829 Check(LandingPadResultTy == LPI.getType(),
4830 "The landingpad instruction should have a consistent result type "
4831 "inside a function.",
4832 &LPI);
4833
4834 Function *F = LPI.getParent()->getParent();
4835 Check(F->hasPersonalityFn(),
4836 "LandingPadInst needs to be in a function with a personality.", &LPI);
4837
4838 // The landingpad instruction must be the first non-PHI instruction in the
4839 // block.
4840 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4841 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4842
4843 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4844 Constant *Clause = LPI.getClause(i);
4845 if (LPI.isCatch(i)) {
4846 Check(isa<PointerType>(Clause->getType()),
4847 "Catch operand does not have pointer type!", &LPI);
4848 } else {
4849 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4851 "Filter operand is not an array of constants!", &LPI);
4852 }
4853 }
4854
4855 visitInstruction(LPI);
4856}
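// Illustrative note (editor's sketch, not upstream code): a minimal landing
// pad that satisfies the checks above, assuming the usual Itanium C++
// personality and typeinfo symbols:
//   define void @g() personality ptr @__gxx_personality_v0 { ...
//   lpad:                                              ; reached only via invoke unwind edges
//     %lp = landingpad { ptr, i32 } catch ptr @_ZTIi   ; at least one clause, or 'cleanup'
//   ... }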
4857
4858void Verifier::visitResumeInst(ResumeInst &RI) {
4860 "ResumeInst needs to be in a function with a personality.", &RI);
4861
4862 if (!LandingPadResultTy)
4863 LandingPadResultTy = RI.getValue()->getType();
4864 else
4865 Check(LandingPadResultTy == RI.getValue()->getType(),
4866 "The resume instruction should have a consistent result type "
4867 "inside a function.",
4868 &RI);
4869
4870 visitTerminator(RI);
4871}
4872
4873void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4874 BasicBlock *BB = CPI.getParent();
4875
4876 Function *F = BB->getParent();
4877 Check(F->hasPersonalityFn(),
4878 "CatchPadInst needs to be in a function with a personality.", &CPI);
4879
4881 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4882 CPI.getParentPad());
4883
4884 // The catchpad instruction must be the first non-PHI instruction in the
4885 // block.
4886 Check(&*BB->getFirstNonPHIIt() == &CPI,
4887 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4888
4889 visitEHPadPredecessors(CPI);
4890 visitFuncletPadInst(CPI);
4891}
4892
4893void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4894 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4895 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4896 CatchReturn.getOperand(0));
4897
4898 visitTerminator(CatchReturn);
4899}
4900
4901void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4902 BasicBlock *BB = CPI.getParent();
4903
4904 Function *F = BB->getParent();
4905 Check(F->hasPersonalityFn(),
4906 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4907
4908 // The cleanuppad instruction must be the first non-PHI instruction in the
4909 // block.
4910 Check(&*BB->getFirstNonPHIIt() == &CPI,
4911 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4912
4913 auto *ParentPad = CPI.getParentPad();
4914 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4915 "CleanupPadInst has an invalid parent.", &CPI);
4916
4917 visitEHPadPredecessors(CPI);
4918 visitFuncletPadInst(CPI);
4919}
4920
4921void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4922 User *FirstUser = nullptr;
4923 Value *FirstUnwindPad = nullptr;
4924 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4925 SmallPtrSet<FuncletPadInst *, 8> Seen;
4926
4927 while (!Worklist.empty()) {
4928 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4929 Check(Seen.insert(CurrentPad).second,
4930 "FuncletPadInst must not be nested within itself", CurrentPad);
4931 Value *UnresolvedAncestorPad = nullptr;
4932 for (User *U : CurrentPad->users()) {
4933 BasicBlock *UnwindDest;
4934 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4935 UnwindDest = CRI->getUnwindDest();
4936 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4937 // We allow catchswitch unwind to caller to nest
4938 // within an outer pad that unwinds somewhere else,
4939 // because catchswitch doesn't have a nounwind variant.
4940 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4941 if (CSI->unwindsToCaller())
4942 continue;
4943 UnwindDest = CSI->getUnwindDest();
4944 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4945 UnwindDest = II->getUnwindDest();
4946 } else if (isa<CallInst>(U)) {
4947 // Calls which don't unwind may be found inside funclet
4948 // pads that unwind somewhere else. We don't *require*
4949 // such calls to be annotated nounwind.
4950 continue;
4951 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4952 // The unwind dest for a cleanup can only be found by
4953 // recursive search. Add it to the worklist, and we'll
4954 // search for its first use that determines where it unwinds.
4955 Worklist.push_back(CPI);
4956 continue;
4957 } else {
4958 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4959 continue;
4960 }
4961
4962 Value *UnwindPad;
4963 bool ExitsFPI;
4964 if (UnwindDest) {
4965 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
4966 if (!cast<Instruction>(UnwindPad)->isEHPad())
4967 continue;
4968 Value *UnwindParent = getParentPad(UnwindPad);
4969 // Ignore unwind edges that don't exit CurrentPad.
4970 if (UnwindParent == CurrentPad)
4971 continue;
4972 // Determine whether the original funclet pad is exited,
4973 // and if we are scanning nested pads determine how many
4974 // of them are exited so we can stop searching their
4975 // children.
4976 Value *ExitedPad = CurrentPad;
4977 ExitsFPI = false;
4978 do {
4979 if (ExitedPad == &FPI) {
4980 ExitsFPI = true;
4981 // Now we can resolve any ancestors of CurrentPad up to
4982 // FPI, but not including FPI since we need to make sure
4983 // to check all direct users of FPI for consistency.
4984 UnresolvedAncestorPad = &FPI;
4985 break;
4986 }
4987 Value *ExitedParent = getParentPad(ExitedPad);
4988 if (ExitedParent == UnwindParent) {
4989 // ExitedPad is the ancestor-most pad which this unwind
4990 // edge exits, so we can resolve up to it, meaning that
4991 // ExitedParent is the first ancestor still unresolved.
4992 UnresolvedAncestorPad = ExitedParent;
4993 break;
4994 }
4995 ExitedPad = ExitedParent;
4996 } while (!isa<ConstantTokenNone>(ExitedPad));
4997 } else {
4998 // Unwinding to caller exits all pads.
4999 UnwindPad = ConstantTokenNone::get(FPI.getContext());
5000 ExitsFPI = true;
5001 UnresolvedAncestorPad = &FPI;
5002 }
5003
5004 if (ExitsFPI) {
5005 // This unwind edge exits FPI. Make sure it agrees with other
5006 // such edges.
5007 if (FirstUser) {
5008 Check(UnwindPad == FirstUnwindPad,
5009 "Unwind edges out of a funclet "
5010 "pad must have the same unwind "
5011 "dest",
5012 &FPI, U, FirstUser);
5013 } else {
5014 FirstUser = U;
5015 FirstUnwindPad = UnwindPad;
5016 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
5017 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
5018 getParentPad(UnwindPad) == getParentPad(&FPI))
5019 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
5020 }
5021 }
5022 // Make sure we visit all uses of FPI, but for nested pads stop as
5023 // soon as we know where they unwind to.
5024 if (CurrentPad != &FPI)
5025 break;
5026 }
5027 if (UnresolvedAncestorPad) {
5028 if (CurrentPad == UnresolvedAncestorPad) {
5029 // When CurrentPad is FPI itself, we don't mark it as resolved even if
5030 // we've found an unwind edge that exits it, because we need to verify
5031 // all direct uses of FPI.
5032 assert(CurrentPad == &FPI);
5033 continue;
5034 }
5035 // Pop off the worklist any nested pads that we've found an unwind
5036 // destination for. The pads on the worklist are the uncles,
5037 // great-uncles, etc. of CurrentPad. We've found an unwind destination
5038 // for all ancestors of CurrentPad up to but not including
5039 // UnresolvedAncestorPad.
5040 Value *ResolvedPad = CurrentPad;
5041 while (!Worklist.empty()) {
5042 Value *UnclePad = Worklist.back();
5043 Value *AncestorPad = getParentPad(UnclePad);
5044 // Walk ResolvedPad up the ancestor list until we either find the
5045 // uncle's parent or the last resolved ancestor.
5046 while (ResolvedPad != AncestorPad) {
5047 Value *ResolvedParent = getParentPad(ResolvedPad);
5048 if (ResolvedParent == UnresolvedAncestorPad) {
5049 break;
5050 }
5051 ResolvedPad = ResolvedParent;
5052 }
5053 // If the resolved ancestor search didn't find the uncle's parent,
5054 // then the uncle is not yet resolved.
5055 if (ResolvedPad != AncestorPad)
5056 break;
5057 // This uncle is resolved, so pop it from the worklist.
5058 Worklist.pop_back();
5059 }
5060 }
5061 }
5062
5063 if (FirstUnwindPad) {
5064 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
5065 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
5066 Value *SwitchUnwindPad;
5067 if (SwitchUnwindDest)
5068 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
5069 else
5070 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
5071 Check(SwitchUnwindPad == FirstUnwindPad,
5072 "Unwind edges out of a catch must have the same unwind dest as "
5073 "the parent catchswitch",
5074 &FPI, FirstUser, CatchSwitch);
5075 }
5076 }
5077
5078 visitInstruction(FPI);
5079}
5080
5081void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5082 BasicBlock *BB = CatchSwitch.getParent();
5083
5084 Function *F = BB->getParent();
5085 Check(F->hasPersonalityFn(),
5086 "CatchSwitchInst needs to be in a function with a personality.",
5087 &CatchSwitch);
5088
5089 // The catchswitch instruction must be the first non-PHI instruction in the
5090 // block.
5091 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5092 "CatchSwitchInst not the first non-PHI instruction in the block.",
5093 &CatchSwitch);
5094
5095 auto *ParentPad = CatchSwitch.getParentPad();
5096 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5097 "CatchSwitchInst has an invalid parent.", ParentPad);
5098
5099 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5100 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5101 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5102 "CatchSwitchInst must unwind to an EH block which is not a "
5103 "landingpad.",
5104 &CatchSwitch);
5105
5106 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5107 if (getParentPad(&*I) == ParentPad)
5108 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5109 }
5110
5111 Check(CatchSwitch.getNumHandlers() != 0,
5112 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5113
5114 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5115 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5116 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5117 }
5118
5119 visitEHPadPredecessors(CatchSwitch);
5120 visitTerminator(CatchSwitch);
5121}
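// Illustrative note (editor's sketch, not upstream code), assuming a Windows
// EH personality such as @__CxxFrameHandler3:
//   %cs = catchswitch within none [label %handler] unwind to caller
// handler:                                    ; each handler must begin with a catchpad
//   %cp = catchpad within %cs [ptr null, i32 64, ptr null]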
5122
5123void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5125 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5126 CRI.getOperand(0));
5127
5128 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5129 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5130 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5131 "CleanupReturnInst must unwind to an EH block which is not a "
5132 "landingpad.",
5133 &CRI);
5134 }
5135
5136 visitTerminator(CRI);
5137}
5138
5139void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5140 Instruction *Op = cast<Instruction>(I.getOperand(i));
5141 // If we have an invalid invoke, don't try to compute the dominance.
5142 // We already reject it in the invoke specific checks and the dominance
5143 // computation doesn't handle multiple edges.
5144 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5145 if (II->getNormalDest() == II->getUnwindDest())
5146 return;
5147 }
5148
5149 // Quick check whether the def has already been encountered in the same block.
5150 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5151 // uses are defined to happen on the incoming edge, not at the instruction.
5152 //
5153 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5154 // wrapping an SSA value, assert that we've already encountered it. See
5155 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5156 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5157 return;
5158
5159 const Use &U = I.getOperandUse(i);
5160 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5161}
5162
5163void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5164 Check(I.getType()->isPointerTy(),
5165 "dereferenceable, dereferenceable_or_null "
5166 "apply only to pointer types",
5167 &I);
5169 "dereferenceable, dereferenceable_or_null apply only to load"
5170 " and inttoptr instructions, use attributes for calls or invokes",
5171 &I);
5172 Check(MD->getNumOperands() == 1,
5173 "dereferenceable, dereferenceable_or_null "
5174 "take one operand!",
5175 &I);
5176 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5177 Check(CI && CI->getType()->isIntegerTy(64),
5178 "dereferenceable, "
5179 "dereferenceable_or_null metadata value must be an i64!",
5180 &I);
5181}
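// Illustrative note (editor's sketch, not upstream code): the metadata shape
// verified above is
//   %p = load ptr, ptr %q, !dereferenceable !0   ; only load/inttoptr may carry it
//   !0 = !{i64 8}                                ; exactly one i64 operand (byte count)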
5182
5183void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5184 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5185 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5186 &I);
5187 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5188}
5189
5190void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5191 auto GetBranchingTerminatorNumOperands = [&]() {
5192 unsigned ExpectedNumOperands = 0;
5193 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5194 ExpectedNumOperands = BI->getNumSuccessors();
5195 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5196 ExpectedNumOperands = SI->getNumSuccessors();
5197 else if (isa<CallInst>(&I))
5198 ExpectedNumOperands = 1;
5199 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5200 ExpectedNumOperands = IBI->getNumDestinations();
5201 else if (isa<SelectInst>(&I))
5202 ExpectedNumOperands = 2;
5203 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5204 ExpectedNumOperands = CI->getNumSuccessors();
5205 return ExpectedNumOperands;
5206 };
5207 Check(MD->getNumOperands() >= 1,
5208 "!prof annotations should have at least 1 operand", MD);
5209 // Check first operand.
5210 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5212 "expected string with name of the !prof annotation", MD);
5213 MDString *MDS = cast<MDString>(MD->getOperand(0));
5214 StringRef ProfName = MDS->getString();
5215
5216 if (ProfName == MDProfLabels::UnknownBranchWeightsMarker) {
5217 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5218 "'unknown' !prof should only appear on instructions on which "
5219 "'branch_weights' would",
5220 MD);
5221 verifyUnknownProfileMetadata(MD);
5222 return;
5223 }
5224
5225 Check(MD->getNumOperands() >= 2,
5226 "!prof annotations should have no less than 2 operands", MD);
5227
5228 // Check consistency of !prof branch_weights metadata.
5229 if (ProfName == MDProfLabels::BranchWeights) {
5230 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5231 if (isa<InvokeInst>(&I)) {
5232 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5233 "Wrong number of InvokeInst branch_weights operands", MD);
5234 } else {
5235 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5236 if (ExpectedNumOperands == 0)
5237 CheckFailed("!prof branch_weights are not allowed for this instruction",
5238 MD);
5239
5240 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5241 MD);
5242 }
5243 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5244 ++i) {
5245 auto &MDO = MD->getOperand(i);
5246 Check(MDO, "second operand should not be null", MD);
5248 "!prof brunch_weights operand is not a const int");
5249 }
5250 } else if (ProfName == MDProfLabels::ValueProfile) {
5251 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5252 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5253 Check(KindInt, "VP !prof missing kind argument", MD);
5254
5255 auto Kind = KindInt->getZExtValue();
5256 Check(Kind >= InstrProfValueKind::IPVK_First &&
5257 Kind <= InstrProfValueKind::IPVK_Last,
5258 "Invalid VP !prof kind", MD);
5259 Check(MD->getNumOperands() % 2 == 1,
5260 "VP !prof should have an even number "
5261 "of arguments after 'VP'",
5262 MD);
5263 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5264 Kind == InstrProfValueKind::IPVK_MemOPSize)
5266 "VP !prof indirect call or memop size expected to be applied to "
5267 "CallBase instructions only",
5268 MD);
5269 } else {
5270 CheckFailed("expected either branch_weights or VP profile name", MD);
5271 }
5272}
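// Illustrative note (editor's sketch, not upstream code) of branch_weights and
// value-profile !prof payloads accepted above:
//   br i1 %c, label %t, label %f, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 80}        ; one weight per successor
//   call void %fp(), !prof !1
//   !1 = !{!"VP", i32 0, i64 100, i64 1234, i64 90}  ; value profile: kind, total, then value/count pairs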
5273
5274void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5275 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5276 // DIAssignID metadata must be attached to either an alloca or some form of
5277 // store/memory-writing instruction.
5278 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5279 // possible store intrinsics.
5280 bool ExpectedInstTy =
5281 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<IntrinsicInst>(I);
5282 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5283 I, MD);
5284 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5285 // only be found as DbgAssignIntrinsic operands.
5286 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5287 for (auto *User : AsValue->users()) {
5289 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5290 MD, User);
5291 // All of the dbg.assign intrinsics should be in the same function as I.
5292 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5293 CheckDI(DAI->getFunction() == I.getFunction(),
5294 "dbg.assign not in same function as inst", DAI, &I);
5295 }
5296 }
5297 for (DbgVariableRecord *DVR :
5298 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5299 CheckDI(DVR->isDbgAssign(),
5300 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5301 CheckDI(DVR->getFunction() == I.getFunction(),
5302 "DVRAssign not in same function as inst", DVR, &I);
5303 }
5304}
5305
5306void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5308 "!mmra metadata attached to unexpected instruction kind", I, MD);
5309
5310 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5311 // list of tags such as !2 in the following example:
5312 // !0 = !{!"a", !"b"}
5313 // !1 = !{!"c", !"d"}
5314 // !2 = !{!0, !1}
5315 if (MMRAMetadata::isTagMD(MD))
5316 return;
5317
5318 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5319 for (const MDOperand &MDOp : MD->operands())
5320 Check(MMRAMetadata::isTagMD(MDOp.get()),
5321 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5322}
5323
5324void Verifier::visitCallStackMetadata(MDNode *MD) {
5325 // Call stack metadata should consist of a list of at least 1 constant int
5326 // (representing a hash of the location).
5327 Check(MD->getNumOperands() >= 1,
5328 "call stack metadata should have at least 1 operand", MD);
5329
5330 for (const auto &Op : MD->operands())
5332 "call stack metadata operand should be constant integer", Op);
5333}
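// Illustrative note (editor's sketch, not upstream code): a call stack node is
// simply a list of integer location hashes, e.g. !0 = !{i64 123, i64 456}.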
5334
5335void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5336 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5337 Check(MD->getNumOperands() >= 1,
5338 "!memprof annotations should have at least 1 metadata operand "
5339 "(MemInfoBlock)",
5340 MD);
5341
5342 // Check each MIB
5343 for (auto &MIBOp : MD->operands()) {
5344 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5345 // The first operand of an MIB should be the call stack metadata.
5346 // The rest of the operands should be MDString tags, and there should be
5347 // at least one.
5348 Check(MIB->getNumOperands() >= 2,
5349 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5350
5351 // Check call stack metadata (first operand).
5352 Check(MIB->getOperand(0) != nullptr,
5353 "!memprof MemInfoBlock first operand should not be null", MIB);
5354 Check(isa<MDNode>(MIB->getOperand(0)),
5355 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5356 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5357 visitCallStackMetadata(StackMD);
5358
5359 // The second MIB operand should be MDString.
5361 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5362
5363 // Any remaining should be MDNode that are pairs of integers
5364 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5365 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5366 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5367 MIB);
5368 Check(OpNode->getNumOperands() == 2,
5369 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5370 "operands",
5371 MIB);
5372 // Check that all of Op's operands are ConstantInt.
5373 Check(llvm::all_of(OpNode->operands(),
5374 [](const MDOperand &Op) {
5375 return mdconst::hasa<ConstantInt>(Op);
5376 }),
5377 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5378 "ConstantInt operands",
5379 MIB);
5380 }
5381 }
5382}
5383
5384void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5385 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5386 // Verify the partial callstack annotated from memprof profiles. This callsite
5387 // is a part of a profiled allocation callstack.
5388 visitCallStackMetadata(MD);
5389}
5390
5391static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5392 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5393 return isa<ConstantInt>(VAL->getValue());
5394 return false;
5395}
5396
5397void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5398 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5399 &I);
5400 for (Metadata *Op : MD->operands()) {
5402 "The callee_type metadata must be a list of type metadata nodes", Op);
5403 auto *TypeMD = cast<MDNode>(Op);
5404 Check(TypeMD->getNumOperands() == 2,
5405 "Well-formed generalized type metadata must contain exactly two "
5406 "operands",
5407 Op);
5408 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5409 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5410 "The first operand of type metadata for functions must be zero", Op);
5411 Check(TypeMD->hasGeneralizedMDString(),
5412 "Only generalized type metadata can be part of the callee_type "
5413 "metadata list",
5414 Op);
5415 }
5416}
5417
5418void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5419 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5420 Check(Annotation->getNumOperands() >= 1,
5421 "annotation must have at least one operand");
5422 for (const MDOperand &Op : Annotation->operands()) {
5423 bool TupleOfStrings =
5424 isa<MDTuple>(Op.get()) &&
5425 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5426 return isa<MDString>(Annotation.get());
5427 });
5428 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5429 "operands must be a string or a tuple of strings");
5430 }
5431}
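// Illustrative note (editor's sketch, not upstream code): both forms pass the
// checks above:
//   !0 = !{!"auto-init"}                  ; plain string operands
//   !1 = !{!{!"some pass", !"a reason"}}  ; or tuples of strings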
5432
5433void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5434 unsigned NumOps = MD->getNumOperands();
5435 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5436 MD);
5437 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5438 "first scope operand must be self-referential or string", MD);
5439 if (NumOps == 3)
5441 "third scope operand must be string (if used)", MD);
5442
5443 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5444 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5445
5446 unsigned NumDomainOps = Domain->getNumOperands();
5447 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5448 "domain must have one or two operands", Domain);
5449 Check(Domain->getOperand(0).get() == Domain ||
5450 isa<MDString>(Domain->getOperand(0)),
5451 "first domain operand must be self-referential or string", Domain);
5452 if (NumDomainOps == 2)
5453 Check(isa<MDString>(Domain->getOperand(1)),
5454 "second domain operand must be string (if used)", Domain);
5455}
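// Illustrative note (editor's sketch, not upstream code) of the scope/domain
// shape checked above:
//   !0 = distinct !{!0, !"my domain"}       ; domain: self-reference plus optional name
//   !1 = distinct !{!1, !0, !"my scope"}    ; scope: self-reference, domain, optional name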
5456
5457void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5458 for (const MDOperand &Op : MD->operands()) {
5459 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5460 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5461 visitAliasScopeMetadata(OpMD);
5462 }
5463}
5464
5465void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5466 auto IsValidAccessScope = [](const MDNode *MD) {
5467 return MD->getNumOperands() == 0 && MD->isDistinct();
5468 };
5469
5470 // It must be either an access scope itself...
5471 if (IsValidAccessScope(MD))
5472 return;
5473
5474 // ...or a list of access scopes.
5475 for (const MDOperand &Op : MD->operands()) {
5476 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5477 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5478 Check(IsValidAccessScope(OpMD),
5479 "Access scope list contains invalid access scope", MD);
5480 }
5481}
5482
5483void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5484 static const char *ValidArgs[] = {"address_is_null", "address",
5485 "read_provenance", "provenance"};
5486
5487 auto *SI = dyn_cast<StoreInst>(&I);
5488 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5489 Check(SI->getValueOperand()->getType()->isPointerTy(),
5490 "!captures metadata can only be applied to store with value operand of "
5491 "pointer type",
5492 &I);
5493 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5494 &I);
5495
5496 for (Metadata *Op : Captures->operands()) {
5497 auto *Str = dyn_cast<MDString>(Op);
5498 Check(Str, "!captures metadata must be a list of strings", &I);
5499 Check(is_contained(ValidArgs, Str->getString()),
5500 "invalid entry in !captures metadata", &I, Str);
5501 }
5502}
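// Illustrative note (editor's sketch, not upstream code): a store of a pointer
// value may describe which capture components it communicates, e.g.
//   store ptr %x, ptr %slot, !captures !0
//   !0 = !{!"address", !"read_provenance"}   ; entries must come from ValidArgs above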
5503
5504void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5505 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5506 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5507 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5509 "expected integer constant", MD);
5510}
5511
5512/// verifyInstruction - Verify that an instruction is well formed.
5513///
5514void Verifier::visitInstruction(Instruction &I) {
5515 BasicBlock *BB = I.getParent();
5516 Check(BB, "Instruction not embedded in basic block!", &I);
5517
5518 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5519 for (User *U : I.users()) {
5520 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5521 "Only PHI nodes may reference their own value!", &I);
5522 }
5523 }
5524
5525 // Check that void typed values don't have names
5526 Check(!I.getType()->isVoidTy() || !I.hasName(),
5527 "Instruction has a name, but provides a void value!", &I);
5528
5529 // Check that the return value of the instruction is either void or a legal
5530 // value type.
5531 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5532 "Instruction returns a non-scalar type!", &I);
5533
5534 // Check that the instruction doesn't produce metadata. Calls are already
5535 // checked against the callee type.
5536 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5537 "Invalid use of metadata!", &I);
5538
5539 // Check that all uses of the instruction, if they are instructions
5540 // themselves, actually have parent basic blocks. If the use is not an
5541 // instruction, it is an error!
5542 for (Use &U : I.uses()) {
5543 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5544 Check(Used->getParent() != nullptr,
5545 "Instruction referencing"
5546 " instruction not embedded in a basic block!",
5547 &I, Used);
5548 else {
5549 CheckFailed("Use of instruction is not an instruction!", U);
5550 return;
5551 }
5552 }
5553
5554 // Get a pointer to the call base of the instruction if it is some form of
5555 // call.
5556 const CallBase *CBI = dyn_cast<CallBase>(&I);
5557
5558 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5559 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5560
5561 // Check to make sure that only first-class-values are operands to
5562 // instructions.
5563 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5564 Check(false, "Instruction operands must be first-class values!", &I);
5565 }
5566
5567 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5568 // This code checks whether the function is used as the operand of a
5569 // clang_arc_attachedcall operand bundle.
5570 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5571 int Idx) {
5572 return CBI && CBI->isOperandBundleOfType(
5573 LLVMContext::OB_clang_arc_attachedcall, Idx);
5574 };
5575
5576 // Check to make sure that the "address of" an intrinsic function is never
5577 // taken. Ignore cases where the address of the intrinsic function is used
5578 // as the argument of operand bundle "clang.arc.attachedcall" as those
5579 // cases are handled in verifyAttachedCallBundle.
5580 Check((!F->isIntrinsic() ||
5581 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5582 IsAttachedCallOperand(F, CBI, i)),
5583 "Cannot take the address of an intrinsic!", &I);
5584 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5585 F->getIntrinsicID() == Intrinsic::donothing ||
5586 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5587 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5588 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5589 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5590 F->getIntrinsicID() == Intrinsic::coro_resume ||
5591 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5592 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5593 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5594 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5595 F->getIntrinsicID() ==
5596 Intrinsic::experimental_patchpoint_void ||
5597 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5598 F->getIntrinsicID() == Intrinsic::fake_use ||
5599 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5600 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5601 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5602 IsAttachedCallOperand(F, CBI, i),
5603 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5604 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5605 "wasm.(re)throw",
5606 &I);
5607 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5608 &M, F, F->getParent());
5609 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5610 Check(OpBB->getParent() == BB->getParent(),
5611 "Referring to a basic block in another function!", &I);
5612 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5613 Check(OpArg->getParent() == BB->getParent(),
5614 "Referring to an argument in another function!", &I);
5615 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5616 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5617 &M, GV, GV->getParent());
5618 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5619 Check(OpInst->getFunction() == BB->getParent(),
5620 "Referring to an instruction in another function!", &I);
5621 verifyDominatesUse(I, i);
5622 } else if (isa<InlineAsm>(I.getOperand(i))) {
5623 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5624 "Cannot take the address of an inline asm!", &I);
5625 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5626 visitConstantExprsRecursively(C);
5627 }
5628 }
5629
5630 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5631 Check(I.getType()->isFPOrFPVectorTy(),
5632 "fpmath requires a floating point result!", &I);
5633 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5634 if (ConstantFP *CFP0 =
5635 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5636 const APFloat &Accuracy = CFP0->getValueAPF();
5637 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5638 "fpmath accuracy must have float type", &I);
5639 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5640 "fpmath accuracy not a positive number!", &I);
5641 } else {
5642 Check(false, "invalid fpmath accuracy!", &I);
5643 }
5644 }
5645
5646 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5648 "Ranges are only for loads, calls and invokes!", &I);
5649 visitRangeMetadata(I, Range, I.getType());
5650 }
5651
5652 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5655 "noalias.addrspace are only for memory operations!", &I);
5656 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5657 }
5658
5659 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5661 "invariant.group metadata is only for loads and stores", &I);
5662 }
5663
5664 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5665 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5666 &I);
5668 "nonnull applies only to load instructions, use attributes"
5669 " for calls or invokes",
5670 &I);
5671 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5672 }
5673
5674 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5675 visitDereferenceableMetadata(I, MD);
5676
5677 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5678 visitDereferenceableMetadata(I, MD);
5679
5680 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5681 visitNofreeMetadata(I, MD);
5682
5683 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5684 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5685
5686 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5687 visitAliasScopeListMetadata(MD);
5688 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5689 visitAliasScopeListMetadata(MD);
5690
5691 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5692 visitAccessGroupMetadata(MD);
5693
5694 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5695 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5696 &I);
5698 "align applies only to load instructions, "
5699 "use attributes for calls or invokes",
5700 &I);
5701 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5702 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5703 Check(CI && CI->getType()->isIntegerTy(64),
5704 "align metadata value must be an i64!", &I);
5705 uint64_t Align = CI->getZExtValue();
5706 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5707 &I);
5708 Check(Align <= Value::MaximumAlignment,
5709 "alignment is larger that implementation defined limit", &I);
5710 }
5711
5712 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5713 visitProfMetadata(I, MD);
5714
5715 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5716 visitMemProfMetadata(I, MD);
5717
5718 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5719 visitCallsiteMetadata(I, MD);
5720
5721 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5722 visitCalleeTypeMetadata(I, MD);
5723
5724 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5725 visitDIAssignIDMetadata(I, MD);
5726
5727 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5728 visitMMRAMetadata(I, MMRA);
5729
5730 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5731 visitAnnotationMetadata(Annotation);
5732
5733 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5734 visitCapturesMetadata(I, Captures);
5735
5736 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5737 visitAllocTokenMetadata(I, MD);
5738
5739 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5740 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5741 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5742
5743 if (auto *DL = dyn_cast<DILocation>(N)) {
5744 if (DL->getAtomGroup()) {
5745 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5746 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5747 "Instructions enabled",
5748 DL, DL->getScope()->getSubprogram());
5749 }
5750 }
5751 }
5752
5753 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5754 I.getAllMetadata(MDs);
5755 for (auto Attachment : MDs) {
5756 unsigned Kind = Attachment.first;
5757 auto AllowLocs =
5758 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5759 ? AreDebugLocsAllowed::Yes
5760 : AreDebugLocsAllowed::No;
5761 visitMDNode(*Attachment.second, AllowLocs);
5762 }
5763
5764 InstsInThisBlock.insert(&I);
5765}
5766
5767/// Allow intrinsics to be verified in different ways.
5768void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5769 Function *IF = Call.getCalledFunction();
5770 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5771 IF);
5772
5773 // Verify that the intrinsic prototype lines up with what the .td files
5774 // describe.
5775 FunctionType *IFTy = IF->getFunctionType();
5776 bool IsVarArg = IFTy->isVarArg();
5777
5778 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5779 getIntrinsicInfoTableEntries(ID, Table);
5780 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5781
5782 // Walk the descriptors to extract overloaded types.
5783 SmallVector<Type *, 4> ArgTys;
5784 Intrinsic::MatchIntrinsicTypesResult Res =
5785 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5786 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5787 "Intrinsic has incorrect return type!", IF);
5788 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5789 "Intrinsic has incorrect argument type!", IF);
5790
5791 // Verify if the intrinsic call matches the vararg property.
5792 if (IsVarArg)
5793 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5794 "Intrinsic was not defined with variable arguments!", IF);
5795 else
5796 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5797 "Callsite was not defined with variable arguments!", IF);
5798
5799 // All descriptors should be absorbed by now.
5800 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5801
5802 // Now that we have the intrinsic ID and the actual argument types (and we
5803 // know they are legal for the intrinsic!) get the intrinsic name through the
5804 // usual means. This allows us to verify the mangling of argument types into
5805 // the name.
5806 const std::string ExpectedName =
5807 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5808 Check(ExpectedName == IF->getName(),
5809 "Intrinsic name not mangled correctly for type arguments! "
5810 "Should be: " +
5811 ExpectedName,
5812 IF);
5813
5814 // If the intrinsic takes MDNode arguments, verify that they are either global
5815 // or are local to *this* function.
5816 for (Value *V : Call.args()) {
5817 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5818 visitMetadataAsValue(*MD, Call.getCaller());
5819 if (auto *Const = dyn_cast<Constant>(V))
5820 Check(!Const->getType()->isX86_AMXTy(),
5821 "const x86_amx is not allowed in argument!");
5822 }
5823
5824 switch (ID) {
5825 default:
5826 break;
5827 case Intrinsic::assume: {
5828 if (Call.hasOperandBundles()) {
5829 auto *Cond = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5830 Check(Cond && Cond->isOne(),
5831 "assume with operand bundles must have i1 true condition", Call);
5832 }
5833 for (auto &Elem : Call.bundle_op_infos()) {
5834 unsigned ArgCount = Elem.End - Elem.Begin;
5835 // Separate storage assumptions are special insofar as they're the only
5836 // operand bundles allowed on assumes that aren't parameter attributes.
5837 if (Elem.Tag->getKey() == "separate_storage") {
5838 Check(ArgCount == 2,
5839 "separate_storage assumptions should have 2 arguments", Call);
5840 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5841 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5842 "arguments to separate_storage assumptions should be pointers",
5843 Call);
5844 continue;
5845 }
5846 Check(Elem.Tag->getKey() == "ignore" ||
5847 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5848 "tags must be valid attribute names", Call);
5849 Attribute::AttrKind Kind =
5850 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5851 if (Kind == Attribute::Alignment) {
5852 Check(ArgCount <= 3 && ArgCount >= 2,
5853 "alignment assumptions should have 2 or 3 arguments", Call);
5854 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5855 "first argument should be a pointer", Call);
5856 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5857 "second argument should be an integer", Call);
5858 if (ArgCount == 3)
5859 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5860 "third argument should be an integer if present", Call);
5861 continue;
5862 }
5863 if (Kind == Attribute::Dereferenceable) {
5864 Check(ArgCount == 2,
5865 "dereferenceable assumptions should have 2 arguments", Call);
5866 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5867 "first argument should be a pointer", Call);
5868 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5869 "second argument should be an integer", Call);
5870 continue;
5871 }
5872 Check(ArgCount <= 2, "too many arguments", Call);
5873 if (Kind == Attribute::None)
5874 break;
5875 if (Attribute::isIntAttrKind(Kind)) {
5876 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5877 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5878 "the second argument should be a constant integral value", Call);
5879 } else if (Attribute::canUseAsParamAttr(Kind)) {
5880 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5881 } else if (Attribute::canUseAsFnAttr(Kind)) {
5882 Check((ArgCount) == 0, "this attribute has no argument", Call);
5883 }
5884 }
5885 break;
5886 }
5887 case Intrinsic::ucmp:
5888 case Intrinsic::scmp: {
5889 Type *SrcTy = Call.getOperand(0)->getType();
5890 Type *DestTy = Call.getType();
5891
5892 Check(DestTy->getScalarSizeInBits() >= 2,
5893 "result type must be at least 2 bits wide", Call);
5894
5895 bool IsDestTypeVector = DestTy->isVectorTy();
5896 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5897 "ucmp/scmp argument and result types must both be either vector or "
5898 "scalar types",
5899 Call);
5900 if (IsDestTypeVector) {
5901 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5902 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5903 Check(SrcVecLen == DestVecLen,
5904 "return type and arguments must have the same number of "
5905 "elements",
5906 Call);
5907 }
5908 break;
5909 }
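// Illustrative note (editor's sketch, not upstream code): a three-way compare
// such as
//   %r = call i8 @llvm.scmp.i8.i32(i32 %a, i32 %b)   ; result type must be >= 2 bits wide
// yields -1, 0 or 1; vector forms must keep the argument and result element
// counts equal, as checked above.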
5910 case Intrinsic::coro_id: {
5911 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5912 if (isa<ConstantPointerNull>(InfoArg))
5913 break;
5914 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5915 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5916 "info argument of llvm.coro.id must refer to an initialized "
5917 "constant");
5918 Constant *Init = GV->getInitializer();
5920 "info argument of llvm.coro.id must refer to either a struct or "
5921 "an array");
5922 break;
5923 }
5924 case Intrinsic::is_fpclass: {
5925 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5926 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5927 "unsupported bits for llvm.is.fpclass test mask");
5928 break;
5929 }
5930 case Intrinsic::fptrunc_round: {
5931 // Check the rounding mode
5932 Metadata *MD = nullptr;
5933 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5934 if (MAV)
5935 MD = MAV->getMetadata();
5936
5937 Check(MD != nullptr, "missing rounding mode argument", Call);
5938
5939 Check(isa<MDString>(MD),
5940 ("invalid value for llvm.fptrunc.round metadata operand"
5941 " (the operand should be a string)"),
5942 MD);
5943
5944 std::optional<RoundingMode> RoundMode =
5945 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5946 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5947 "unsupported rounding mode argument", Call);
5948 break;
5949 }
5950#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5951#include "llvm/IR/VPIntrinsics.def"
5952#undef BEGIN_REGISTER_VP_INTRINSIC
5953 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5954 break;
5955#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5956 case Intrinsic::INTRINSIC:
5957#include "llvm/IR/ConstrainedOps.def"
5958#undef INSTRUCTION
5959 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5960 break;
5961 case Intrinsic::dbg_declare: // llvm.dbg.declare
5962 case Intrinsic::dbg_value: // llvm.dbg.value
5963 case Intrinsic::dbg_assign: // llvm.dbg.assign
5964 case Intrinsic::dbg_label: // llvm.dbg.label
5965 // We no longer interpret debug intrinsics (the old variable-location
5966 // design). They're meaningless as far as LLVM is concerned; we could make
5967 // it an error for them to appear, but it's possible we'll have users
5968 // converting back to intrinsics for the foreseeable future (such as DXIL),
5969 // so tolerate their existence.
5970 break;
5971 case Intrinsic::memcpy:
5972 case Intrinsic::memcpy_inline:
5973 case Intrinsic::memmove:
5974 case Intrinsic::memset:
5975 case Intrinsic::memset_inline:
5976 break;
5977 case Intrinsic::experimental_memset_pattern: {
5978 const auto Memset = cast<MemSetPatternInst>(&Call);
5979 Check(Memset->getValue()->getType()->isSized(),
5980 "unsized types cannot be used as memset patterns", Call);
5981 break;
5982 }
5983 case Intrinsic::memcpy_element_unordered_atomic:
5984 case Intrinsic::memmove_element_unordered_atomic:
5985 case Intrinsic::memset_element_unordered_atomic: {
5986 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5987
5988 ConstantInt *ElementSizeCI =
5989 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5990 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5991 Check(ElementSizeVal.isPowerOf2(),
5992 "element size of the element-wise atomic memory intrinsic "
5993 "must be a power of 2",
5994 Call);
5995
5996 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5997 return Alignment && ElementSizeVal.ule(Alignment->value());
5998 };
5999 Check(IsValidAlignment(AMI->getDestAlign()),
6000 "incorrect alignment of the destination argument", Call);
6001 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6002 Check(IsValidAlignment(AMT->getSourceAlign()),
6003 "incorrect alignment of the source argument", Call);
6004 }
6005 break;
6006 }
6007 case Intrinsic::call_preallocated_setup: {
6008 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6009 bool FoundCall = false;
6010 for (User *U : Call.users()) {
6011 auto *UseCall = dyn_cast<CallBase>(U);
6012 Check(UseCall != nullptr,
6013 "Uses of llvm.call.preallocated.setup must be calls");
6014 Intrinsic::ID IID = UseCall->getIntrinsicID();
6015 if (IID == Intrinsic::call_preallocated_arg) {
6016 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6017 Check(AllocArgIndex != nullptr,
6018 "llvm.call.preallocated.alloc arg index must be a constant");
6019 auto AllocArgIndexInt = AllocArgIndex->getValue();
6020 Check(AllocArgIndexInt.sge(0) &&
6021 AllocArgIndexInt.slt(NumArgs->getValue()),
6022 "llvm.call.preallocated.alloc arg index must be between 0 and "
6023 "corresponding "
6024 "llvm.call.preallocated.setup's argument count");
6025 } else if (IID == Intrinsic::call_preallocated_teardown) {
6026 // nothing to do
6027 } else {
6028 Check(!FoundCall, "Can have at most one call corresponding to a "
6029 "llvm.call.preallocated.setup");
6030 FoundCall = true;
6031 size_t NumPreallocatedArgs = 0;
6032 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6033 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6034 ++NumPreallocatedArgs;
6035 }
6036 }
6037 Check(NumPreallocatedArgs != 0,
6038 "cannot use preallocated intrinsics on a call without "
6039 "preallocated arguments");
6040 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6041 "llvm.call.preallocated.setup arg size must be equal to number "
6042 "of preallocated arguments "
6043 "at call site",
6044 Call, *UseCall);
6045 // getOperandBundle() cannot be called if more than one of the operand
6046 // bundle exists. There is already a check elsewhere for this, so skip
6047 // here if we see more than one.
6048 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6049 1) {
6050 return;
6051 }
6052 auto PreallocatedBundle =
6053 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6054 Check(PreallocatedBundle,
6055 "Use of llvm.call.preallocated.setup outside intrinsics "
6056 "must be in \"preallocated\" operand bundle");
6057 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6058 "preallocated bundle must have token from corresponding "
6059 "llvm.call.preallocated.setup");
6060 }
6061 }
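// Taken together, a well-formed use of the preallocated intrinsics is
// expected to look roughly like the following sketch (illustrative IR;
// @consume is a placeholder callee taking one preallocated argument):
//   %t = call token @llvm.call.preallocated.setup(i32 1)
//   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
//   call void @consume(ptr preallocated(i32) %a) [ "preallocated"(token %t) ]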
6062 break;
6063 }
6064 case Intrinsic::call_preallocated_arg: {
6065 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6066 Check(Token &&
6067 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6068 "llvm.call.preallocated.arg token argument must be a "
6069 "llvm.call.preallocated.setup");
6070 Check(Call.hasFnAttr(Attribute::Preallocated),
6071 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6072 "call site attribute");
6073 break;
6074 }
6075 case Intrinsic::call_preallocated_teardown: {
6076 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6077 Check(Token &&
6078 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6079 "llvm.call.preallocated.teardown token argument must be a "
6080 "llvm.call.preallocated.setup");
6081 break;
6082 }
6083 case Intrinsic::gcroot:
6084 case Intrinsic::gcwrite:
6085 case Intrinsic::gcread:
6086 if (ID == Intrinsic::gcroot) {
6087 AllocaInst *AI =
6088 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
6089 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6090 Check(isa<Constant>(Call.getArgOperand(1)),
6091 "llvm.gcroot parameter #2 must be a constant.", Call);
6092 if (!AI->getAllocatedType()->isPointerTy()) {
6093 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
6094 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6095 "or argument #2 must be a non-null constant.",
6096 Call);
6097 }
6098 }
6099
6100 Check(Call.getParent()->getParent()->hasGC(),
6101 "Enclosing function does not use GC.", Call);
6102 break;
6103 case Intrinsic::init_trampoline:
6104 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
6105 "llvm.init_trampoline parameter #2 must resolve to a function.",
6106 Call);
6107 break;
6108 case Intrinsic::prefetch:
6109 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6110 "rw argument to llvm.prefetch must be 0-1", Call);
6111 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6112 "locality argument to llvm.prefetch must be 0-3", Call);
6113 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6114 "cache type argument to llvm.prefetch must be 0-1", Call);
6115 break;
6116 case Intrinsic::reloc_none: {
6117 Check(isa<MDString>(
6118 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6119 "llvm.reloc.none argument must be a metadata string", &Call);
6120 break;
6121 }
6122 case Intrinsic::stackprotector:
6123 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
6124 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6125 break;
6126 case Intrinsic::localescape: {
6127 BasicBlock *BB = Call.getParent();
6128 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6129 Call);
6130 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6131 Call);
6132 for (Value *Arg : Call.args()) {
6133 if (isa<ConstantPointerNull>(Arg))
6134 continue; // Null values are allowed as placeholders.
6135 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6136 Check(AI && AI->isStaticAlloca(),
6137 "llvm.localescape only accepts static allocas", Call);
6138 }
6139 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6140 SawFrameEscape = true;
6141 break;
6142 }
6143 case Intrinsic::localrecover: {
6144 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
6145 Function *Fn = dyn_cast<Function>(FnArg);
6146 Check(Fn && !Fn->isDeclaration(),
6147 "llvm.localrecover first "
6148 "argument must be function defined in this module",
6149 Call);
6150 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6151 auto &Entry = FrameEscapeInfo[Fn];
6152 Entry.second = unsigned(
6153 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6154 break;
6155 }
6156
6157 case Intrinsic::experimental_gc_statepoint:
6158 if (auto *CI = dyn_cast<CallInst>(&Call))
6159 Check(!CI->isInlineAsm(),
6160 "gc.statepoint support for inline assembly unimplemented", CI);
6161 Check(Call.getParent()->getParent()->hasGC(),
6162 "Enclosing function does not use GC.", Call);
6163
6164 verifyStatepoint(Call);
6165 break;
6166 case Intrinsic::experimental_gc_result: {
6167 Check(Call.getParent()->getParent()->hasGC(),
6168 "Enclosing function does not use GC.", Call);
6169
6170 auto *Statepoint = Call.getArgOperand(0);
6171 if (isa<UndefValue>(Statepoint))
6172 break;
6173
6174 // Are we tied to a statepoint properly?
6175 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6176 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6177 Intrinsic::experimental_gc_statepoint,
6178 "gc.result operand #1 must be from a statepoint", Call,
6179 Call.getArgOperand(0));
6180
6181 // Check that result type matches wrapped callee.
6182 auto *TargetFuncType =
6183 cast<FunctionType>(StatepointCall->getParamElementType(2));
6184 Check(Call.getType() == TargetFuncType->getReturnType(),
6185 "gc.result result type does not match wrapped callee", Call);
6186 break;
6187 }
6188 case Intrinsic::experimental_gc_relocate: {
6189 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6190
6191 Check(isa<PointerType>(Call.getType()->getScalarType()),
6192 "gc.relocate must return a pointer or a vector of pointers", Call);
6193
6194 // Check that this relocate is correctly tied to the statepoint
6195
6196 // This is the case for a relocate on the unwinding path of an invoke statepoint.
6197 if (LandingPadInst *LandingPad =
6198 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
6199
6200 const BasicBlock *InvokeBB =
6201 LandingPad->getParent()->getUniquePredecessor();
6202
6203 // Landingpad relocates should have exactly one predecessor, terminated by
6204 // the invoke statepoint.
6205 Check(InvokeBB, "safepoints should have unique landingpads",
6206 LandingPad->getParent());
6207 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6208 InvokeBB);
6209 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
6210 "gc relocate should be linked to a statepoint", InvokeBB);
6211 } else {
6212 // In all other cases relocate should be tied to the statepoint directly.
6213 // This covers relocates on a normal return path of invoke statepoint and
6214 // relocates of a call statepoint.
6215 auto *Token = Call.getArgOperand(0);
6216 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
6217 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6218 }
6219
6220 // Verify rest of the relocate arguments.
6221 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6222
6223 // Both the base and derived must be piped through the safepoint.
6224 Value *Base = Call.getArgOperand(1);
6225 Check(isa<ConstantInt>(Base),
6226 "gc.relocate operand #2 must be integer offset", Call);
6227
6228 Value *Derived = Call.getArgOperand(2);
6229 Check(isa<ConstantInt>(Derived),
6230 "gc.relocate operand #3 must be integer offset", Call);
6231
6232 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6233 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6234
6235 // Check the bounds
6236 if (isa<UndefValue>(StatepointCall))
6237 break;
6238 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6239 .getOperandBundle(LLVMContext::OB_gc_live)) {
6240 Check(BaseIndex < Opt->Inputs.size(),
6241 "gc.relocate: statepoint base index out of bounds", Call);
6242 Check(DerivedIndex < Opt->Inputs.size(),
6243 "gc.relocate: statepoint derived index out of bounds", Call);
6244 }
6245
6246 // Relocated value must be either a pointer type or vector-of-pointer type,
6247 // but gc_relocate does not need to return the same pointer type as the
6248 // relocated pointer. It can be cast to the correct type later if desired.
6249 // However, they must have the same address space and 'vectorness'.
6250 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6251 auto *ResultType = Call.getType();
6252 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6253 auto *BaseType = Relocate.getBasePtr()->getType();
6254
6255 Check(BaseType->isPtrOrPtrVectorTy(),
6256 "gc.relocate: relocated value must be a pointer", Call);
6257 Check(DerivedType->isPtrOrPtrVectorTy(),
6258 "gc.relocate: relocated value must be a pointer", Call);
6259
6260 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6261 "gc.relocate: vector relocates to vector and pointer to pointer",
6262 Call);
6263 Check(
6264 ResultType->getPointerAddressSpace() ==
6265 DerivedType->getPointerAddressSpace(),
6266 "gc.relocate: relocating a pointer shouldn't change its address space",
6267 Call);
6268
6269 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6270 Check(GC, "gc.relocate: calling function must have GCStrategy",
6271 Call.getFunction());
6272 if (GC) {
6273 auto isGCPtr = [&GC](Type *PTy) {
6274 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6275 };
6276 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6277 Check(isGCPtr(BaseType),
6278 "gc.relocate: relocated value must be a gc pointer", Call);
6279 Check(isGCPtr(DerivedType),
6280 "gc.relocate: relocated value must be a gc pointer", Call);
6281 }
6282 break;
6283 }
6284 case Intrinsic::experimental_patchpoint: {
6285 if (Call.getCallingConv() == CallingConv::AnyReg) {
6286 Check(Call.getType()->isSingleValueType(),
6287 "patchpoint: invalid return type used with anyregcc", Call);
6288 }
6289 break;
6290 }
6291 case Intrinsic::eh_exceptioncode:
6292 case Intrinsic::eh_exceptionpointer: {
6293 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
6294 "eh.exceptionpointer argument must be a catchpad", Call);
6295 break;
6296 }
6297 case Intrinsic::get_active_lane_mask: {
6298 Check(Call.getType()->isVectorTy(),
6299 "get_active_lane_mask: must return a "
6300 "vector",
6301 Call);
6302 auto *ElemTy = Call.getType()->getScalarType();
6303 Check(ElemTy->isIntegerTy(1),
6304 "get_active_lane_mask: element type is not "
6305 "i1",
6306 Call);
6307 break;
6308 }
6309 case Intrinsic::experimental_get_vector_length: {
6310 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6311 Check(!VF->isNegative() && !VF->isZero(),
6312 "get_vector_length: VF must be positive", Call);
6313 break;
6314 }
6315 case Intrinsic::masked_load: {
6316 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6317 Call);
6318
6319 Value *Mask = Call.getArgOperand(1);
6320 Value *PassThru = Call.getArgOperand(2);
6321 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6322 Call);
6323 Check(PassThru->getType() == Call.getType(),
6324 "masked_load: pass through and return type must match", Call);
6325 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6326 cast<VectorType>(Call.getType())->getElementCount(),
6327 "masked_load: vector mask must be same length as return", Call);
6328 break;
6329 }
6330 case Intrinsic::masked_store: {
6331 Value *Val = Call.getArgOperand(0);
6332 Value *Mask = Call.getArgOperand(2);
6333 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6334 Call);
6335 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6336 cast<VectorType>(Val->getType())->getElementCount(),
6337 "masked_store: vector mask must be same length as value", Call);
6338 break;
6339 }
6340
6341 case Intrinsic::experimental_guard: {
6342 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6343 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6344 "experimental_guard must have exactly one "
6345 "\"deopt\" operand bundle");
6346 break;
6347 }
6348
6349 case Intrinsic::experimental_deoptimize: {
6350 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6351 Call);
6352 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6353 "experimental_deoptimize must have exactly one "
6354 "\"deopt\" operand bundle");
6355 Check(Call.getType() == Call.getFunction()->getReturnType(),
6356 "experimental_deoptimize return type must match caller return type");
6357
6358 if (isa<CallInst>(Call)) {
6359 const auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
6360 Check(RI,
6361 "calls to experimental_deoptimize must be followed by a return");
6362
6363 if (!Call.getType()->isVoidTy() && RI)
6364 Check(RI->getReturnValue() == &Call,
6365 "calls to experimental_deoptimize must be followed by a return "
6366 "of the value computed by experimental_deoptimize");
6367 }
6368
6369 break;
6370 }
6371 case Intrinsic::vastart: {
6372 Check(Call.getCaller()->isVarArg(),
6373 "va_start called in a non-varargs function");
6374 break;
6375 }
6376 case Intrinsic::get_dynamic_area_offset: {
6377 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6378 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6379 IntTy->getBitWidth(),
6380 "get_dynamic_area_offset result type must be scalar integer matching "
6381 "alloca address space width",
6382 Call);
6383 break;
6384 }
6385 case Intrinsic::vector_reduce_and:
6386 case Intrinsic::vector_reduce_or:
6387 case Intrinsic::vector_reduce_xor:
6388 case Intrinsic::vector_reduce_add:
6389 case Intrinsic::vector_reduce_mul:
6390 case Intrinsic::vector_reduce_smax:
6391 case Intrinsic::vector_reduce_smin:
6392 case Intrinsic::vector_reduce_umax:
6393 case Intrinsic::vector_reduce_umin: {
6394 Type *ArgTy = Call.getArgOperand(0)->getType();
6395 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6396 "Intrinsic has incorrect argument type!");
6397 break;
6398 }
6399 case Intrinsic::vector_reduce_fmax:
6400 case Intrinsic::vector_reduce_fmin: {
6401 Type *ArgTy = Call.getArgOperand(0)->getType();
6402 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6403 "Intrinsic has incorrect argument type!");
6404 break;
6405 }
6406 case Intrinsic::vector_reduce_fadd:
6407 case Intrinsic::vector_reduce_fmul: {
6408 // Unlike the other reductions, the first argument is a start value. The
6409 // second argument is the vector to be reduced.
6410 Type *ArgTy = Call.getArgOperand(1)->getType();
6411 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6412 "Intrinsic has incorrect argument type!");
6413 break;
6414 }
6415 case Intrinsic::smul_fix:
6416 case Intrinsic::smul_fix_sat:
6417 case Intrinsic::umul_fix:
6418 case Intrinsic::umul_fix_sat:
6419 case Intrinsic::sdiv_fix:
6420 case Intrinsic::sdiv_fix_sat:
6421 case Intrinsic::udiv_fix:
6422 case Intrinsic::udiv_fix_sat: {
6423 Value *Op1 = Call.getArgOperand(0);
6424 Value *Op2 = Call.getArgOperand(1);
6425 Check(Op1->getType()->isIntOrIntVectorTy(),
6426 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6427 "vector of ints");
6428 Check(Op2->getType()->isIntOrIntVectorTy(),
6429 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6430 "vector of ints");
6431
6432 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6433 Check(Op3->getType()->isIntegerTy(),
6434 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6435 Check(Op3->getBitWidth() <= 32,
6436 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6437
6438 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6439 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6440 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6441 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6442 "the operands");
6443 } else {
6444 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6445 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6446 "to the width of the operands");
6447 }
6448 break;
6449 }
6450 case Intrinsic::lrint:
6451 case Intrinsic::llrint:
6452 case Intrinsic::lround:
6453 case Intrinsic::llround: {
6454 Type *ValTy = Call.getArgOperand(0)->getType();
6455 Type *ResultTy = Call.getType();
6456 auto *VTy = dyn_cast<VectorType>(ValTy);
6457 auto *RTy = dyn_cast<VectorType>(ResultTy);
6458 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6459 ExpectedName + ": argument must be floating-point or vector "
6460 "of floating-points, and result must be integer or "
6461 "vector of integers",
6462 &Call);
6463 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6464 ExpectedName + ": argument and result disagree on vector use", &Call);
6465 if (VTy) {
6466 Check(VTy->getElementCount() == RTy->getElementCount(),
6467 ExpectedName + ": argument must be same length as result", &Call);
6468 }
6469 break;
6470 }
6471 case Intrinsic::bswap: {
6472 Type *Ty = Call.getType();
6473 unsigned Size = Ty->getScalarSizeInBits();
6474 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6475 break;
6476 }
6477 case Intrinsic::invariant_start: {
6478 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6479 Check(InvariantSize &&
6480 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6481 "invariant_start parameter must be -1, 0 or a positive number",
6482 &Call);
6483 break;
6484 }
6485 case Intrinsic::matrix_multiply:
6486 case Intrinsic::matrix_transpose:
6487 case Intrinsic::matrix_column_major_load:
6488 case Intrinsic::matrix_column_major_store: {
6489 Function *IF = Call.getCalledFunction();
6490 ConstantInt *Stride = nullptr;
6491 ConstantInt *NumRows;
6492 ConstantInt *NumColumns;
6493 VectorType *ResultTy;
6494 Type *Op0ElemTy = nullptr;
6495 Type *Op1ElemTy = nullptr;
6496 switch (ID) {
6497 case Intrinsic::matrix_multiply: {
6498 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6499 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6500 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6501 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6502 ->getNumElements() ==
6503 NumRows->getZExtValue() * N->getZExtValue(),
6504 "First argument of a matrix operation does not match specified "
6505 "shape!");
6506 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6507 ->getNumElements() ==
6508 N->getZExtValue() * NumColumns->getZExtValue(),
6509 "Second argument of a matrix operation does not match specified "
6510 "shape!");
6511
6512 ResultTy = cast<VectorType>(Call.getType());
6513 Op0ElemTy =
6514 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6515 Op1ElemTy =
6516 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6517 break;
6518 }
6519 case Intrinsic::matrix_transpose:
6520 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6521 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6522 ResultTy = cast<VectorType>(Call.getType());
6523 Op0ElemTy =
6524 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6525 break;
6526 case Intrinsic::matrix_column_major_load: {
6527 Stride = cast<ConstantInt>(Call.getArgOperand(1));
6528 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6529 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6530 ResultTy = cast<VectorType>(Call.getType());
6531 break;
6532 }
6533 case Intrinsic::matrix_column_major_store: {
6534 Stride = cast<ConstantInt>(Call.getArgOperand(2));
6535 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6536 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6537 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6538 Op0ElemTy =
6539 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6540 break;
6541 }
6542 default:
6543 llvm_unreachable("unexpected intrinsic");
6544 }
6545
6546 Check(ResultTy->getElementType()->isIntegerTy() ||
6547 ResultTy->getElementType()->isFloatingPointTy(),
6548 "Result type must be an integer or floating-point type!", IF);
6549
6550 if (Op0ElemTy)
6551 Check(ResultTy->getElementType() == Op0ElemTy,
6552 "Vector element type mismatch of the result and first operand "
6553 "vector!",
6554 IF);
6555
6556 if (Op1ElemTy)
6557 Check(ResultTy->getElementType() == Op1ElemTy,
6558 "Vector element type mismatch of the result and second operand "
6559 "vector!",
6560 IF);
6561
6562 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6563 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6564 "Result of a matrix operation does not fit in the returned vector!");
6565
6566 if (Stride) {
6567 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6568 IF);
6569 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6570 "Stride must be greater or equal than the number of rows!", IF);
6571 }
6572
6573 break;
6574 }
6575 case Intrinsic::vector_splice: {
6576 VectorType *VecTy = cast<VectorType>(Call.getType());
6577 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6578 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6579 if (VecTy->isScalableTy() && Call.getParent() &&
6580 Call.getParent()->getParent()) {
6581 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6582 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6583 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6584 }
6585 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6586 (Idx >= 0 && Idx < KnownMinNumElements),
6587 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6588 "known minimum number of elements in the vector. For scalable "
6589 "vectors the minimum number of elements is determined from "
6590 "vscale_range.",
6591 &Call);
6592 break;
6593 }
6594 case Intrinsic::stepvector: {
6595 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6596 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6597 VecTy->getScalarSizeInBits() >= 8,
6598 "stepvector only supported for vectors of integers "
6599 "with a bitwidth of at least 8.",
6600 &Call);
6601 break;
6602 }
6603 case Intrinsic::experimental_vector_match: {
6604 Value *Op1 = Call.getArgOperand(0);
6605 Value *Op2 = Call.getArgOperand(1);
6606 Value *Mask = Call.getArgOperand(2);
6607
6608 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6609 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6610 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6611
6612 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6613 Check(isa<FixedVectorType>(Op2Ty),
6614 "Second operand must be a fixed length vector.", &Call);
6615 Check(Op1Ty->getElementType()->isIntegerTy(),
6616 "First operand must be a vector of integers.", &Call);
6617 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6618 "First two operands must have the same element type.", &Call);
6619 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6620 "First operand and mask must have the same number of elements.",
6621 &Call);
6622 Check(MaskTy->getElementType()->isIntegerTy(1),
6623 "Mask must be a vector of i1's.", &Call);
6624 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6625 &Call);
6626 break;
6627 }
6628 case Intrinsic::vector_insert: {
6629 Value *Vec = Call.getArgOperand(0);
6630 Value *SubVec = Call.getArgOperand(1);
6631 Value *Idx = Call.getArgOperand(2);
6632 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6633
6634 VectorType *VecTy = cast<VectorType>(Vec->getType());
6635 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6636
6637 ElementCount VecEC = VecTy->getElementCount();
6638 ElementCount SubVecEC = SubVecTy->getElementCount();
6639 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6640 "vector_insert parameters must have the same element "
6641 "type.",
6642 &Call);
6643 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6644 "vector_insert index must be a constant multiple of "
6645 "the subvector's known minimum vector length.");
6646
6647 // If this insertion is not the 'mixed' case where a fixed vector is
6648 // inserted into a scalable vector, ensure that the insertion of the
6649 // subvector does not overrun the parent vector.
6650 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6651 Check(IdxN < VecEC.getKnownMinValue() &&
6652 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6653 "subvector operand of vector_insert would overrun the "
6654 "vector being inserted into.");
6655 }
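// As a sketch (illustrative IR), inserting a fixed subvector into a
// scalable vector at a multiple of the subvector's minimum length passes
// these checks:
//   call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(
//       <vscale x 4 x i32> %vec, <4 x i32> %sub, i64 0)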
6656 break;
6657 }
6658 case Intrinsic::vector_extract: {
6659 Value *Vec = Call.getArgOperand(0);
6660 Value *Idx = Call.getArgOperand(1);
6661 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6662
6663 VectorType *ResultTy = cast<VectorType>(Call.getType());
6664 VectorType *VecTy = cast<VectorType>(Vec->getType());
6665
6666 ElementCount VecEC = VecTy->getElementCount();
6667 ElementCount ResultEC = ResultTy->getElementCount();
6668
6669 Check(ResultTy->getElementType() == VecTy->getElementType(),
6670 "vector_extract result must have the same element "
6671 "type as the input vector.",
6672 &Call);
6673 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6674 "vector_extract index must be a constant multiple of "
6675 "the result type's known minimum vector length.");
6676
6677 // If this extraction is not the 'mixed' case where a fixed vector is
6678 // extracted from a scalable vector, ensure that the extraction does not
6679 // overrun the parent vector.
6680 if (VecEC.isScalable() == ResultEC.isScalable()) {
6681 Check(IdxN < VecEC.getKnownMinValue() &&
6682 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6683 "vector_extract would overrun.");
6684 }
6685 break;
6686 }
6687 case Intrinsic::vector_partial_reduce_fadd:
6688 case Intrinsic::vector_partial_reduce_add: {
6689 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6690 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6691
6692 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6693 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6694
6695 Check((VecWidth % AccWidth) == 0,
6696 "Invalid vector widths for partial "
6697 "reduction. The width of the input vector "
6698 "must be a positive integer multiple of "
6699 "the width of the accumulator vector.");
6700 break;
6701 }
6702 case Intrinsic::experimental_noalias_scope_decl: {
6703 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6704 break;
6705 }
6706 case Intrinsic::preserve_array_access_index:
6707 case Intrinsic::preserve_struct_access_index:
6708 case Intrinsic::aarch64_ldaxr:
6709 case Intrinsic::aarch64_ldxr:
6710 case Intrinsic::arm_ldaex:
6711 case Intrinsic::arm_ldrex: {
6712 Type *ElemTy = Call.getParamElementType(0);
6713 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6714 &Call);
6715 break;
6716 }
6717 case Intrinsic::aarch64_stlxr:
6718 case Intrinsic::aarch64_stxr:
6719 case Intrinsic::arm_stlex:
6720 case Intrinsic::arm_strex: {
6721 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6722 Check(ElemTy,
6723 "Intrinsic requires elementtype attribute on second argument.",
6724 &Call);
6725 break;
6726 }
6727 case Intrinsic::aarch64_prefetch: {
6728 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6729 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6730 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6731 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6732 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6733 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6734 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6735 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6736 break;
6737 }
6738 case Intrinsic::callbr_landingpad: {
6739 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6740 Check(CBR, "intrinsic requires callbr operand", &Call);
6741 if (!CBR)
6742 break;
6743
6744 const BasicBlock *LandingPadBB = Call.getParent();
6745 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6746 if (!PredBB) {
6747 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6748 break;
6749 }
6750 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6751 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6752 &Call);
6753 break;
6754 }
6755 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6756 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6757 "block in indirect destination list",
6758 &Call);
6759 const Instruction &First = *LandingPadBB->begin();
6760 Check(&First == &Call, "No other instructions may precede intrinsic",
6761 &Call);
6762 break;
6763 }
6764 case Intrinsic::amdgcn_cs_chain: {
6765 auto CallerCC = Call.getCaller()->getCallingConv();
6766 switch (CallerCC) {
6767 case CallingConv::AMDGPU_CS:
6768 case CallingConv::AMDGPU_CS_Chain:
6769 case CallingConv::AMDGPU_CS_ChainPreserve:
6770 case CallingConv::AMDGPU_ES:
6771 case CallingConv::AMDGPU_GS:
6772 case CallingConv::AMDGPU_HS:
6773 case CallingConv::AMDGPU_LS:
6774 case CallingConv::AMDGPU_VS:
6775 break;
6776 default:
6777 CheckFailed("Intrinsic cannot be called from functions with this "
6778 "calling convention",
6779 &Call);
6780 break;
6781 }
6782
6783 Check(Call.paramHasAttr(2, Attribute::InReg),
6784 "SGPR arguments must have the `inreg` attribute", &Call);
6785 Check(!Call.paramHasAttr(3, Attribute::InReg),
6786 "VGPR arguments must not have the `inreg` attribute", &Call);
6787
6788 auto *Next = Call.getNextNode();
6789 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6790 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6791 Intrinsic::amdgcn_unreachable;
6792 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6793 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6794 break;
6795 }
6796 case Intrinsic::amdgcn_init_exec_from_input: {
6797 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6798 Check(Arg && Arg->hasInRegAttr(),
6799 "only inreg arguments to the parent function are valid as inputs to "
6800 "this intrinsic",
6801 &Call);
6802 break;
6803 }
6804 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6805 auto CallerCC = Call.getCaller()->getCallingConv();
6806 switch (CallerCC) {
6807 case CallingConv::AMDGPU_CS_Chain:
6808 case CallingConv::AMDGPU_CS_ChainPreserve:
6809 break;
6810 default:
6811 CheckFailed("Intrinsic can only be used from functions with the "
6812 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6813 "calling conventions",
6814 &Call);
6815 break;
6816 }
6817
6818 unsigned InactiveIdx = 1;
6819 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6820 "Value for inactive lanes must not have the `inreg` attribute",
6821 &Call);
6822 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6823 "Value for inactive lanes must be a function argument", &Call);
6824 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6825 "Value for inactive lanes must be a VGPR function argument", &Call);
6826 break;
6827 }
6828 case Intrinsic::amdgcn_call_whole_wave: {
6829 auto *F = dyn_cast<Function>(Call.getArgOperand(0));
6830 Check(F, "Indirect whole wave calls are not allowed", &Call);
6831
6832 CallingConv::ID CC = F->getCallingConv();
6833 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6834 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6835 &Call);
6836
6837 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6838
6839 Check(Call.arg_size() == F->arg_size(),
6840 "Call argument count must match callee argument count", &Call);
6841
6842 // The first argument of the call is the callee, and the first argument of
6843 // the callee is the active mask. The rest of the arguments must match.
6844 Check(F->arg_begin()->getType()->isIntegerTy(1),
6845 "Callee must have i1 as its first argument", &Call);
6846 for (auto [CallArg, FuncArg] :
6847 drop_begin(zip_equal(Call.args(), F->args()))) {
6848 Check(CallArg->getType() == FuncArg.getType(),
6849 "Argument types must match", &Call);
6850
6851 // Check that inreg attributes match between call site and function
6852 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6853 FuncArg.hasInRegAttr(),
6854 "Argument inreg attributes must match", &Call);
6855 }
6856 break;
6857 }
6858 case Intrinsic::amdgcn_s_prefetch_data: {
6859 Check(
6862 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6863 break;
6864 }
6865 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6866 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6867 Value *Src0 = Call.getArgOperand(0);
6868 Value *Src1 = Call.getArgOperand(1);
6869
6870 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6871 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6872 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6873 Call.getArgOperand(3));
6874 Check(BLGP <= 4, "invalid value for blgp format", Call,
6875 Call.getArgOperand(4));
6876
6877 // AMDGPU::MFMAScaleFormats values
6878 auto getFormatNumRegs = [](unsigned FormatVal) {
6879 switch (FormatVal) {
6880 case 0:
6881 case 1:
6882 return 8u;
6883 case 2:
6884 case 3:
6885 return 6u;
6886 case 4:
6887 return 4u;
6888 default:
6889 llvm_unreachable("invalid format value");
6890 }
6891 };
6892
6893 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6894 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6895 return false;
6896 unsigned NumElts = Ty->getNumElements();
6897 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6898 };
6899
6900 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6901 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6902 Check(isValidSrcASrcBVector(Src0Ty),
6903 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6904 Check(isValidSrcASrcBVector(Src1Ty),
6905 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6906
6907 // Permit excess registers for the format.
6908 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6909 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6910 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6911 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6912 break;
6913 }
6914 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6915 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6916 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6917 Value *Src0 = Call.getArgOperand(1);
6918 Value *Src1 = Call.getArgOperand(3);
6919
6920 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6921 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6922 Check(FmtA <= 4, "invalid value for matrix format", Call,
6923 Call.getArgOperand(0));
6924 Check(FmtB <= 4, "invalid value for matrix format", Call,
6925 Call.getArgOperand(2));
6926
6927 // AMDGPU::MatrixFMT values
6928 auto getFormatNumRegs = [](unsigned FormatVal) {
6929 switch (FormatVal) {
6930 case 0:
6931 case 1:
6932 return 16u;
6933 case 2:
6934 case 3:
6935 return 12u;
6936 case 4:
6937 return 8u;
6938 default:
6939 llvm_unreachable("invalid format value");
6940 }
6941 };
6942
6943 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6944 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6945 return false;
6946 unsigned NumElts = Ty->getNumElements();
6947 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6948 };
6949
6950 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6951 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6952 Check(isValidSrcASrcBVector(Src0Ty),
6953 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6954 Check(isValidSrcASrcBVector(Src1Ty),
6955 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6956
6957 // Permit excess registers for the format.
6958 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6959 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6960 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6961 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6962 break;
6963 }
6964 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6965 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6966 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6967 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6968 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6969 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6970 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6971 Value *PtrArg = Call.getArgOperand(0);
6972 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6974 "cooperative atomic intrinsics require a generic or global pointer",
6975 &Call, PtrArg);
6976
6977 // Last argument must be a MD string.
6978 auto *Op = cast<MetadataAsValue>(Call.getArgOperand(Call.arg_size() - 1));
6979 MDNode *MD = cast<MDNode>(Op->getMetadata());
6980 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6981 "cooperative atomic intrinsics require that the last argument is a "
6982 "metadata string",
6983 &Call, Op);
6984 break;
6985 }
6986 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6987 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6988 Value *V = Call.getArgOperand(0);
6989 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6990 Check(RegCount % 8 == 0,
6991 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6992 break;
6993 }
6994 case Intrinsic::experimental_convergence_entry:
6995 case Intrinsic::experimental_convergence_anchor:
6996 break;
6997 case Intrinsic::experimental_convergence_loop:
6998 break;
6999 case Intrinsic::ptrmask: {
7000 Type *Ty0 = Call.getArgOperand(0)->getType();
7001 Type *Ty1 = Call.getArgOperand(1)->getType();
7002 Check(Ty0->isPtrOrPtrVectorTy(),
7003 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7004 "of pointers",
7005 &Call);
7006 Check(
7007 Ty0->isVectorTy() == Ty1->isVectorTy(),
7008 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7009 &Call);
7010 if (Ty0->isVectorTy())
7011 Check(cast<VectorType>(Ty0)->getElementCount() ==
7012 cast<VectorType>(Ty1)->getElementCount(),
7013 "llvm.ptrmask intrinsic arguments must have the same number of "
7014 "elements",
7015 &Call);
7016 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7017 "llvm.ptrmask intrinsic second argument bitwidth must match "
7018 "pointer index type size of first argument",
7019 &Call);
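// For example (illustrative IR, assuming a 64-bit pointer index type),
// clearing the low four bits of a pointer:
//   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)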
7020 break;
7021 }
7022 case Intrinsic::thread_pointer: {
7023 Check(Call.getType()->getPointerAddressSpace() ==
7024 DL.getDefaultGlobalsAddressSpace(),
7025 "llvm.thread.pointer intrinsic return type must be for the globals "
7026 "address space",
7027 &Call);
7028 break;
7029 }
7030 case Intrinsic::threadlocal_address: {
7031 const Value &Arg0 = *Call.getArgOperand(0);
7032 Check(isa<GlobalValue>(Arg0),
7033 "llvm.threadlocal.address first argument must be a GlobalValue");
7034 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7035 "llvm.threadlocal.address operand isThreadLocal() must be true");
7036 break;
7037 }
7038 case Intrinsic::lifetime_start:
7039 case Intrinsic::lifetime_end: {
7040 Value *Ptr = Call.getArgOperand(0);
7041 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr),
7042 "llvm.lifetime.start/end can only be used on alloca or poison",
7043 &Call);
7044 break;
7045 }
7046 };
7047
7048 // Verify that there aren't any unmediated control transfers between funclets.
7049 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
7050 Function *F = Call.getParent()->getParent();
7051 if (F->hasPersonalityFn() &&
7052 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7053 // Run EH funclet coloring on-demand and cache results for other intrinsic
7054 // calls in this function
7055 if (BlockEHFuncletColors.empty())
7056 BlockEHFuncletColors = colorEHFunclets(*F);
7057
7058 // Check for catch-/cleanup-pad in first funclet block
7059 bool InEHFunclet = false;
7060 BasicBlock *CallBB = Call.getParent();
7061 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7062 assert(CV.size() > 0 && "Uncolored block");
7063 for (BasicBlock *ColorFirstBB : CV)
7064 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7065 It != ColorFirstBB->end())
7066 if (isa<FuncletPadInst>(&*It))
7067 InEHFunclet = true;
7068
7069 // Check for funclet operand bundle
7070 bool HasToken = false;
7071 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7072 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
7073 HasToken = true;
7074
7075 // This would cause silent code truncation in WinEHPrepare
7076 if (InEHFunclet)
7077 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7078 }
7079 }
7080}
7081
7082/// Carefully grab the subprogram from a local scope.
7083///
7084/// This carefully grabs the subprogram from a local scope, avoiding the
7085 /// built-in assertions that would typically fire.
7086 static DISubprogram *getSubprogram(Metadata *LocalScope) {
7087 if (!LocalScope)
7088 return nullptr;
7089
7090 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7091 return SP;
7092
7093 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7094 return getSubprogram(LB->getRawScope());
7095
7096 // Just return null; broken scope chains are checked elsewhere.
7097 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7098 return nullptr;
7099}
7100
7101 void Verifier::visit(DbgLabelRecord &DLR) {
7102 CheckDI(isa<DILabel>(DLR.getRawLabel()),
7103 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7104
7105 // Ignore broken !dbg attachments; they're checked elsewhere.
7106 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7107 if (!isa<DILocation>(N))
7108 return;
7109
7110 BasicBlock *BB = DLR.getParent();
7111 Function *F = BB ? BB->getParent() : nullptr;
7112
7113 // The scopes for variables and !dbg attachments must agree.
7114 DILabel *Label = DLR.getLabel();
7115 DILocation *Loc = DLR.getDebugLoc();
7116 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7117
7118 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7119 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7120 if (!LabelSP || !LocSP)
7121 return;
7122
7123 CheckDI(LabelSP == LocSP,
7124 "mismatched subprogram between #dbg_label label and !dbg attachment",
7125 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7126 Loc->getScope()->getSubprogram());
7127}
7128
7129void Verifier::visit(DbgVariableRecord &DVR) {
7130 BasicBlock *BB = DVR.getParent();
7131 Function *F = BB->getParent();
7132
7133 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7134 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7135 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7136 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7137 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7138
7139 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7140 // DIArgList, or an empty MDNode (which is a legacy representation for an
7141 // "undef" location).
7142 auto *MD = DVR.getRawLocation();
7143 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7144 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7145 "invalid #dbg record address/value", &DVR, MD, BB, F);
7146 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7147 visitValueAsMetadata(*VAM, F);
7148 if (DVR.isDbgDeclare()) {
7149 // Allow integers here to support inttoptr salvage.
7150 Type *Ty = VAM->getValue()->getType();
7151 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7152 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7153 F);
7154 }
7155 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7156 visitDIArgList(*AL, F);
7157 }
7158
7159 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
7160 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7161 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7162
7163 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
7164 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7165 F);
7166 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7167
7168 if (DVR.isDbgAssign()) {
7169 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
7170 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7171 F);
7172 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7173 AreDebugLocsAllowed::No);
7174
7175 const auto *RawAddr = DVR.getRawAddress();
7176 // Similarly to the location above, the address for an assign
7177 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7178 // represents an undef address.
7179 CheckDI(
7180 isa<ValueAsMetadata>(RawAddr) ||
7181 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7182 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7183 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7184 visitValueAsMetadata(*VAM, F);
7185
7186 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
7187 "invalid #dbg_assign address expression", &DVR,
7188 DVR.getRawAddressExpression(), BB, F);
7189 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7190
7191 // All of the linked instructions should be in the same function as DVR.
7192 for (Instruction *I : at::getAssignmentInsts(&DVR))
7193 CheckDI(DVR.getFunction() == I->getFunction(),
7194 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7195 }
7196
7197 // This check is redundant with one in visitLocalVariable().
7198 DILocalVariable *Var = DVR.getVariable();
7199 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7200 BB, F);
7201
7202 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7203 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7204 &DVR, DLNode, BB, F);
7205 DILocation *Loc = DVR.getDebugLoc();
7206
7207 // The scopes for variables and !dbg attachments must agree.
7208 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7209 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7210 if (!VarSP || !LocSP)
7211 return; // Broken scope chains are checked elsewhere.
7212
7213 CheckDI(VarSP == LocSP,
7214 "mismatched subprogram between #dbg record variable and DILocation",
7215 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7216 Loc->getScope()->getSubprogram(), BB, F);
7217
7218 verifyFnArgs(DVR);
7219}
7220
7221void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7222 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7223 auto *RetTy = cast<VectorType>(VPCast->getType());
7224 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7225 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7226 "VP cast intrinsic first argument and result vector lengths must be "
7227 "equal",
7228 *VPCast);
7229
7230 switch (VPCast->getIntrinsicID()) {
7231 default:
7232 llvm_unreachable("Unknown VP cast intrinsic");
7233 case Intrinsic::vp_trunc:
7234 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7235 "llvm.vp.trunc intrinsic first argument and result element type "
7236 "must be integer",
7237 *VPCast);
7238 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7239 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7240 "larger than the bit size of the return type",
7241 *VPCast);
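// Illustrative IR sketch for these constraints: a truncation from i32 to
// i16 elements with matching vector lengths, a mask, and an explicit
// vector length:
//   %r = call <4 x i16> @llvm.vp.trunc.v4i16.v4i32(<4 x i32> %x,
//            <4 x i1> %m, i32 %evl)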
7242 break;
7243 case Intrinsic::vp_zext:
7244 case Intrinsic::vp_sext:
7245 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7246 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7247 "element type must be integer",
7248 *VPCast);
7249 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7250 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7251 "argument must be smaller than the bit size of the return type",
7252 *VPCast);
7253 break;
7254 case Intrinsic::vp_fptoui:
7255 case Intrinsic::vp_fptosi:
7256 case Intrinsic::vp_lrint:
7257 case Intrinsic::vp_llrint:
7258 Check(
7259 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7260 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7261 "type must be floating-point and result element type must be integer",
7262 *VPCast);
7263 break;
7264 case Intrinsic::vp_uitofp:
7265 case Intrinsic::vp_sitofp:
7266 Check(
7267 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7268 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7269 "type must be integer and result element type must be floating-point",
7270 *VPCast);
7271 break;
7272 case Intrinsic::vp_fptrunc:
7273 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7274 "llvm.vp.fptrunc intrinsic first argument and result element type "
7275 "must be floating-point",
7276 *VPCast);
7277 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7278 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7279 "larger than the bit size of the return type",
7280 *VPCast);
7281 break;
7282 case Intrinsic::vp_fpext:
7283 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7284 "llvm.vp.fpext intrinsic first argument and result element type "
7285 "must be floating-point",
7286 *VPCast);
7287 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7288 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7289 "smaller than the bit size of the return type",
7290 *VPCast);
7291 break;
7292 case Intrinsic::vp_ptrtoint:
7293 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7294 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7295 "pointer and result element type must be integer",
7296 *VPCast);
7297 break;
7298 case Intrinsic::vp_inttoptr:
7299 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7300 "llvm.vp.inttoptr intrinsic first argument element type must be "
7301 "integer and result element type must be pointer",
7302 *VPCast);
7303 break;
7304 }
7305 }
7306
7307 switch (VPI.getIntrinsicID()) {
7308 case Intrinsic::vp_fcmp: {
7309 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7310 Check(CmpInst::isFPPredicate(Pred),
7311 "invalid predicate for VP FP comparison intrinsic", &VPI);
7312 break;
7313 }
7314 case Intrinsic::vp_icmp: {
7315 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7316 Check(CmpInst::isIntPredicate(Pred),
7317 "invalid predicate for VP integer comparison intrinsic", &VPI);
7318 break;
7319 }
7320 case Intrinsic::vp_is_fpclass: {
7321 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7322 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7323 "unsupported bits for llvm.vp.is.fpclass test mask");
7324 break;
7325 }
7326 case Intrinsic::experimental_vp_splice: {
7327 VectorType *VecTy = cast<VectorType>(VPI.getType());
7328 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7329 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7330 if (VPI.getParent() && VPI.getParent()->getParent()) {
7331 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7332 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7333 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7334 }
7335 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7336 (Idx >= 0 && Idx < KnownMinNumElements),
7337 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7338 "known minimum number of elements in the vector. For scalable "
7339 "vectors the minimum number of elements is determined from "
7340 "vscale_range.",
7341 &VPI);
7342 break;
7343 }
7344 }
7345}
7346
7347void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7348 unsigned NumOperands = FPI.getNonMetadataArgCount();
7349 bool HasRoundingMD =
7350 Intrinsic::hasConstrainedFPRoundingModeOperand(FPI.getIntrinsicID());
7351
7352 // Add the expected number of metadata operands.
7353 NumOperands += (1 + HasRoundingMD);
7354
7355 // Compare intrinsics carry an extra predicate metadata operand.
7356 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
7357 NumOperands += 1;
7358 Check((FPI.arg_size() == NumOperands),
7359 "invalid arguments for constrained FP intrinsic", &FPI);
7360
7361 switch (FPI.getIntrinsicID()) {
7362 case Intrinsic::experimental_constrained_lrint:
7363 case Intrinsic::experimental_constrained_llrint: {
7364 Type *ValTy = FPI.getArgOperand(0)->getType();
7365 Type *ResultTy = FPI.getType();
7366 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7367 "Intrinsic does not support vectors", &FPI);
7368 break;
7369 }
7370
7371 case Intrinsic::experimental_constrained_lround:
7372 case Intrinsic::experimental_constrained_llround: {
7373 Type *ValTy = FPI.getArgOperand(0)->getType();
7374 Type *ResultTy = FPI.getType();
7375 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7376 "Intrinsic does not support vectors", &FPI);
7377 break;
7378 }
7379
7380 case Intrinsic::experimental_constrained_fcmp:
7381 case Intrinsic::experimental_constrained_fcmps: {
7382 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7383 Check(CmpInst::isFPPredicate(Pred),
7384 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7385 break;
7386 }
7387
7388 case Intrinsic::experimental_constrained_fptosi:
7389 case Intrinsic::experimental_constrained_fptoui: {
7390 Value *Operand = FPI.getArgOperand(0);
7391 ElementCount SrcEC;
7392 Check(Operand->getType()->isFPOrFPVectorTy(),
7393 "Intrinsic first argument must be floating point", &FPI);
7394 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7395 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7396 }
7397
7398 Operand = &FPI;
7399 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7400 "Intrinsic first argument and result disagree on vector use", &FPI);
7401 Check(Operand->getType()->isIntOrIntVectorTy(),
7402 "Intrinsic result must be an integer", &FPI);
7403 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7404 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7405 "Intrinsic first argument and result vector lengths must be equal",
7406 &FPI);
7407 }
7408 break;
7409 }
7410
7411 case Intrinsic::experimental_constrained_sitofp:
7412 case Intrinsic::experimental_constrained_uitofp: {
7413 Value *Operand = FPI.getArgOperand(0);
7414 ElementCount SrcEC;
7415 Check(Operand->getType()->isIntOrIntVectorTy(),
7416 "Intrinsic first argument must be integer", &FPI);
7417 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7418 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7419 }
7420
7421 Operand = &FPI;
7422 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7423 "Intrinsic first argument and result disagree on vector use", &FPI);
7424 Check(Operand->getType()->isFPOrFPVectorTy(),
7425 "Intrinsic result must be a floating point", &FPI);
7426 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7427 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7428 "Intrinsic first argument and result vector lengths must be equal",
7429 &FPI);
7430 }
7431 break;
7432 }
7433
7434 case Intrinsic::experimental_constrained_fptrunc:
7435 case Intrinsic::experimental_constrained_fpext: {
7436 Value *Operand = FPI.getArgOperand(0);
7437 Type *OperandTy = Operand->getType();
7438 Value *Result = &FPI;
7439 Type *ResultTy = Result->getType();
7440 Check(OperandTy->isFPOrFPVectorTy(),
7441 "Intrinsic first argument must be FP or FP vector", &FPI);
7442 Check(ResultTy->isFPOrFPVectorTy(),
7443 "Intrinsic result must be FP or FP vector", &FPI);
7444 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7445 "Intrinsic first argument and result disagree on vector use", &FPI);
7446 if (OperandTy->isVectorTy()) {
7447 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7448 cast<VectorType>(ResultTy)->getElementCount(),
7449 "Intrinsic first argument and result vector lengths must be equal",
7450 &FPI);
7451 }
7452 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7453 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7454 "Intrinsic first argument's type must be larger than result type",
7455 &FPI);
7456 } else {
7457 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7458 "Intrinsic first argument's type must be smaller than result type",
7459 &FPI);
7460 }
7461 break;
7462 }
7463
7464 default:
7465 break;
7466 }
7467
7468 // If a non-metadata argument is passed in a metadata slot then the
7469 // error will be caught earlier when the incorrect argument doesn't
7470 // match the specification in the intrinsic call table. Thus, no
7471 // argument type check is needed here.
7472
7473 Check(FPI.getExceptionBehavior().has_value(),
7474 "invalid exception behavior argument", &FPI);
7475 if (HasRoundingMD) {
7476 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7477 &FPI);
7478 }
7479}
7480
7481void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7482 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7483 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7484
7485 // We don't know whether this intrinsic verified correctly.
7486 if (!V || !E || !E->isValid())
7487 return;
7488
7489 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7490 auto Fragment = E->getFragmentInfo();
7491 if (!Fragment)
7492 return;
7493
7494 // The frontend helps out GDB by emitting the members of local anonymous
7495 // unions as artificial local variables with shared storage. When SROA splits
7496 // the storage for artificial local variables that are smaller than the entire
7497 // union, the overhang piece will be outside of the allotted space for the
7498 // variable and this check fails.
7499 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7500 if (V->isArtificial())
7501 return;
7502
7503 verifyFragmentExpression(*V, *Fragment, &DVR);
7504}
7505
7506template <typename ValueOrMetadata>
7507 void Verifier::verifyFragmentExpression(const DIVariable &V,
7508 DIExpression::FragmentInfo Fragment,
7509 ValueOrMetadata *Desc) {
7510 // If there's no size, the type is broken, but that should be checked
7511 // elsewhere.
7512 auto VarSize = V.getSizeInBits();
7513 if (!VarSize)
7514 return;
7515
7516 unsigned FragSize = Fragment.SizeInBits;
7517 unsigned FragOffset = Fragment.OffsetInBits;
7518 CheckDI(FragSize + FragOffset <= *VarSize,
7519 "fragment is larger than or outside of variable", Desc, &V);
7520 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
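// For example, a 32-bit fragment at offset 32 of a 64-bit variable passes
// both checks, while a 48-bit fragment at offset 32 (it overhangs the
// variable) or a 64-bit fragment at offset 0 (it covers the whole
// variable) is rejected.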
7521}
7522
7523void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7524 // This function does not take the scope of non-inlined function arguments
7525 // into account. Don't run it if the current function is nodebug, because it
7526 // may contain inlined debug intrinsics.
7527 if (!HasDebugInfo)
7528 return;
7529
7530 // For performance reasons only check non-inlined ones.
7531 if (DVR.getDebugLoc()->getInlinedAt())
7532 return;
7533
7534 DILocalVariable *Var = DVR.getVariable();
7535 CheckDI(Var, "#dbg record without variable");
7536
7537 unsigned ArgNo = Var->getArg();
7538 if (!ArgNo)
7539 return;
7540
7541 // Verify there are no duplicate function argument debug info entries.
7542 // These will cause hard-to-debug assertions in the DWARF backend.
7543 if (DebugFnArgs.size() < ArgNo)
7544 DebugFnArgs.resize(ArgNo, nullptr);
7545
7546 auto *Prev = DebugFnArgs[ArgNo - 1];
7547 DebugFnArgs[ArgNo - 1] = Var;
7548 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7549 Prev, Var);
7550}
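// Illustrative sketch (not from the upstream file): two non-inlined
// #dbg_value records whose variables both carry arg: 1, e.g.
// !DILocalVariable(name: "a", arg: 1, ...) and
// !DILocalVariable(name: "b", arg: 1, ...), trip the
// "conflicting debug info for argument" check above.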
7551
7552void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7553 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7554
7555 // We don't know whether this intrinsic verified correctly.
7556 if (!E || !E->isValid())
7557 return;
7558
7559 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7560 Value *VarValue = DVR.getVariableLocationOp(0);
7561 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7562 return;
7563 // We allow EntryValues for swift async arguments, as they have an
7564 // ABI-guarantee to be turned into a specific register.
7565 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7566 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7567 return;
7568 }
7569
7570 CheckDI(!E->isEntryValue(),
7571 "Entry values are only allowed in MIR unless they target a "
7572 "swiftasync Argument",
7573 &DVR);
7574}
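// Illustrative sketch (not from the upstream file): an expression such as
// !DIExpression(DW_OP_LLVM_entry_value, 1) is rejected here unless its
// location operand is undef/poison or an Argument marked swiftasync; entry
// values otherwise only appear at the MIR level.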
7575
7576void Verifier::verifyCompileUnits() {
7577 // When more than one Module is imported into the same context, such as during
7578 // an LTO build before linking the modules, ODR type uniquing may cause types
7579 // to point to a different CU. This check does not make sense in this case.
7580 if (M.getContext().isODRUniquingDebugTypes())
7581 return;
7582 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7583 SmallPtrSet<const Metadata *, 2> Listed;
7584 if (CUs)
7585 Listed.insert_range(CUs->operands());
7586 for (const auto *CU : CUVisited)
7587 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7588 CUVisited.clear();
7589}
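// Illustrative sketch (not from the upstream file): every DICompileUnit
// reachable from debug metadata must also be listed in the named node, e.g.
//   !llvm.dbg.cu = !{!0}
//   !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, ...)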
7590
7591void Verifier::verifyDeoptimizeCallingConvs() {
7592 if (DeoptimizeDeclarations.empty())
7593 return;
7594
7595 const Function *First = DeoptimizeDeclarations[0];
7596 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7597 Check(First->getCallingConv() == F->getCallingConv(),
7598 "All llvm.experimental.deoptimize declarations must have the same "
7599 "calling convention",
7600 First, F);
7601 }
7602}
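// Illustrative sketch (not from the upstream file): a module may declare
// several overloads, say
//   declare i32  @llvm.experimental.deoptimize.i32(...)
//   declare void @llvm.experimental.deoptimize.isVoid(...)
// and the check above requires all of them to share one calling convention.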
7603
7604void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7605 const OperandBundleUse &BU) {
7606 FunctionType *FTy = Call.getFunctionType();
7607
7608 Check((FTy->getReturnType()->isPointerTy() ||
7609 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7610 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7611 "function returning a pointer or a non-returning function that has a "
7612 "void return type",
7613 Call);
7614
7615 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7616 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7617 "an argument",
7618 Call);
7619
7620 auto *Fn = cast<Function>(BU.Inputs.front());
7621 Intrinsic::ID IID = Fn->getIntrinsicID();
7622
7623 if (IID) {
7624 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7625 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7626 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7627 "invalid function argument", Call);
7628 } else {
7629 StringRef FnName = Fn->getName();
7630 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7631 FnName == "objc_claimAutoreleasedReturnValue" ||
7632 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7633 "invalid function argument", Call);
7634 }
7635}
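// Illustrative sketch (not from the upstream file): a well-formed use looks
// like
//   %obj = call ptr @make_object() [ "clang.arc.attachedcall"(
//              ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// i.e. the callee returns a pointer and the bundle names exactly one of the
// accepted ObjC runtime functions.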
7636
7637void Verifier::verifyNoAliasScopeDecl() {
7638 if (NoAliasScopeDecls.empty())
7639 return;
7640
7641 // Each declaration must declare exactly one scope at a time.
7642 for (auto *II : NoAliasScopeDecls) {
7643 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7644 "Not a llvm.experimental.noalias.scope.decl ?");
7645 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7646 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7647 Check(ScopeListMV != nullptr,
7648 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7649 "argument",
7650 II);
7651
7652 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7653 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7654 Check(ScopeListMD->getNumOperands() == 1,
7655 "!id.scope.list must point to a list with a single scope", II);
7656 visitAliasScopeListMetadata(ScopeListMD);
7657 }
7658
7659 // Only check the domination rule when requested. Once all passes have been
7660 // adapted this option can go away.
7661 if (!VerifyNoAliasScopeDomination)
7662 return;
7663
7664 // Now sort the intrinsics based on the scope MDNode so that declarations of
7665 // the same scopes are next to each other.
7666 auto GetScope = [](IntrinsicInst *II) {
7667 const auto *ScopeListMV = cast<MetadataAsValue>(
7668 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7669 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7670 };
7671
7672 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7673 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7674 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7675 return GetScope(Lhs) < GetScope(Rhs);
7676 };
7677
7678 llvm::sort(NoAliasScopeDecls, Compare);
7679
7680 // Go over the intrinsics and check that for the same scope, they are not
7681 // dominating each other.
7682 auto ItCurrent = NoAliasScopeDecls.begin();
7683 while (ItCurrent != NoAliasScopeDecls.end()) {
7684 auto CurScope = GetScope(*ItCurrent);
7685 auto ItNext = ItCurrent;
7686 do {
7687 ++ItNext;
7688 } while (ItNext != NoAliasScopeDecls.end() &&
7689 GetScope(*ItNext) == CurScope);
7690
7691 // [ItCurrent, ItNext) represents the declarations for the same scope.
7692 // Ensure they are not dominating each other, but only if it is not too
7693 // expensive.
7694 if (ItNext - ItCurrent < 32)
7695 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7696 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7697 if (I != J)
7698 Check(!DT.dominates(I, J),
7699 "llvm.experimental.noalias.scope.decl dominates another one "
7700 "with the same scope",
7701 I);
7702 ItCurrent = ItNext;
7703 }
7704}
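// Illustrative sketch (not from the upstream file, metadata ids made up):
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
//   !2 = !{!3}                        ; list with exactly one scope
//   !3 = distinct !{!3, !4, !"scope"} ; scope node in domain !4
//   !4 = distinct !{!4, !"domain"}
// With -verify-noalias-scope-decl-dom, two declarations of the same scope may
// not dominate each other.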
7705
7706//===----------------------------------------------------------------------===//
7707// Implement the public interfaces to this file...
7708//===----------------------------------------------------------------------===//
7709
7710 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7711 Function &F = const_cast<Function &>(f);
7712
7713 // Don't use a raw_null_ostream. Printing IR is expensive.
7714 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7715
7716 // Note that this function's return value is inverted from what you would
7717 // expect of a function called "verify".
7718 return !V.verify(F);
7719}
7720
7721 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7722 bool *BrokenDebugInfo) {
7723 // Don't use a raw_null_ostream. Printing IR is expensive.
7724 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7725
7726 bool Broken = false;
7727 for (const Function &F : M)
7728 Broken |= !V.verify(F);
7729
7730 Broken |= !V.verify();
7731 if (BrokenDebugInfo)
7732 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7733 // Note that this function's return value is inverted from what you would
7734 // expect of a function called "verify".
7735 return Broken;
7736}
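// Minimal usage sketch (assumes a Module M you already hold; not code from
// this file). Because the return value is inverted, a typical caller writes:
//   if (llvm::verifyModule(M, &llvm::errs()))
//     llvm::report_fatal_error("module failed verification");
// and, analogously, llvm::verifyFunction(F, &llvm::errs()) for one function.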
7737
7738namespace {
7739
7740struct VerifierLegacyPass : public FunctionPass {
7741 static char ID;
7742
7743 std::unique_ptr<Verifier> V;
7744 bool FatalErrors = true;
7745
7746 VerifierLegacyPass() : FunctionPass(ID) {
7747 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7748 }
7749 explicit VerifierLegacyPass(bool FatalErrors)
7750 : FunctionPass(ID),
7751 FatalErrors(FatalErrors) {
7752 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7753 }
7754
7755 bool doInitialization(Module &M) override {
7756 V = std::make_unique<Verifier>(
7757 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7758 return false;
7759 }
7760
7761 bool runOnFunction(Function &F) override {
7762 if (!V->verify(F) && FatalErrors) {
7763 errs() << "in function " << F.getName() << '\n';
7764 report_fatal_error("Broken function found, compilation aborted!");
7765 }
7766 return false;
7767 }
7768
7769 bool doFinalization(Module &M) override {
7770 bool HasErrors = false;
7771 for (Function &F : M)
7772 if (F.isDeclaration())
7773 HasErrors |= !V->verify(F);
7774
7775 HasErrors |= !V->verify();
7776 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7777 report_fatal_error("Broken module found, compilation aborted!");
7778 return false;
7779 }
7780
7781 void getAnalysisUsage(AnalysisUsage &AU) const override {
7782 AU.setPreservesAll();
7783 }
7784};
7785
7786} // end anonymous namespace
7787
7788/// Helper to issue failure from the TBAA verification
7789template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7790 if (Diagnostic)
7791 return Diagnostic->CheckFailed(Args...);
7792}
7793
7794#define CheckTBAA(C, ...) \
7795 do { \
7796 if (!(C)) { \
7797 CheckFailed(__VA_ARGS__); \
7798 return false; \
7799 } \
7800 } while (false)
7801
7802/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7803/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7804/// struct-type node describing an aggregate data structure (like a struct).
7805TBAAVerifier::TBAABaseNodeSummary
7806TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7807 bool IsNewFormat) {
7808 if (BaseNode->getNumOperands() < 2) {
7809 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7810 return {true, ~0u};
7811 }
7812
7813 auto Itr = TBAABaseNodes.find(BaseNode);
7814 if (Itr != TBAABaseNodes.end())
7815 return Itr->second;
7816
7817 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7818 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7819 (void)InsertResult;
7820 assert(InsertResult.second && "We just checked!");
7821 return Result;
7822}
7823
7824TBAAVerifier::TBAABaseNodeSummary
7825TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7826 const MDNode *BaseNode, bool IsNewFormat) {
7827 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7828
7829 if (BaseNode->getNumOperands() == 2) {
7830 // Scalar nodes can only be accessed at offset 0.
7831 return isValidScalarTBAANode(BaseNode)
7832 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7833 : InvalidNode;
7834 }
7835
7836 if (IsNewFormat) {
7837 if (BaseNode->getNumOperands() % 3 != 0) {
7838 CheckFailed("Access tag nodes must have the number of operands that is a "
7839 "multiple of 3!", BaseNode);
7840 return InvalidNode;
7841 }
7842 } else {
7843 if (BaseNode->getNumOperands() % 2 != 1) {
7844 CheckFailed("Struct tag nodes must have an odd number of operands!",
7845 BaseNode);
7846 return InvalidNode;
7847 }
7848 }
7849
7850 // Check the type size field.
7851 if (IsNewFormat) {
7852 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7853 BaseNode->getOperand(1));
7854 if (!TypeSizeNode) {
7855 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7856 return InvalidNode;
7857 }
7858 }
7859
7860 // Check the type name field. In the new format it can be anything.
7861 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7862 CheckFailed("Struct tag nodes have a string as their first operand",
7863 BaseNode);
7864 return InvalidNode;
7865 }
7866
7867 bool Failed = false;
7868
7869 std::optional<APInt> PrevOffset;
7870 unsigned BitWidth = ~0u;
7871
7872 // We've already checked that BaseNode is not a degenerate root node with one
7873 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7874 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7875 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7876 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7877 Idx += NumOpsPerField) {
7878 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7879 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7880 if (!isa<MDNode>(FieldTy)) {
7881 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7882 Failed = true;
7883 continue;
7884 }
7885
7886 auto *OffsetEntryCI =
7887 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7888 if (!OffsetEntryCI) {
7889 CheckFailed("Offset entries must be constants!", I, BaseNode);
7890 Failed = true;
7891 continue;
7892 }
7893
7894 if (BitWidth == ~0u)
7895 BitWidth = OffsetEntryCI->getBitWidth();
7896
7897 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7898 CheckFailed(
7899 "Bitwidth between the offsets and struct type entries must match", I,
7900 BaseNode);
7901 Failed = true;
7902 continue;
7903 }
7904
7905 // NB! As far as I can tell, we generate a non-strictly increasing offset
7906 // sequence only from structs that have zero size bit fields. When
7907 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7908 // pick the field lexically the latest in struct type metadata node. This
7909 // mirrors the actual behavior of the alias analysis implementation.
7910 bool IsAscending =
7911 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7912
7913 if (!IsAscending) {
7914 CheckFailed("Offsets must be increasing!", I, BaseNode);
7915 Failed = true;
7916 }
7917
7918 PrevOffset = OffsetEntryCI->getValue();
7919
7920 if (IsNewFormat) {
7921 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7922 BaseNode->getOperand(Idx + 2));
7923 if (!MemberSizeNode) {
7924 CheckFailed("Member size entries must be constants!", I, BaseNode);
7925 Failed = true;
7926 continue;
7927 }
7928 }
7929 }
7930
7931 return Failed ? InvalidNode
7932 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7933}
7934
7935static bool IsRootTBAANode(const MDNode *MD) {
7936 return MD->getNumOperands() < 2;
7937}
7938
7939static bool IsScalarTBAANodeImpl(const MDNode *MD,
7940 SmallPtrSetImpl<const MDNode *> &Visited) {
7941 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7942 return false;
7943
7944 if (!isa<MDString>(MD->getOperand(0)))
7945 return false;
7946
7947 if (MD->getNumOperands() == 3) {
7948 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7949 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7950 return false;
7951 }
7952
7953 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7954 return Parent && Visited.insert(Parent).second &&
7955 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7956}
7957
7958bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7959 auto ResultIt = TBAAScalarNodes.find(MD);
7960 if (ResultIt != TBAAScalarNodes.end())
7961 return ResultIt->second;
7962
7963 SmallPtrSet<const MDNode *, 4> Visited;
7964 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7965 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7966 (void)InsertResult;
7967 assert(InsertResult.second && "Just checked!");
7968
7969 return Result;
7970}
7971
7972/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7973/// Offset in place to be the offset within the field node returned.
7974///
7975/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7976MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
7977 const MDNode *BaseNode,
7978 APInt &Offset,
7979 bool IsNewFormat) {
7980 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7981
7982 // Scalar nodes have only one possible "field" -- their parent in the access
7983 // hierarchy. Offset must be zero at this point, but our caller is supposed
7984 // to check that.
7985 if (BaseNode->getNumOperands() == 2)
7986 return cast<MDNode>(BaseNode->getOperand(1));
7987
7988 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7989 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7990 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7991 Idx += NumOpsPerField) {
7992 auto *OffsetEntryCI =
7993 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7994 if (OffsetEntryCI->getValue().ugt(Offset)) {
7995 if (Idx == FirstFieldOpNo) {
7996 CheckFailed("Could not find TBAA parent in struct type node", I,
7997 BaseNode, &Offset);
7998 return nullptr;
7999 }
8000
8001 unsigned PrevIdx = Idx - NumOpsPerField;
8002 auto *PrevOffsetEntryCI =
8003 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8004 Offset -= PrevOffsetEntryCI->getValue();
8005 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8006 }
8007 }
8008
8009 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8010 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8011 BaseNode->getOperand(LastIdx + 1));
8012 Offset -= LastOffsetEntryCI->getValue();
8013 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8014}
8015
8016 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
8017 if (!Type || Type->getNumOperands() < 3)
8018 return false;
8019
8020 // In the new format type nodes shall have a reference to the parent type as
8021 // its first operand.
8022 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8023}
8024
8025 bool TBAAVerifier::visitTBAAMetadata(const Instruction *I, const MDNode *MD) {
8026 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8027 MD);
8028
8029 if (I)
8033 "This instruction shall not have a TBAA access tag!", I);
8034
8035 bool IsStructPathTBAA =
8036 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8037
8038 CheckTBAA(IsStructPathTBAA,
8039 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8040 I);
8041
8042 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8043 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8044
8045 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8046
8047 if (IsNewFormat) {
8048 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8049 "Access tag metadata must have either 4 or 5 operands", I, MD);
8050 } else {
8051 CheckTBAA(MD->getNumOperands() < 5,
8052 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8053 }
8054
8055 // Check the access size field.
8056 if (IsNewFormat) {
8057 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8058 MD->getOperand(3));
8059 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8060 }
8061
8062 // Check the immutability flag.
8063 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8064 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8065 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8066 MD->getOperand(ImmutabilityFlagOpNo));
8067 CheckTBAA(IsImmutableCI,
8068 "Immutability tag on struct tag metadata must be a constant", I,
8069 MD);
8070 CheckTBAA(
8071 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8072 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8073 MD);
8074 }
8075
8076 CheckTBAA(BaseNode && AccessType,
8077 "Malformed struct tag metadata: base and access-type "
8078 "should be non-null and point to Metadata nodes",
8079 I, MD, BaseNode, AccessType);
8080
8081 if (!IsNewFormat) {
8082 CheckTBAA(isValidScalarTBAANode(AccessType),
8083 "Access type node must be a valid scalar type", I, MD,
8084 AccessType);
8085 }
8086
8087 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
8088 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8089
8090 APInt Offset = OffsetCI->getValue();
8091 bool SeenAccessTypeInPath = false;
8092
8093 SmallPtrSet<MDNode *, 4> StructPath;
8094
8095 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8096 BaseNode =
8097 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8098 if (!StructPath.insert(BaseNode).second) {
8099 CheckFailed("Cycle detected in struct path", I, MD);
8100 return false;
8101 }
8102
8103 bool Invalid;
8104 unsigned BaseNodeBitWidth;
8105 std::tie(Invalid, BaseNodeBitWidth) =
8106 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8107
8108 // If the base node is invalid in itself, then we've already printed all the
8109 // errors we wanted to print.
8110 if (Invalid)
8111 return false;
8112
8113 SeenAccessTypeInPath |= BaseNode == AccessType;
8114
8115 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8116 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8117 MD, &Offset);
8118
8119 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8120 (BaseNodeBitWidth == 0 && Offset == 0) ||
8121 (IsNewFormat && BaseNodeBitWidth == ~0u),
8122 "Access bit-width not the same as description bit-width", I, MD,
8123 BaseNodeBitWidth, Offset.getBitWidth());
8124
8125 if (IsNewFormat && SeenAccessTypeInPath)
8126 break;
8127 }
8128
8129 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8130 MD);
8131 return true;
8132}
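// For reference, an illustrative example (ids and names made up) of
// struct-path access metadata this walk accepts:
//   !0 = !{!"Simple C/C++ TBAA"}           ; root
//   !1 = !{!"omnipotent char", !0, i64 0}  ; scalar type node
//   !2 = !{!"int", !1, i64 0}              ; scalar type node
//   !3 = !{!2, !2, i64 0}                  ; access tag: base, access, offset
// Here the base node equals the access type at offset 0, so the path check
// above sees the access type immediately and succeeds.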
8133
8134char VerifierLegacyPass::ID = 0;
8135INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8136
8137 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
8138 return new VerifierLegacyPass(FatalErrors);
8139}
8140
8141AnalysisKey VerifierAnalysis::Key;
8142 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
8143 ModuleAnalysisManager &) {
8144 Result Res;
8145 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
8146 return Res;
8147 }
8148 
8149 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
8150 FunctionAnalysisManager &) {
8151 return {llvm::verifyFunction(F, &dbgs()), false};
8152 }
8153 
8154 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
8155 auto Res = AM.getResult<VerifierAnalysis>(M);
8156 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8157 report_fatal_error("Broken module found, compilation aborted!");
8158
8159 return PreservedAnalyses::all();
8160}
8161
8162 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
8163 auto res = AM.getResult<VerifierAnalysis>(F);
8164 if (res.IRBroken && FatalErrors)
8165 report_fatal_error("Broken function found, compilation aborted!");
8166
8167 return PreservedAnalyses::all();
8168}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:234
bool isNegative() const
Definition Constants.h:217
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:222
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:165
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:171
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:162
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1081
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1068
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1071
Constant * getDeactivationSymbol() const
Definition Constants.h:1090
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1074
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:293
Value * getOperand(unsigned i) const
Definition User.h:233
unsigned getNumOperands() const
Definition User.h:255
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:819
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2530
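all_of and enumerate above are range helpers from STLExtras.h. A small sketch assuming an existing Function; inspectArgs is a hypothetical name and the loop body is purely illustrative.

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Function.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void inspectArgs(Function &F) {
    // all_of takes a range directly instead of a begin()/end() pair.
    bool AllInt = all_of(F.args(), [](const Argument &A) {
      return A.getType()->isIntegerTy();
    });
    // enumerate pairs each element with its index.
    for (auto [Idx, Arg] : enumerate(F.args()))
      errs() << "arg #" << Idx << ": " << Arg.getName() << "\n";
    if (AllInt)
      errs() << "all arguments are integers\n";
  }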
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
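verifyFunction is the usual way to sanity-check IR while developing a transform. A minimal sketch, assuming a Function the caller has just modified; checkAfterTransform is a hypothetical wrapper.

  #include "llvm/IR/Function.h"
  #include "llvm/IR/Verifier.h"
  #include "llvm/Support/ErrorHandling.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void checkAfterTransform(Function &F) {
    // verifyFunction returns true (and prints to the stream, if one is
    // given) when the function is broken.
    if (verifyFunction(F, &errs()))
      report_fatal_error("transform produced invalid IR");
  }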
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
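classifyEHPersonality and isScopedEHPersonality (listed above) are how code decides which exception-handling IR shape to expect. A small sketch under that assumption; usesScopedEH is a hypothetical helper.

  #include "llvm/IR/EHPersonalities.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool usesScopedEH(const Function &F) {
    if (!F.hasPersonalityFn())
      return false;
    EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
    // Scoped personalities expect catchswitch/catchpad/cleanuppad IR
    // rather than landingpad-based IR.
    return isScopedEHPersonality(Pers);
  }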
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
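isa, cast, dyn_cast and their null-tolerant variants listed above are LLVM's RTTI-style casting utilities. A minimal sketch assuming an arbitrary non-null Value; storesToStack is a hypothetical helper.

  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"
  using namespace llvm;

  static bool storesToStack(Value *V) {
    // dyn_cast yields nullptr when V is not a StoreInst (V itself must be
    // non-null; use dyn_cast_or_null / isa_and_nonnull for possibly-null
    // values).
    if (auto *SI = dyn_cast<StoreInst>(V))
      return isa<AllocaInst>(SI->getPointerOperand()); // type query only
    return false;
  }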
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
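verifyModule is the module-level entry point; the optional BrokenDebugInfo flag distinguishes malformed debug info (which a caller may strip) from structurally invalid IR. A minimal sketch; moduleIsUsable is a hypothetical helper.

  #include "llvm/IR/DebugInfo.h"
  #include "llvm/IR/Module.h"
  #include "llvm/IR/Verifier.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static bool moduleIsUsable(Module &M) {
    bool BrokenDebugInfo = false;
    if (verifyModule(M, &errs(), &BrokenDebugInfo))
      return false;          // structurally invalid IR: reject
    if (BrokenDebugInfo)
      StripDebugInfo(M);     // recover by dropping the bad debug info
    return true;
  }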
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:305
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:298
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so print out the condition and the message.
Definition Verifier.cpp:287
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:314
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142
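The VerifierSupport members above (Broken, BrokenDebugInfo, CheckFailed, DebugInfoCheckFailed) are driven through small macros inside Verifier.cpp. The following is a simplified sketch of that pattern, not a verbatim copy of the real macro: on failure the message and offending values are recorded and the current visit method returns, so the walk continues and collects further errors instead of aborting.

  // Simplified Check() in the spirit of Verifier.cpp: C is the condition,
  // the remaining arguments go to CheckFailed(), which prints them and
  // sets Broken.
  #define Check(C, ...)                                                        \
    do {                                                                       \
      if (!(C)) {                                                              \
        CheckFailed(__VA_ARGS__);                                              \
        return;                                                                \
      }                                                                        \
    } while (false)

  // Typical use inside a visit method:
  //   Check(BO.getOperand(0)->getType() == BO.getOperand(1)->getType(),
  //         "Both operands to a binary operator are not of the same type!",
  //         &BO);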