// Excerpt of Verifier.cpp from LLVM 23.0.0git (extracted from the generated
// documentation; some hyperlinked lines were lost in extraction).
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
126#include <algorithm>
127#include <cassert>
128#include <cstdint>
129#include <memory>
130#include <optional>
131#include <string>
132#include <utility>
133
134using namespace llvm;
135
137 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
138 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
139 "scopes are not dominating"));
140
143 const Module &M;
145 const Triple &TT;
148
149 /// Track the brokenness of the module while recursively visiting.
150 bool Broken = false;
151 /// Broken debug info can be "recovered" from by stripping the debug info.
152 bool BrokenDebugInfo = false;
153 /// Whether to treat broken debug info as an error.
155
157 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
158 Context(M.getContext()) {}
159
160private:
161 void Write(const Module *M) {
162 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
163 }
164
165 void Write(const Value *V) {
166 if (V)
167 Write(*V);
168 }
169
170 void Write(const Value &V) {
171 if (isa<Instruction>(V)) {
172 V.print(*OS, MST);
173 *OS << '\n';
174 } else {
175 V.printAsOperand(*OS, true, MST);
176 *OS << '\n';
177 }
178 }
179
180 void Write(const DbgRecord *DR) {
181 if (DR) {
182 DR->print(*OS, MST, false);
183 *OS << '\n';
184 }
185 }
186
188 switch (Type) {
190 *OS << "value";
191 break;
193 *OS << "declare";
194 break;
196 *OS << "declare_value";
197 break;
199 *OS << "assign";
200 break;
202 *OS << "end";
203 break;
205 *OS << "any";
206 break;
207 };
208 }
209
210 void Write(const Metadata *MD) {
211 if (!MD)
212 return;
213 MD->print(*OS, MST, &M);
214 *OS << '\n';
215 }
216
  /// Unwrap a typed MD tuple array and defer to the Metadata overload above.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
220
221 void Write(const NamedMDNode *NMD) {
222 if (!NMD)
223 return;
224 NMD->print(*OS, MST);
225 *OS << '\n';
226 }
227
228 void Write(Type *T) {
229 if (!T)
230 return;
231 *OS << ' ' << *T;
232 }
233
234 void Write(const Comdat *C) {
235 if (!C)
236 return;
237 *OS << *C;
238 }
239
240 void Write(const APInt *AI) {
241 if (!AI)
242 return;
243 *OS << *AI << '\n';
244 }
245
246 void Write(const unsigned i) { *OS << i << '\n'; }
247
248 // NOLINTNEXTLINE(readability-identifier-naming)
249 void Write(const Attribute *A) {
250 if (!A)
251 return;
252 *OS << A->getAsString() << '\n';
253 }
254
255 // NOLINTNEXTLINE(readability-identifier-naming)
256 void Write(const AttributeSet *AS) {
257 if (!AS)
258 return;
259 *OS << AS->getAsString() << '\n';
260 }
261
262 // NOLINTNEXTLINE(readability-identifier-naming)
263 void Write(const AttributeList *AL) {
264 if (!AL)
265 return;
266 AL->print(*OS);
267 }
268
269 void Write(Printable P) { *OS << P << '\n'; }
270
271 template <typename T> void Write(ArrayRef<T> Vs) {
272 for (const T &V : Vs)
273 Write(V);
274 }
275
276 template <typename T1, typename... Ts>
277 void WriteTs(const T1 &V1, const Ts &... Vs) {
278 Write(V1);
279 WriteTs(Vs...);
280 }
281
282 template <typename... Ts> void WriteTs() {}
283
284public:
285 /// A check failed, so printout out the condition and the message.
286 ///
287 /// This provides a nice place to put a breakpoint if you want to see why
288 /// something is not correct.
289 void CheckFailed(const Twine &Message) {
290 if (OS)
291 *OS << Message << '\n';
292 Broken = true;
293 }
294
295 /// A check failed (with values to print).
296 ///
297 /// This calls the Message-only version so that the above is easier to set a
298 /// breakpoint on.
299 template <typename T1, typename... Ts>
300 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
301 CheckFailed(Message);
302 if (OS)
303 WriteTs(V1, Vs...);
304 }
305
  /// A debug info check failed: print \p Message (when a stream is attached)
  /// and record that the module's debug info is broken.
  // NOTE(review): upstream this function also escalates via
  // `Broken |= TreatBrokenDebugInfoAsError;` — that line appears to have been
  // lost in this copy; confirm against the original source.
  void DebugInfoCheckFailed(const Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    BrokenDebugInfo = true;
  }
313
314 /// A debug info check failed (with values to print).
315 template <typename T1, typename... Ts>
316 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
317 const Ts &... Vs) {
318 DebugInfoCheckFailed(Message);
319 if (OS)
320 WriteTs(V1, Vs...);
321 }
322};
323
324namespace {
325
326class Verifier : public InstVisitor<Verifier>, VerifierSupport {
327 friend class InstVisitor<Verifier>;
328 DominatorTree DT;
329
330 /// When verifying a basic block, keep track of all of the
331 /// instructions we have seen so far.
332 ///
333 /// This allows us to do efficient dominance checks for the case when an
334 /// instruction has an operand that is an instruction in the same block.
335 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
336
337 /// Keep track of the metadata nodes that have been checked already.
339
340 /// Keep track which DISubprogram is attached to which function.
342
343 /// Track all DICompileUnits visited.
345
346 /// The result type for a landingpad.
347 Type *LandingPadResultTy;
348
349 /// Whether we've seen a call to @llvm.localescape in this function
350 /// already.
351 bool SawFrameEscape;
352
353 /// Whether the current function has a DISubprogram attached to it.
354 bool HasDebugInfo = false;
355
356 /// Stores the count of how many objects were passed to llvm.localescape for a
357 /// given function and the largest index passed to llvm.localrecover.
359
360 // Maps catchswitches and cleanuppads that unwind to siblings to the
361 // terminators that indicate the unwind, used to detect cycles therein.
363
364 /// Cache which blocks are in which funclet, if an EH funclet personality is
365 /// in use. Otherwise empty.
366 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
367
368 /// Cache of constants visited in search of ConstantExprs.
369 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
370
371 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
372 SmallVector<const Function *, 4> DeoptimizeDeclarations;
373
374 /// Cache of attribute lists verified.
375 SmallPtrSet<const void *, 32> AttributeListsVisited;
376
377 // Verify that this GlobalValue is only used in this module.
378 // This map is used to avoid visiting uses twice. We can arrive at a user
379 // twice, if they have multiple operands. In particular for very large
380 // constant expressions, we can arrive at a particular user many times.
381 SmallPtrSet<const Value *, 32> GlobalValueVisited;
382
383 // Keeps track of duplicate function argument debug info.
385
386 TBAAVerifier TBAAVerifyHelper;
387 ConvergenceVerifier ConvergenceVerifyHelper;
388
389 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
390
391 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
392
393public:
  /// Build a verifier bound to module \p M.
  ///
  /// \param OS Stream for diagnostics; may be null to suppress all output.
  /// \param ShouldTreatBrokenDebugInfoAsError Whether broken debug info makes
  ///        verification fail outright rather than being recoverable.
  /// \param M The module this verifier instance is tied to; only functions of
  ///        this module may be passed to verify().
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
400
401 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
402
  /// Verify a single function \p F, which must belong to the module this
  /// verifier instance was constructed with.
  /// \returns true if the function is well-formed, false otherwise.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.

    // First check that every basic block has a terminator, otherwise we can't
    // even inspect the CFG.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      // Found a block without a terminator: report it (when a stream is
      // attached) and bail out before any CFG-based analysis.
      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state so this instance can verify another function.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
456
  /// Verify the module that this instance of \c Verifier was initialized with.
  /// Walks globals, aliases, ifuncs, named metadata, comdats, module flags and
  /// module-level debug info.
  /// \returns true if all module-level checks pass.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();
    visitModuleErrnoTBAA();

    verifyCompileUnits();

    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
495
496private:
497 /// Whether a metadata node is allowed to be, or contain, a DILocation.
498 enum class AreDebugLocsAllowed { No, Yes };
499
500 /// Metadata that should be treated as a range, with slightly different
501 /// requirements.
502 enum class RangeLikeMetadataKind {
503 Range, // MD_range
504 AbsoluteSymbol, // MD_absolute_symbol
505 NoaliasAddrspace // MD_noalias_addrspace
506 };
507
508 // Verification methods...
509 void visitGlobalValue(const GlobalValue &GV);
510 void visitGlobalVariable(const GlobalVariable &GV);
511 void visitGlobalAlias(const GlobalAlias &GA);
512 void visitGlobalIFunc(const GlobalIFunc &GI);
513 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
514 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
515 const GlobalAlias &A, const Constant &C);
516 void visitNamedMDNode(const NamedMDNode &NMD);
517 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
518 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
519 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
520 void visitDIArgList(const DIArgList &AL, Function *F);
521 void visitComdat(const Comdat &C);
522 void visitModuleIdents();
523 void visitModuleCommandLines();
524 void visitModuleErrnoTBAA();
525 void visitModuleFlags();
526 void visitModuleFlag(const MDNode *Op,
527 DenseMap<const MDString *, const MDNode *> &SeenIDs,
528 SmallVectorImpl<const MDNode *> &Requirements);
529 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
530 void visitFunction(const Function &F);
531 void visitBasicBlock(BasicBlock &BB);
532 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
533 RangeLikeMetadataKind Kind);
534 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
535 void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
536 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
537 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
538 void visitNofreeMetadata(Instruction &I, MDNode *MD);
539 void visitProfMetadata(Instruction &I, MDNode *MD);
540 void visitCallStackMetadata(MDNode *MD);
541 void visitMemProfMetadata(Instruction &I, MDNode *MD);
542 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
543 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
544 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
545 void visitMMRAMetadata(Instruction &I, MDNode *MD);
546 void visitAnnotationMetadata(MDNode *Annotation);
547 void visitAliasScopeMetadata(const MDNode *MD);
548 void visitAliasScopeListMetadata(const MDNode *MD);
549 void visitAccessGroupMetadata(const MDNode *MD);
550 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
551 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
552
553 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
554#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
555#include "llvm/IR/Metadata.def"
556 void visitDIScope(const DIScope &N);
557 void visitDIVariable(const DIVariable &N);
558 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
559 void visitDITemplateParameter(const DITemplateParameter &N);
560
561 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
562
563 void visit(DbgLabelRecord &DLR);
564 void visit(DbgVariableRecord &DVR);
565 // InstVisitor overrides...
566 using InstVisitor<Verifier>::visit;
567 void visitDbgRecords(Instruction &I);
568 void visit(Instruction &I);
569
570 void visitTruncInst(TruncInst &I);
571 void visitZExtInst(ZExtInst &I);
572 void visitSExtInst(SExtInst &I);
573 void visitFPTruncInst(FPTruncInst &I);
574 void visitFPExtInst(FPExtInst &I);
575 void visitFPToUIInst(FPToUIInst &I);
576 void visitFPToSIInst(FPToSIInst &I);
577 void visitUIToFPInst(UIToFPInst &I);
578 void visitSIToFPInst(SIToFPInst &I);
579 void visitIntToPtrInst(IntToPtrInst &I);
580 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
581 void visitPtrToAddrInst(PtrToAddrInst &I);
582 void visitPtrToIntInst(PtrToIntInst &I);
583 void visitBitCastInst(BitCastInst &I);
584 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
585 void visitPHINode(PHINode &PN);
586 void visitCallBase(CallBase &Call);
587 void visitUnaryOperator(UnaryOperator &U);
588 void visitBinaryOperator(BinaryOperator &B);
589 void visitICmpInst(ICmpInst &IC);
590 void visitFCmpInst(FCmpInst &FC);
591 void visitExtractElementInst(ExtractElementInst &EI);
592 void visitInsertElementInst(InsertElementInst &EI);
593 void visitShuffleVectorInst(ShuffleVectorInst &EI);
594 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
595 void visitCallInst(CallInst &CI);
596 void visitInvokeInst(InvokeInst &II);
597 void visitGetElementPtrInst(GetElementPtrInst &GEP);
598 void visitLoadInst(LoadInst &LI);
599 void visitStoreInst(StoreInst &SI);
600 void verifyDominatesUse(Instruction &I, unsigned i);
601 void visitInstruction(Instruction &I);
602 void visitTerminator(Instruction &I);
603 void visitCondBrInst(CondBrInst &BI);
604 void visitReturnInst(ReturnInst &RI);
605 void visitSwitchInst(SwitchInst &SI);
606 void visitIndirectBrInst(IndirectBrInst &BI);
607 void visitCallBrInst(CallBrInst &CBI);
608 void visitSelectInst(SelectInst &SI);
609 void visitUserOp1(Instruction &I);
610 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
611 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
612 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
613 void visitVPIntrinsic(VPIntrinsic &VPI);
614 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
615 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
616 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
617 void visitFenceInst(FenceInst &FI);
618 void visitAllocaInst(AllocaInst &AI);
619 void visitExtractValueInst(ExtractValueInst &EVI);
620 void visitInsertValueInst(InsertValueInst &IVI);
621 void visitEHPadPredecessors(Instruction &I);
622 void visitLandingPadInst(LandingPadInst &LPI);
623 void visitResumeInst(ResumeInst &RI);
624 void visitCatchPadInst(CatchPadInst &CPI);
625 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
626 void visitCleanupPadInst(CleanupPadInst &CPI);
627 void visitFuncletPadInst(FuncletPadInst &FPI);
628 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
629 void visitCleanupReturnInst(CleanupReturnInst &CRI);
630
631 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
632 void verifySwiftErrorValue(const Value *SwiftErrorVal);
633 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
634 void verifyMustTailCall(CallInst &CI);
635 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
636 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
637 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
638 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
639 const Value *V);
640 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
641 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
642 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
643 void verifyUnknownProfileMetadata(MDNode *MD);
644 void visitConstantExprsRecursively(const Constant *EntryC);
645 void visitConstantExpr(const ConstantExpr *CE);
646 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
647 void verifyInlineAsmCall(const CallBase &Call);
648 void verifyStatepoint(const CallBase &Call);
649 void verifyFrameRecoverIndices();
650 void verifySiblingFuncletUnwinds();
651
652 void verifyFragmentExpression(const DbgVariableRecord &I);
653 template <typename ValueOrMetadata>
654 void verifyFragmentExpression(const DIVariable &V,
656 ValueOrMetadata *Desc);
657 void verifyFnArgs(const DbgVariableRecord &DVR);
658 void verifyNotEntryValue(const DbgVariableRecord &I);
659
660 /// Module-level debug info verification...
661 void verifyCompileUnits();
662
663 /// Module-level verification that all @llvm.experimental.deoptimize
664 /// declarations share the same calling convention.
665 void verifyDeoptimizeCallingConvs();
666
667 void verifyAttachedCallBundle(const CallBase &Call,
668 const OperandBundleUse &BU);
669
670 /// Verify the llvm.experimental.noalias.scope.decl declarations
671 void verifyNoAliasScopeDecl();
672};
673
674} // end anonymous namespace
675
676/// We know that cond should be true, if not print an error message.
677#define Check(C, ...) \
678 do { \
679 if (!(C)) { \
680 CheckFailed(__VA_ARGS__); \
681 return; \
682 } \
683 } while (false)
684
685/// We know that a debug info condition should be true, if not print
686/// an error message.
687#define CheckDI(C, ...) \
688 do { \
689 if (!(C)) { \
690 DebugInfoCheckFailed(__VA_ARGS__); \
691 return; \
692 } \
693 } while (false)
694
// Validate the DbgRecords attached to instruction I: the marker must point
// back at I, PHI nodes must carry no records, every record's marker must be
// I's marker, and each record is then checked in detail.
void Verifier::visitDbgRecords(Instruction &I) {
  if (!I.DebugMarker)
    return;
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    // NOTE(review): the tail of this `if` condition (presumably a cast of the
    // record's debug location to a DILocation) is missing from this copy;
    // restore it from the upstream source before compiling.
    if (auto *Loc =
    visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
719
720void Verifier::visit(Instruction &I) {
721 visitDbgRecords(I);
722 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
723 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
725}
726
// Helper to iterate over indirect users. By returning false, the callback can
// ask to stop traversing further.
// NOTE(review): this copy is truncated — the `Visited` set parameter
// (a SmallPtrSetImpl<const Value *> &) and the declaration of `WorkList`
// (seeded from User's materialized users) are missing; restore them from the
// upstream source before compiling.
static void forEachUser(const Value *User,
                        llvm::function_ref<bool(const Value *)> Callback) {
  // Each value is processed at most once.
  if (!Visited.insert(User).second)
    return;

  // Depth-first walk over the (possibly indirect) users.
  while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
    if (!Visited.insert(Cur).second)
      continue;
    // Only descend into Cur's users when the callback asks to continue.
    if (Callback(Cur))
      append_range(WorkList, Cur->materialized_users());
  }
}
743
// Verify invariants that apply to every GlobalValue: linkage/visibility
// rules, DLL storage class constraints, !associated and ref metadata, and
// that every (possibly indirect) user of the global lives in this module.
// NOTE(review): several Check(...) condition lines were lost from this copy
// of the file (marked inline below); restore them from the upstream source
// before compiling.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
  // NOTE(review): the Check(...) condition for this diagnostic is missing.
      "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      // !associated must carry exactly one pointer-typed operand resolving to
      // a global object other than GV itself.
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }

    if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
      Check(!GO->isDeclaration(),
            "ref metadata must not be placed on a declaration", GO);

      // NOTE(review): the declaration of the MDs container is missing here.
      GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
      for (const MDNode *MD : MDs) {
        Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
              &GV, MD);
        const Metadata *Op = MD->getOperand(0).get();
        const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
        Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
        if (VM) {
          Check(isa<PointerType>(VM->getValue()->getType()),
                "ref value must be pointer typed", GV, MD);

          const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
          Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
                "ref metadata must point to a GlobalObject", GO, Stripped);
          Check(Stripped != GO, "values should not reference themselves", GO,
                MD);
        }
      }
    }
  }

  // NOTE(review): the Check(...) condition for this diagnostic is missing.
      "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
    // NOTE(review): the Check(...) condition for this diagnostic is missing.
        "dllexport GlobalValue must have default or protected visibility",
        &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
    // NOTE(review): the Check(...) condition for this diagnostic is missing.
        "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    // NOTE(review): part of this linkage condition was lost in this copy.
    Check((GV.isDeclaration() &&
           "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Walk all (possibly indirect) users and make sure each one lives inside
  // this module; instructions must additionally be attached to a function.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
859
// Verify invariants specific to GlobalVariables: alignment, initializer
// typing, common-linkage rules, the llvm.global_ctors/llvm.global_dtors and
// llvm.used/llvm.compiler.used conventions, debug attachments, scalable and
// target-extension type restrictions, and address-space sizing.
// NOTE(review): several Check(...) condition lines were lost from this copy
// of the file (marked inline below); restore them from the upstream source
// before compiling.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
    // NOTE(review): the Check(...) condition for this diagnostic is missing.
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      // NOTE(review): the Check(...) condition for this diagnostic is missing.
          "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    // NOTE(review): two Check(...) condition lines are missing here.
        "invalid linkage for intrinsic global variable", &GV);
        "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    // NOTE(review): two Check(...) condition lines are missing here.
        "invalid linkage for intrinsic global variable", &GV);
        "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initializer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
          // NOTE(review): the Check(...) condition for this diagnostic is
          // missing.
                Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  // NOTE(review): the declaration of the MDs container is missing here.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
  // NOTE(review): the Check(...) condition for this diagnostic is missing.
        "Global @" + GV.getName() + " has illegal target extension type",
        GVType);

  // Check that the address space can hold all bits of the type, recognized
  // by an access in the address space being able to reach all bytes of the
  // type.
  Check(!GVType->isSized() ||
            isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
                    GV.getGlobalSize(DL)),
        "Global variable is too large to fit into the address space", &GV,
        GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
980
981void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
982 SmallPtrSet<const GlobalAlias*, 4> Visited;
983 Visited.insert(&GA);
984 visitAliaseeSubExpr(Visited, GA, C);
985}
986
// Recursively verify the constant expression an alias refers to. Visited
// carries every GlobalAlias already seen on the current chain so that alias
// cycles can be diagnosed.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Re-encountering an alias already on the chain means the aliases form
      // a cycle, which is malformed IR.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Walk all operands: follow aliases through to their aliasee, and descend
  // into any other constant subexpression.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1025
// Verify a single global alias: linkage, a non-null aliasee of matching
// type, and the aliasee expression itself.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  // An alias is just another name for its aliasee, so the types must agree.
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively verify the aliasee expression (cycle/interposability checks).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Finally apply the checks common to all global values.
  visitGlobalValue(GA);
}
1043
// Verify a global ifunc: metadata restrictions, linkage, and that its
// resolver is a defined function of the right pointer type.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // Debug-info and profile metadata are not meaningful on an ifunc.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  // The resolver pointer must live in the ifunc's own address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1078
1079void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1080 // There used to be various other llvm.dbg.* nodes, but we don't support
1081 // upgrading them and we want to reserve the namespace for future uses.
1082 if (NMD.getName().starts_with("llvm.dbg."))
1083 CheckDI(NMD.getName() == "llvm.dbg.cu",
1084 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1085 for (const MDNode *MD : NMD.operands()) {
1086 if (NMD.getName() == "llvm.dbg.cu")
1087 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1088
1089 if (!MD)
1090 continue;
1091
1092 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1093 }
1094}
1095
// Verify a generic metadata node and, transitively, everything it references.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for this node's concrete subclass.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands. Function-local values and stray DILocations are
  // not allowed inside global metadata.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1150
1151void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1152 Check(MD.getValue(), "Expected valid value", &MD);
1153 Check(!MD.getValue()->getType()->isMetadataTy(),
1154 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1155
1156 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1157 if (!L)
1158 return;
1159
1160 Check(F, "function-local metadata used outside a function", L);
1161
1162 // If this was an instruction, bb, or argument, verify that it is in the
1163 // function that we expect.
1164 Function *ActualF = nullptr;
1165 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1166 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1167 ActualF = I->getParent()->getParent();
1168 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1169 ActualF = BB->getParent();
1170 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1171 ActualF = A->getParent();
1172 assert(ActualF && "Unimplemented function local metadata case!");
1173
1174 Check(ActualF == F, "function-local metadata used in wrong function", L);
1175}
1176
1177void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1178 for (const ValueAsMetadata *VAM : AL.getArgs())
1179 visitValueAsMetadata(*VAM, F);
1180}
1181
1182void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1183 Metadata *MD = MDV.getMetadata();
1184 if (auto *N = dyn_cast<MDNode>(MD)) {
1185 visitMDNode(*N, AreDebugLocsAllowed::No);
1186 return;
1187 }
1188
1189 // Only visit each node once. Metadata can be mutually recursive, so this
1190 // avoids infinite recursion here, as well as being an optimization.
1191 if (!MDNodes.insert(MD).second)
1192 return;
1193
1194 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1195 visitValueAsMetadata(*V, F);
1196
1197 if (auto *AL = dyn_cast<DIArgList>(MD))
1198 visitDIArgList(*AL, F);
1199}
1200
1201static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1202static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1203static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1204static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1205
1206void Verifier::visitDILocation(const DILocation &N) {
1207 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1208 "location requires a valid scope", &N, N.getRawScope());
1209 if (auto *IA = N.getRawInlinedAt())
1210 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1211 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1212 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1213}
1214
1215void Verifier::visitGenericDINode(const GenericDINode &N) {
1216 CheckDI(N.getTag(), "invalid tag", &N);
1217}
1218
1219void Verifier::visitDIScope(const DIScope &N) {
1220 if (auto *F = N.getRawFile())
1221 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1222}
1223
// Verify a subrange type node: tag, and that each bound/stride/bias field is
// one of the accepted metadata kinds.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1255
// Verify an array subrange node: tag, mutually exclusive count/upperBound,
// and the accepted metadata kinds for each field.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are alternative ways of bounding the range; at most
  // one may be present.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // Constant counts smaller than -1 are rejected.
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1283
1284void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1285 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1286 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1287 "GenericSubrange can have any one of count or upperBound", &N);
1288 auto *CBound = N.getRawCountNode();
1289 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1290 "Count must be signed constant or DIVariable or DIExpression", &N);
1291 auto *LBound = N.getRawLowerBound();
1292 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1293 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1294 "LowerBound must be signed constant or DIVariable or DIExpression",
1295 &N);
1296 auto *UBound = N.getRawUpperBound();
1297 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1298 "UpperBound must be signed constant or DIVariable or DIExpression",
1299 &N);
1300 auto *Stride = N.getRawStride();
1301 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1302 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1303 "Stride must be signed constant or DIVariable or DIExpression", &N);
1304}
1305
1306void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1307 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1308}
1309
// Verify a basic (leaf) type node: restricted tag set and a constant size.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1320
// Verify a fixed-point type: a base type with a fixed-point encoding whose
// binary/decimal/rational representation fields are used consistently.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Common basic-type checks first (tag set, constant size).
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1339
1340void Verifier::visitDIStringType(const DIStringType &N) {
1341 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1342 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1343 &N);
1344}
1345
// Verify a derived type node (typedefs, pointers, qualifiers, members, ...):
// tag set, per-tag extraData constraints, scope/base-type references, and
// address-space / size restrictions.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The meaning of extraData depends on the tag; check the appropriate shape.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept nothing, a constant, a string, an ObjC property, or a
    // single-element tuple wrapping a constant.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  // Pascal-style set types may only be based on enums, subranges, or certain
  // integer-like basic types.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // A DWARF address space is only meaningful on pointer/reference tags.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1431
1432/// Detect mutually exclusive flags.
1433static bool hasConflictingReferenceFlags(unsigned Flags) {
1434 return ((Flags & DINode::FlagLValueReference) &&
1435 (Flags & DINode::FlagRValueReference)) ||
1436 ((Flags & DINode::FlagTypePassByValue) &&
1437 (Flags & DINode::FlagTypePassByReference));
1438}
1439
1440void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1441 auto *Params = dyn_cast<MDTuple>(&RawParams);
1442 CheckDI(Params, "invalid template params", &N, &RawParams);
1443 for (Metadata *Op : Params->operands()) {
1444 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1445 &N, Params, Op);
1446 }
1447}
1448
// Verify a composite type node (structs, arrays, enums, variants, ...): tag
// set, element list shape, vector encoding, and that Fortran-style dynamic
// array fields only appear on array types.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // Members/enumerators/subranges are stored as a plain MDTuple.
  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // DIBlockByRefStruct is a retired flag bit; reject nodes still carrying it.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // Vector types are encoded as exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank describe dynamic arrays and are
  // only valid on DW_TAG_array_type nodes.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1523
// Verify a subroutine type: the type array (return type followed by the
// parameter types) must be an MDTuple of type references.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1535
1536void Verifier::visitDIFile(const DIFile &N) {
1537 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1538 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1539 if (Checksum) {
1540 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1541 "invalid checksum kind", &N);
1542 size_t Size;
1543 switch (Checksum->Kind) {
1544 case DIFile::CSK_MD5:
1545 Size = 32;
1546 break;
1547 case DIFile::CSK_SHA1:
1548 Size = 40;
1549 break;
1550 case DIFile::CSK_SHA256:
1551 Size = 64;
1552 break;
1553 }
1554 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1555 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1556 "invalid checksum", &N);
1557 }
1558}
1559
// Verify a compile unit: distinctness, file, emission kind, and the shape of
// each attached list (enums, retained types, globals, imports, macros).
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  // Each list below, when present, must be an MDTuple of nodes of the
  // appropriate kind.
  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained entries are either types or subprogram declarations (never
      // definitions).
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(IE, "invalid imported entity ref", &N, Op);
              "function-local imports are not allowed in a DICompileUnit's "
              "imported entities list",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this CU for later module-level cross-checks (see other users of
  // CUVisited).
  CUVisited.insert(&N);
}
1620
// Verify a subprogram node: tag, scope/file/type references, template
// parameters, retained nodes, definition vs. declaration constraints, the
// compile unit link, and thrown types.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration link must reference a subprogram that is itself only a
  // declaration.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Tracks which argument indices have already been claimed by a
    // DILocalVariable so duplicates are diagnosed.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Accept only the retained-node kinds listed in the message below.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      // Every retained node must resolve back to this very subprogram.
      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  // The thrown-types list, when present, must be an MDTuple of types.
  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1719
1720void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1721 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1722 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1723 "invalid local scope", &N, N.getRawScope());
1724 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1725 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1726}
1727
1728void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1729 visitDILexicalBlockBase(N);
1730
1731 CheckDI(N.getLine() || !N.getColumn(),
1732 "cannot have column info without line info", &N);
1733}
1734
// A lexical block file node has no constraints beyond the common lexical
// block checks.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
1738
1739void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1740 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1741 if (auto *S = N.getRawScope())
1742 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1743 if (auto *S = N.getRawDecl())
1744 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1745}
1746
1747void Verifier::visitDINamespace(const DINamespace &N) {
1748 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1749 if (auto *S = N.getRawScope())
1750 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1751}
1752
1753void Verifier::visitDIMacro(const DIMacro &N) {
1754 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1755 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1756 "invalid macinfo type", &N);
1757 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1758 if (!N.getValue().empty()) {
1759 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1760 }
1761}
1762
1763void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1764 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1765 "invalid macinfo type", &N);
1766 if (auto *F = N.getRawFile())
1767 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1768
1769 if (auto *Array = N.getRawElements()) {
1770 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1771 for (Metadata *Op : N.getElements()->operands()) {
1772 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1773 }
1774 }
1775}
1776
// Verify a DIModule: the tag must be DW_TAG_module and the module must have
// a name.
void Verifier::visitDIModule(const DIModule &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  CheckDI(!N.getName().empty(), "anonymous module", &N);
}
1781
// Common check for all template-parameter nodes: the type operand must be a
// valid type reference.
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1785
1786void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1787 visitDITemplateParameter(N);
1788
1789 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1790 &N);
1791}
1792
1793void Verifier::visitDITemplateValueParameter(
1794 const DITemplateValueParameter &N) {
1795 visitDITemplateParameter(N);
1796
1797 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1798 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1799 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1800 "invalid tag", &N);
1801}
1802
1803void Verifier::visitDIVariable(const DIVariable &N) {
1804 if (auto *S = N.getRawScope())
1805 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1806 if (auto *F = N.getRawFile())
1807 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1808}
1809
1810void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1811 // Checks common to all variables.
1812 visitDIVariable(N);
1813
1814 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1815 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1816 // Check only if the global variable is not an extern
1817 if (N.isDefinition())
1818 CheckDI(N.getType(), "missing global variable type", &N);
1819 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1821 "invalid static data member declaration", &N, Member);
1822 }
1823}
1824
1825void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1826 // Checks common to all variables.
1827 visitDIVariable(N);
1828
1829 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1830 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1831 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1832 "local variable requires a valid scope", &N, N.getRawScope());
1833 if (auto Ty = N.getType())
1834 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1835}
1836
// Verify a DIAssignID: it must be operand-free and distinct (each node is a
// unique identity linking stores to debug records).
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
1841
1842void Verifier::visitDILabel(const DILabel &N) {
1843 if (auto *S = N.getRawScope())
1844 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1845 if (auto *F = N.getRawFile())
1846 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1847
1848 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1849 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1850 "label requires a valid scope", &N, N.getRawScope());
1851}
1852
// Verify a DIExpression; validity of the opcode stream is delegated to
// DIExpression::isValid().
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1856
1857void Verifier::visitDIGlobalVariableExpression(
1858 const DIGlobalVariableExpression &GVE) {
1859 CheckDI(GVE.getVariable(), "missing variable");
1860 if (auto *Var = GVE.getVariable())
1861 visitDIGlobalVariable(*Var);
1862 if (auto *Expr = GVE.getExpression()) {
1863 visitDIExpression(*Expr);
1864 if (auto Fragment = Expr->getFragmentInfo())
1865 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1866 }
1867}
1868
1869void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1870 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1871 if (auto *T = N.getRawType())
1872 CheckDI(isType(T), "invalid type ref", &N, T);
1873 if (auto *F = N.getRawFile())
1874 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1875}
1876
1877void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1878 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1879 N.getTag() == dwarf::DW_TAG_imported_declaration,
1880 "invalid tag", &N);
1881 if (auto *S = N.getRawScope())
1882 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1883 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1884 N.getRawEntity());
1885}
1886
1887void Verifier::visitComdat(const Comdat &C) {
1888 // In COFF the Module is invalid if the GlobalValue has private linkage.
1889 // Entities with private linkage don't have entries in the symbol table.
1890 if (TT.isOSBinFormatCOFF())
1891 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1892 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1893 GV);
1894}
1895
1896void Verifier::visitModuleIdents() {
1897 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1898 if (!Idents)
1899 return;
1900
1901 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1902 // Scan each llvm.ident entry and make sure that this requirement is met.
1903 for (const MDNode *N : Idents->operands()) {
1904 Check(N->getNumOperands() == 1,
1905 "incorrect number of operands in llvm.ident metadata", N);
1906 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1907 ("invalid value for llvm.ident metadata entry operand"
1908 "(the operand should be a string)"),
1909 N->getOperand(0));
1910 }
1911}
1912
1913void Verifier::visitModuleCommandLines() {
1914 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1915 if (!CommandLines)
1916 return;
1917
1918 // llvm.commandline takes a list of metadata entry. Each entry has only one
1919 // string. Scan each llvm.commandline entry and make sure that this
1920 // requirement is met.
1921 for (const MDNode *N : CommandLines->operands()) {
1922 Check(N->getNumOperands() == 1,
1923 "incorrect number of operands in llvm.commandline metadata", N);
1924 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1925 ("invalid value for llvm.commandline metadata entry operand"
1926 "(the operand should be a string)"),
1927 N->getOperand(0));
1928 }
1929}
1930
1931void Verifier::visitModuleErrnoTBAA() {
1932 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1933 if (!ErrnoTBAA)
1934 return;
1935
1936 Check(ErrnoTBAA->getNumOperands() >= 1,
1937 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1938
1939 for (const MDNode *N : ErrnoTBAA->operands())
1940 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1941}
1942
1943void Verifier::visitModuleFlags() {
1944 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1945 if (!Flags) return;
1946
1947 // Scan each flag, and track the flags and requirements.
1948 DenseMap<const MDString*, const MDNode*> SeenIDs;
1949 SmallVector<const MDNode*, 16> Requirements;
1950 uint64_t PAuthABIPlatform = -1;
1951 uint64_t PAuthABIVersion = -1;
1952 for (const MDNode *MDN : Flags->operands()) {
1953 visitModuleFlag(MDN, SeenIDs, Requirements);
1954 if (MDN->getNumOperands() != 3)
1955 continue;
1956 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1957 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1958 if (const auto *PAP =
1960 PAuthABIPlatform = PAP->getZExtValue();
1961 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1962 if (const auto *PAV =
1964 PAuthABIVersion = PAV->getZExtValue();
1965 }
1966 }
1967 }
1968
1969 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1970 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1971 "'aarch64-elf-pauthabi-version' module flags must be present");
1972
1973 // Validate that the requirements in the module are valid.
1974 for (const MDNode *Requirement : Requirements) {
1975 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1976 const Metadata *ReqValue = Requirement->getOperand(1);
1977
1978 const MDNode *Op = SeenIDs.lookup(Flag);
1979 if (!Op) {
1980 CheckFailed("invalid requirement on flag, flag is not present in module",
1981 Flag);
1982 continue;
1983 }
1984
1985 if (Op->getOperand(2) != ReqValue) {
1986 CheckFailed(("invalid requirement on flag, "
1987 "flag does not have the required value"),
1988 Flag);
1989 continue;
1990 }
1991 }
1992}
1993
1994void
1995Verifier::visitModuleFlag(const MDNode *Op,
1996 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1997 SmallVectorImpl<const MDNode *> &Requirements) {
1998 // Each module flag should have three arguments, the merge behavior (a
1999 // constant int), the flag ID (an MDString), and the value.
2000 Check(Op->getNumOperands() == 3,
2001 "incorrect number of operands in module flag", Op);
2002 Module::ModFlagBehavior MFB;
2003 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
2005 "invalid behavior operand in module flag (expected constant integer)",
2006 Op->getOperand(0));
2007 Check(false,
2008 "invalid behavior operand in module flag (unexpected constant)",
2009 Op->getOperand(0));
2010 }
2011 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
2012 Check(ID, "invalid ID operand in module flag (expected metadata string)",
2013 Op->getOperand(1));
2014
2015 // Check the values for behaviors with additional requirements.
2016 switch (MFB) {
2017 case Module::Error:
2018 case Module::Warning:
2019 case Module::Override:
2020 // These behavior types accept any value.
2021 break;
2022
2023 case Module::Min: {
2024 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2025 Check(V && V->getValue().isNonNegative(),
2026 "invalid value for 'min' module flag (expected constant non-negative "
2027 "integer)",
2028 Op->getOperand(2));
2029 break;
2030 }
2031
2032 case Module::Max: {
2034 "invalid value for 'max' module flag (expected constant integer)",
2035 Op->getOperand(2));
2036 break;
2037 }
2038
2039 case Module::Require: {
2040 // The value should itself be an MDNode with two operands, a flag ID (an
2041 // MDString), and a value.
2042 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2043 Check(Value && Value->getNumOperands() == 2,
2044 "invalid value for 'require' module flag (expected metadata pair)",
2045 Op->getOperand(2));
2046 Check(isa<MDString>(Value->getOperand(0)),
2047 ("invalid value for 'require' module flag "
2048 "(first value operand should be a string)"),
2049 Value->getOperand(0));
2050
2051 // Append it to the list of requirements, to check once all module flags are
2052 // scanned.
2053 Requirements.push_back(Value);
2054 break;
2055 }
2056
2057 case Module::Append:
2058 case Module::AppendUnique: {
2059 // These behavior types require the operand be an MDNode.
2060 Check(isa<MDNode>(Op->getOperand(2)),
2061 "invalid value for 'append'-type module flag "
2062 "(expected a metadata node)",
2063 Op->getOperand(2));
2064 break;
2065 }
2066 }
2067
2068 // Unless this is a "requires" flag, check the ID is unique.
2069 if (MFB != Module::Require) {
2070 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2071 Check(Inserted,
2072 "module flag identifiers must be unique (or of 'require' type)", ID);
2073 }
2074
2075 if (ID->getString() == "wchar_size") {
2076 ConstantInt *Value
2078 Check(Value, "wchar_size metadata requires constant integer argument");
2079 }
2080
2081 if (ID->getString() == "Linker Options") {
2082 // If the llvm.linker.options named metadata exists, we assume that the
2083 // bitcode reader has upgraded the module flag. Otherwise the flag might
2084 // have been created by a client directly.
2085 Check(M.getNamedMetadata("llvm.linker.options"),
2086 "'Linker Options' named metadata no longer supported");
2087 }
2088
2089 if (ID->getString() == "SemanticInterposition") {
2090 ConstantInt *Value =
2092 Check(Value,
2093 "SemanticInterposition metadata requires constant integer argument");
2094 }
2095
2096 if (ID->getString() == "CG Profile") {
2097 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2098 visitModuleFlagCGProfileEntry(MDO);
2099 }
2100}
2101
2102void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2103 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2104 if (!FuncMDO)
2105 return;
2106 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2107 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2108 "expected a Function or null", FuncMDO);
2109 };
2110 auto Node = dyn_cast_or_null<MDNode>(MDO);
2111 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2112 CheckFunction(Node->getOperand(0));
2113 CheckFunction(Node->getOperand(1));
2114 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2115 Check(Count && Count->getType()->isIntegerTy(),
2116 "expected an integer constant", Node->getOperand(2));
2117}
2118
// Verify that every attribute in \p Attrs is structurally well-formed.
// String attributes declared as "strbool" in Attributes.inc must have a value
// of "", "true", or "false"; for enum/int attributes, the presence of an
// integer argument must match what the attribute kind expects. The value V is
// printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Expand one validity check per string-boolean attribute known to the
      // generated Attributes.inc table.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Enum/int attributes: an argument must be present exactly when the kind
    // requires one.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2144
2145// VerifyParameterAttrs - Check the given attributes for an argument or return
2146// value of the specified type. The value V is printed in error messages.
2147void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2148 const Value *V) {
2149 if (!Attrs.hasAttributes())
2150 return;
2151
2152 verifyAttributeTypes(Attrs, V);
2153
2154 for (Attribute Attr : Attrs)
2155 Check(Attr.isStringAttribute() ||
2156 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2157 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2158 V);
2159
2160 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2161 unsigned AttrCount =
2162 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2163 Check(AttrCount == 1,
2164 "Attribute 'immarg' is incompatible with other attributes except the "
2165 "'range' attribute",
2166 V);
2167 }
2168
2169 // Check for mutually incompatible attributes. Only inreg is compatible with
2170 // sret.
2171 unsigned AttrCount = 0;
2172 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2173 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2174 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2175 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2176 Attrs.hasAttribute(Attribute::InReg);
2177 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2178 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2179 Check(AttrCount <= 1,
2180 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2181 "'byref', and 'sret' are incompatible!",
2182 V);
2183
2184 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2185 Attrs.hasAttribute(Attribute::ReadOnly)),
2186 "Attributes "
2187 "'inalloca and readonly' are incompatible!",
2188 V);
2189
2190 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2191 Attrs.hasAttribute(Attribute::Returned)),
2192 "Attributes "
2193 "'sret and returned' are incompatible!",
2194 V);
2195
2196 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2197 Attrs.hasAttribute(Attribute::SExt)),
2198 "Attributes "
2199 "'zeroext and signext' are incompatible!",
2200 V);
2201
2202 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2203 Attrs.hasAttribute(Attribute::ReadOnly)),
2204 "Attributes "
2205 "'readnone and readonly' are incompatible!",
2206 V);
2207
2208 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2209 Attrs.hasAttribute(Attribute::WriteOnly)),
2210 "Attributes "
2211 "'readnone and writeonly' are incompatible!",
2212 V);
2213
2214 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2215 Attrs.hasAttribute(Attribute::WriteOnly)),
2216 "Attributes "
2217 "'readonly and writeonly' are incompatible!",
2218 V);
2219
2220 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2221 Attrs.hasAttribute(Attribute::AlwaysInline)),
2222 "Attributes "
2223 "'noinline and alwaysinline' are incompatible!",
2224 V);
2225
2226 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2227 Attrs.hasAttribute(Attribute::ReadNone)),
2228 "Attributes writable and readnone are incompatible!", V);
2229
2230 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2231 Attrs.hasAttribute(Attribute::ReadOnly)),
2232 "Attributes writable and readonly are incompatible!", V);
2233
2234 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2235 for (Attribute Attr : Attrs) {
2236 if (!Attr.isStringAttribute() &&
2237 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2238 CheckFailed("Attribute '" + Attr.getAsString() +
2239 "' applied to incompatible type!", V);
2240 return;
2241 }
2242 }
2243
2244 if (isa<PointerType>(Ty)) {
2245 if (Attrs.hasAttribute(Attribute::Alignment)) {
2246 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2247 Check(AttrAlign.value() <= Value::MaximumAlignment,
2248 "huge alignment values are unsupported", V);
2249 }
2250 if (Attrs.hasAttribute(Attribute::ByVal)) {
2251 Type *ByValTy = Attrs.getByValType();
2252 SmallPtrSet<Type *, 4> Visited;
2253 Check(ByValTy->isSized(&Visited),
2254 "Attribute 'byval' does not support unsized types!", V);
2255 // Check if it is or contains a target extension type that disallows being
2256 // used on the stack.
2258 "'byval' argument has illegal target extension type", V);
2259 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2260 "huge 'byval' arguments are unsupported", V);
2261 }
2262 if (Attrs.hasAttribute(Attribute::ByRef)) {
2263 SmallPtrSet<Type *, 4> Visited;
2264 Check(Attrs.getByRefType()->isSized(&Visited),
2265 "Attribute 'byref' does not support unsized types!", V);
2266 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2267 (1ULL << 32),
2268 "huge 'byref' arguments are unsupported", V);
2269 }
2270 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2271 SmallPtrSet<Type *, 4> Visited;
2272 Check(Attrs.getInAllocaType()->isSized(&Visited),
2273 "Attribute 'inalloca' does not support unsized types!", V);
2274 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2275 (1ULL << 32),
2276 "huge 'inalloca' arguments are unsupported", V);
2277 }
2278 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2279 SmallPtrSet<Type *, 4> Visited;
2280 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2281 "Attribute 'preallocated' does not support unsized types!", V);
2282 Check(
2283 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2284 (1ULL << 32),
2285 "huge 'preallocated' arguments are unsupported", V);
2286 }
2287 }
2288
2289 if (Attrs.hasAttribute(Attribute::Initializes)) {
2290 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2291 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2292 V);
2294 "Attribute 'initializes' does not support unordered ranges", V);
2295 }
2296
2297 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2298 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2299 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2300 V);
2301 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2302 "Invalid value for 'nofpclass' test mask", V);
2303 }
2304 if (Attrs.hasAttribute(Attribute::Range)) {
2305 const ConstantRange &CR =
2306 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2308 "Range bit width must match type bit width!", V);
2309 }
2310}
2311
2312void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2313 const Value *V) {
2314 if (Attrs.hasFnAttr(Attr)) {
2315 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2316 unsigned N;
2317 if (S.getAsInteger(10, N))
2318 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2319 }
2320}
2321
2322// Check parameter attributes against a function type.
2323// The value V is printed in error messages.
2324void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2325 const Value *V, bool IsIntrinsic,
2326 bool IsInlineAsm) {
2327 if (Attrs.isEmpty())
2328 return;
2329
2330 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2331 Check(Attrs.hasParentContext(Context),
2332 "Attribute list does not match Module context!", &Attrs, V);
2333 for (const auto &AttrSet : Attrs) {
2334 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2335 "Attribute set does not match Module context!", &AttrSet, V);
2336 for (const auto &A : AttrSet) {
2337 Check(A.hasParentContext(Context),
2338 "Attribute does not match Module context!", &A, V);
2339 }
2340 }
2341 }
2342
2343 bool SawNest = false;
2344 bool SawReturned = false;
2345 bool SawSRet = false;
2346 bool SawSwiftSelf = false;
2347 bool SawSwiftAsync = false;
2348 bool SawSwiftError = false;
2349
2350 // Verify return value attributes.
2351 AttributeSet RetAttrs = Attrs.getRetAttrs();
2352 for (Attribute RetAttr : RetAttrs)
2353 Check(RetAttr.isStringAttribute() ||
2354 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2355 "Attribute '" + RetAttr.getAsString() +
2356 "' does not apply to function return values",
2357 V);
2358
2359 unsigned MaxParameterWidth = 0;
2360 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2361 if (Ty->isVectorTy()) {
2362 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2363 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2364 if (Size > MaxParameterWidth)
2365 MaxParameterWidth = Size;
2366 }
2367 }
2368 };
2369 GetMaxParameterWidth(FT->getReturnType());
2370 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2371
2372 // Verify parameter attributes.
2373 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2374 Type *Ty = FT->getParamType(i);
2375 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2376
2377 if (!IsIntrinsic) {
2378 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2379 "immarg attribute only applies to intrinsics", V);
2380 if (!IsInlineAsm)
2381 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2382 "Attribute 'elementtype' can only be applied to intrinsics"
2383 " and inline asm.",
2384 V);
2385 }
2386
2387 verifyParameterAttrs(ArgAttrs, Ty, V);
2388 GetMaxParameterWidth(Ty);
2389
2390 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2391 Check(!SawNest, "More than one parameter has attribute nest!", V);
2392 SawNest = true;
2393 }
2394
2395 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2396 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2397 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2398 "Incompatible argument and return types for 'returned' attribute",
2399 V);
2400 SawReturned = true;
2401 }
2402
2403 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2404 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2405 Check(i == 0 || i == 1,
2406 "Attribute 'sret' is not on first or second parameter!", V);
2407 SawSRet = true;
2408 }
2409
2410 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2411 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2412 SawSwiftSelf = true;
2413 }
2414
2415 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2416 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2417 SawSwiftAsync = true;
2418 }
2419
2420 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2421 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2422 SawSwiftError = true;
2423 }
2424
2425 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2426 Check(i == FT->getNumParams() - 1,
2427 "inalloca isn't on the last parameter!", V);
2428 }
2429 }
2430
2431 if (!Attrs.hasFnAttrs())
2432 return;
2433
2434 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2435 for (Attribute FnAttr : Attrs.getFnAttrs())
2436 Check(FnAttr.isStringAttribute() ||
2437 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2438 "Attribute '" + FnAttr.getAsString() +
2439 "' does not apply to functions!",
2440 V);
2441
2442 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2443 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2444 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2445
2446 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2447 Check(Attrs.hasFnAttr(Attribute::NoInline),
2448 "Attribute 'optnone' requires 'noinline'!", V);
2449
2450 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2451 "Attributes 'optsize and optnone' are incompatible!", V);
2452
2453 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2454 "Attributes 'minsize and optnone' are incompatible!", V);
2455
2456 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2457 "Attributes 'optdebug and optnone' are incompatible!", V);
2458 }
2459
2460 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2461 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2462 "Attributes "
2463 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2464 V);
2465
2466 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2467 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2468 "Attributes 'optsize and optdebug' are incompatible!", V);
2469
2470 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2471 "Attributes 'minsize and optdebug' are incompatible!", V);
2472 }
2473
2474 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2475 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2476 "Attribute writable and memory without argmem: write are incompatible!",
2477 V);
2478
2479 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2480 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2481 "Attributes 'aarch64_pstate_sm_enabled and "
2482 "aarch64_pstate_sm_compatible' are incompatible!",
2483 V);
2484 }
2485
2486 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2487 Attrs.hasFnAttr("aarch64_inout_za") +
2488 Attrs.hasFnAttr("aarch64_out_za") +
2489 Attrs.hasFnAttr("aarch64_preserves_za") +
2490 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2491 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2492 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2493 "'aarch64_za_state_agnostic' are mutually exclusive",
2494 V);
2495
2496 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2497 Attrs.hasFnAttr("aarch64_in_zt0") +
2498 Attrs.hasFnAttr("aarch64_inout_zt0") +
2499 Attrs.hasFnAttr("aarch64_out_zt0") +
2500 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2501 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2502 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2503 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2504 "'aarch64_za_state_agnostic' are mutually exclusive",
2505 V);
2506
2507 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2508 const GlobalValue *GV = cast<GlobalValue>(V);
2510 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2511 }
2512
2513 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2514 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2515 if (ParamNo >= FT->getNumParams()) {
2516 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2517 return false;
2518 }
2519
2520 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2521 CheckFailed("'allocsize' " + Name +
2522 " argument must refer to an integer parameter",
2523 V);
2524 return false;
2525 }
2526
2527 return true;
2528 };
2529
2530 if (!CheckParam("element size", Args->first))
2531 return;
2532
2533 if (Args->second && !CheckParam("number of elements", *Args->second))
2534 return;
2535 }
2536
2537 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2538 AllocFnKind K = Attrs.getAllocKind();
2540 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2541 if (!is_contained(
2542 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2543 Type))
2544 CheckFailed(
2545 "'allockind()' requires exactly one of alloc, realloc, and free");
2546 if ((Type == AllocFnKind::Free) &&
2547 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2548 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2549 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2550 "or aligned modifiers.");
2551 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2552 if ((K & ZeroedUninit) == ZeroedUninit)
2553 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2554 }
2555
2556 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2557 StringRef S = A.getValueAsString();
2558 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2559 Function *Variant = M.getFunction(S);
2560 if (Variant) {
2561 Attribute Family = Attrs.getFnAttr("alloc-family");
2562 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2563 if (Family.isValid())
2564 Check(VariantFamily.isValid() &&
2565 VariantFamily.getValueAsString() == Family.getValueAsString(),
2566 "'alloc-variant-zeroed' must name a function belonging to the "
2567 "same 'alloc-family'");
2568
2569 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2570 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2571 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2572 "'alloc-variant-zeroed' must name a function with "
2573 "'allockind(\"zeroed\")'");
2574
2575 Check(FT == Variant->getFunctionType(),
2576 "'alloc-variant-zeroed' must name a function with the same "
2577 "signature");
2578
2579 if (const Function *F = dyn_cast<Function>(V))
2580 Check(F->getCallingConv() == Variant->getCallingConv(),
2581 "'alloc-variant-zeroed' must name a function with the same "
2582 "calling convention");
2583 }
2584 }
2585
2586 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2587 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2588 if (VScaleMin == 0)
2589 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2590 else if (!isPowerOf2_32(VScaleMin))
2591 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2592 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2593 if (VScaleMax && VScaleMin > VScaleMax)
2594 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2595 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2596 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2597 }
2598
2599 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2600 StringRef FP = FPAttr.getValueAsString();
2601 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2602 FP != "non-leaf-no-reserve")
2603 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2604 }
2605
2606 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2607 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2608 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2609 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2610 .getValueAsString()
2611 .empty(),
2612 "\"patchable-function-entry-section\" must not be empty");
2613 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2614
2615 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2616 StringRef S = A.getValueAsString();
2617 if (S != "none" && S != "all" && S != "non-leaf")
2618 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2619 }
2620
2621 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2622 StringRef S = A.getValueAsString();
2623 if (S != "a_key" && S != "b_key")
2624 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2625 V);
2626 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2627 CheckFailed(
2628 "'sign-return-address-key' present without `sign-return-address`");
2629 }
2630 }
2631
2632 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2633 StringRef S = A.getValueAsString();
2634 if (S != "" && S != "true" && S != "false")
2635 CheckFailed(
2636 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2637 }
2638
2639 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2640 StringRef S = A.getValueAsString();
2641 if (S != "" && S != "true" && S != "false")
2642 CheckFailed(
2643 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2644 }
2645
2646 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2647 StringRef S = A.getValueAsString();
2648 if (S != "" && S != "true" && S != "false")
2649 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2650 V);
2651 }
2652
2653 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2654 StringRef S = A.getValueAsString();
2655 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2656 if (!Info)
2657 CheckFailed("invalid name for a VFABI variant: " + S, V);
2658 }
2659
2660 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2661 StringRef S = A.getValueAsString();
2663 S.split(Args, ',');
2664 Check(Args.size() >= 5,
2665 "modular-format attribute requires at least 5 arguments", V);
2666 unsigned FirstArgIdx;
2667 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2668 "modular-format attribute first arg index is not an integer", V);
2669 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2670 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2671 "modular-format attribute first arg index is out of bounds", V);
2672 }
2673
2674 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2675 StringRef S = A.getValueAsString();
2676 if (!S.empty()) {
2677 for (auto FeatureFlag : split(S, ',')) {
2678 if (FeatureFlag.empty())
2679 CheckFailed(
2680 "target-features attribute should not contain an empty string");
2681 else
2682 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2683 "target feature '" + FeatureFlag +
2684 "' must start with a '+' or '-'",
2685 V);
2686 }
2687 }
2688 }
2689}
2690void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2691 Check(MD->getNumOperands() == 2,
2692 "'unknown' !prof should have a single additional operand", MD);
2693 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2694 Check(PassName != nullptr,
2695 "'unknown' !prof should have an additional operand of type "
2696 "string");
2697 Check(!PassName->getString().empty(),
2698 "the 'unknown' !prof operand should not be an empty string");
2699}
2700
// Verify structural well-formedness of a function's metadata attachments.
// Only !prof and !kcfi_type get shape checks here; other attachment kinds
// are verified elsewhere (visitMDNode and friends).
2701 void Verifier::verifyFunctionMetadata(
2702     ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2703   for (const auto &Pair : MDs) {
2704     if (Pair.first == LLVMContext::MD_prof) {
2705       MDNode *MD = Pair.second;
2706       Check(MD->getNumOperands() >= 2,
2707             "!prof annotations should have no less than 2 operands", MD);
2708       // We may have functions that are synthesized by the compiler, e.g. in
2709       // WPD, that we can't currently determine the entry count.
       // The "unknown" marker form is validated by its own helper.
2710       if (MD->getOperand(0).equalsStr(
2712         verifyUnknownProfileMetadata(MD);
2713         continue;
2714       }
2715
2716       // Check first operand.
2717       Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2718             MD);
2720             "expected string with name of the !prof annotation", MD);
2721       MDString *MDS = cast<MDString>(MD->getOperand(0));
2722       StringRef ProfName = MDS->getString();
2725             "first operand should be 'function_entry_count'"
2726             " or 'synthetic_function_entry_count'",
2727             MD);
2728
2729       // Check second operand.
2730       Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2731             MD);
2733             "expected integer argument to function_entry_count", MD);
2734     } else if (Pair.first == LLVMContext::MD_kcfi_type) {
       // !kcfi_type carries exactly one operand: a 32-bit constant integer.
2735       MDNode *MD = Pair.second;
2736       Check(MD->getNumOperands() == 1,
2737             "!kcfi_type must have exactly one operand", MD);
2738       Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2739             MD);
2741             "expected a constant operand for !kcfi_type", MD);
2742       Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2743       Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2744             "expected a constant integer operand for !kcfi_type", MD);
2746             "expected a 32-bit integer constant operand for !kcfi_type", MD);
2747     }
2748   }
2749 }
2750
// Walk the full operand graph of a constant iteratively (explicit worklist,
// no recursion) and verify every constant expression, ptrauth constant, and
// global reference reachable from it. ConstantExprVisited memoizes across
// calls, so shared subexpressions are checked only once.
2751 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2752   if (EntryC->getNumOperands() == 0)
2753     return;
2754
2755   if (!ConstantExprVisited.insert(EntryC).second)
2756     return;
2757
   // Seed the worklist with the root constant.
2759   Stack.push_back(EntryC);
2760
2761   while (!Stack.empty()) {
2762     const Constant *C = Stack.pop_back_val();
2763
2764     // Check this constant expression.
2765     if (const auto *CE = dyn_cast<ConstantExpr>(C))
2766       visitConstantExpr(CE);
2767
2768     if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2769       visitConstantPtrAuth(CPA);
2770
2771     if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2772       // Global Values get visited separately, but we do need to make sure
2773       // that the global value is in the correct module
2774       Check(GV->getParent() == &M, "Referencing global in another module!",
2775             EntryC, &M, GV, GV->getParent());
2776       continue;
2777     }
2778
2779     // Visit all sub-expressions.
2780     for (const Use &U : C->operands()) {
2781       const auto *OpC = dyn_cast<Constant>(U);
2782       if (!OpC)
2783         continue;
2784       if (!ConstantExprVisited.insert(OpC).second)
2785         continue;
2786       Stack.push_back(OpC);
2787     }
2788   }
2789 }
2790
2791void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2792 if (CE->getOpcode() == Instruction::BitCast)
2793 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2794 CE->getType()),
2795 "Invalid bitcast", CE);
2796 else if (CE->getOpcode() == Instruction::PtrToAddr)
2797 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2798}
2799
// Check the structural invariants of a ptrauth signed-pointer constant:
// pointer-typed base of the same type as the constant, an i32 key, an i64
// discriminator, plus pointer-typed address-discriminator and deactivation
// symbol operands (checked on lines not all visible in this excerpt).
2800 void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2801   Check(CPA->getPointer()->getType()->isPointerTy(),
2802         "signed ptrauth constant base pointer must have pointer type");
2803
2804   Check(CPA->getType() == CPA->getPointer()->getType(),
2805         "signed ptrauth constant must have same type as its base pointer");
2806
2807   Check(CPA->getKey()->getBitWidth() == 32,
2808         "signed ptrauth constant key must be i32 constant integer");
2809
2811         "signed ptrauth constant address discriminator must be a pointer");
2812
2813   Check(CPA->getDiscriminator()->getBitWidth() == 64,
2814         "signed ptrauth constant discriminator must be i64 constant integer");
2815
2817         "signed ptrauth constant deactivation symbol must be a pointer");
2818
2821         "signed ptrauth constant deactivation symbol must be a global value "
2822         "or null");
2823 }
2824
2825bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2826 // There shouldn't be more attribute sets than there are parameters plus the
2827 // function and return value.
2828 return Attrs.getNumAttrSets() <= Params + 2;
2829}
2830
// Verify the argument/constraint correspondence of an inline-asm call site:
// indirect constraints need a pointer argument (with elementtype, checked on
// a line not visible in this excerpt), direct constraints must not carry
// elementtype, and label constraints are legal only on callbr, where their
// count must match the number of indirect destinations.
2831 void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2832   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2833   unsigned ArgNo = 0;
2834   unsigned LabelNo = 0;
2835   for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2836     if (CI.Type == InlineAsm::isLabel) {
2837       ++LabelNo;
2838       continue;
2839     }
2840
2841     // Only deal with constraints that correspond to call arguments.
2842     if (!CI.hasArg())
2843       continue;
2844
2845     if (CI.isIndirect) {
2846       const Value *Arg = Call.getArgOperand(ArgNo);
2847       Check(Arg->getType()->isPointerTy(),
2848             "Operand for indirect constraint must have pointer type", &Call);
2849
2851             "Operand for indirect constraint must have elementtype attribute",
2852             &Call);
2853     } else {
2854       Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2855             "Elementtype attribute can only be applied for indirect "
2856             "constraints",
2857             &Call);
2858     }
2859
     // ArgNo advances only for constraints that consume a call argument.
2860     ArgNo++;
2861   }
2862
2863   if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2864     Check(LabelNo == CallBr->getNumIndirectDests(),
2865           "Number of label constraints does not match number of callbr dests",
2866           &Call);
2867   } else {
2868     Check(LabelNo == 0, "Label constraints can only be used with callbr",
2869           &Call);
2870   }
2871 }
2872
2873 /// Verify that statepoint intrinsic is well formed.
// Argument layout checked below (as read from the operand accesses):
//   [1] = number of patchable bytes, [2] = wrapped callee (elementtype
//   function type required), [3] = number of call args, [4] = flags,
//   [5 ..] = the wrapped call's arguments, then a transition-arg count and a
//   deopt-arg count (both must be 0 -- inline bundles are deprecated).
2874 void Verifier::verifyStatepoint(const CallBase &Call) {
2875   assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2876
2879         "gc.statepoint must read and write all memory to preserve "
2880         "reordering restrictions required by safepoint semantics",
2881         Call);
2882
2883   const int64_t NumPatchBytes =
2884       cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2885   assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2886   Check(NumPatchBytes >= 0,
2887         "gc.statepoint number of patchable bytes must be "
2888         "positive",
2889         Call);
2890
2891   Type *TargetElemType = Call.getParamElementType(2);
2892   Check(TargetElemType,
2893         "gc.statepoint callee argument must have elementtype attribute", Call);
2894   FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2895   Check(TargetFuncType,
2896         "gc.statepoint callee elementtype must be function type", Call);
2897
2898   const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2899   Check(NumCallArgs >= 0,
2900         "gc.statepoint number of arguments to underlying call "
2901         "must be positive",
2902         Call);
2903   const int NumParams = (int)TargetFuncType->getNumParams();
2904   if (TargetFuncType->isVarArg()) {
2905     Check(NumCallArgs >= NumParams,
2906           "gc.statepoint mismatch in number of vararg call args", Call);
2907
2908     // TODO: Remove this limitation
2909     Check(TargetFuncType->getReturnType()->isVoidTy(),
2910           "gc.statepoint doesn't support wrapping non-void "
2911           "vararg functions yet",
2912           Call);
2913   } else
2914     Check(NumCallArgs == NumParams,
2915           "gc.statepoint mismatch in number of call args", Call);
2916
2917   const uint64_t Flags
2918     = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2919   Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2920         "unknown flag used in gc.statepoint flags argument", Call);
2921
2922   // Verify that the types of the call parameter arguments match
2923   // the type of the wrapped callee.
2924   AttributeList Attrs = Call.getAttributes();
2925   for (int i = 0; i < NumParams; i++) {
2926     Type *ParamType = TargetFuncType->getParamType(i);
2927     Type *ArgType = Call.getArgOperand(5 + i)->getType();
2928     Check(ArgType == ParamType,
2929           "gc.statepoint call argument does not match wrapped "
2930           "function type",
2931           Call);
2932
2933     if (TargetFuncType->isVarArg()) {
2934       AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2935       Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2936             "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2937     }
2938   }
2939
2940   const int EndCallArgsInx = 4 + NumCallArgs;
2941
2942   const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2943   Check(isa<ConstantInt>(NumTransitionArgsV),
2944         "gc.statepoint number of transition arguments "
2945         "must be constant integer",
2946         Call);
2947   const int NumTransitionArgs =
2948       cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2949   Check(NumTransitionArgs == 0,
2950         "gc.statepoint w/inline transition bundle is deprecated", Call);
2951   const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2952
2953   const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2954   Check(isa<ConstantInt>(NumDeoptArgsV),
2955         "gc.statepoint number of deoptimization arguments "
2956         "must be constant integer",
2957         Call);
2958   const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2959   Check(NumDeoptArgs == 0,
2960         "gc.statepoint w/inline deopt operands is deprecated", Call);
2961
2962   const int ExpectedNumArgs = 7 + NumCallArgs;
2963   Check(ExpectedNumArgs == (int)Call.arg_size(),
2964         "gc.statepoint too many arguments", Call);
2965
2966   // Check that the only uses of this gc.statepoint are gc.result or
2967   // gc.relocate calls which are tied to this statepoint and thus part
2968   // of the same statepoint sequence
2969   for (const User *U : Call.users()) {
2970     const CallInst *UserCall = dyn_cast<const CallInst>(U);
2971     Check(UserCall, "illegal use of statepoint token", Call, U);
2972     if (!UserCall)
2973       continue;
2974     Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2975           "gc.result or gc.relocate are the only value uses "
2976           "of a gc.statepoint",
2977           Call, U);
2978     if (isa<GCResultInst>(UserCall)) {
2979       Check(UserCall->getArgOperand(0) == &Call,
2980             "gc.result connected to wrong gc.statepoint", Call, UserCall);
     // NOTE(review): the isa<> below tests `Call` (the statepoint itself),
     // which can never be a GCRelocateInst, so this branch looks dead; it
     // presumably should test `UserCall` instead -- confirm.
2981     } else if (isa<GCRelocateInst>(Call)) {
2982       Check(UserCall->getArgOperand(0) == &Call,
2983             "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2984     }
2985   }
2986
2987   // Note: It is legal for a single derived pointer to be listed multiple
2988   // times.  It's non-optimal, but it is legal.  It can also happen after
2989   // insertion if we strip a bitcast away.
2990   // Note: It is really tempting to check that each base is relocated and
2991   // that a derived pointer is never reused as a base pointer.  This turns
2992   // out to be problematic since optimizations run after safepoint insertion
2993   // can recognize equality properties that the insertion logic doesn't know
2994   // about.  See example statepoint.ll in the verifier subdirectory
2995 }
2996
2997void Verifier::verifyFrameRecoverIndices() {
2998 for (auto &Counts : FrameEscapeInfo) {
2999 Function *F = Counts.first;
3000 unsigned EscapedObjectCount = Counts.second.first;
3001 unsigned MaxRecoveredIndex = Counts.second.second;
3002 Check(MaxRecoveredIndex <= EscapedObjectCount,
3003 "all indices passed to llvm.localrecover must be less than the "
3004 "number of arguments passed to llvm.localescape in the parent "
3005 "function",
3006 F);
3007 }
3008}
3009
3010static Instruction *getSuccPad(Instruction *Terminator) {
3011 BasicBlock *UnwindDest;
3012 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3013 UnwindDest = II->getUnwindDest();
3014 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3015 UnwindDest = CSI->getUnwindDest();
3016 else
3017 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3018 return &*UnwindDest->getFirstNonPHIIt();
3019}
3020
// Detect cycles among sibling EH funclets: starting from each recorded pad,
// follow the chain of unwind successors (each pad has at most one) and report
// an error if the walk re-enters a pad on the current chain. `Visited` is a
// global memo across chains; `Active` holds only the chain being walked.
3021 void Verifier::verifySiblingFuncletUnwinds() {
3022   llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
3023   SmallPtrSet<Instruction *, 8> Visited;
3024   SmallPtrSet<Instruction *, 8> Active;
3025   for (const auto &Pair : SiblingFuncletInfo) {
3026     Instruction *PredPad = Pair.first;
3027     if (Visited.count(PredPad))
3028       continue;
3029     Active.insert(PredPad);
3030     Instruction *Terminator = Pair.second;
3031     do {
3032       Instruction *SuccPad = getSuccPad(Terminator);
3033       if (Active.count(SuccPad)) {
3034         // Found a cycle; report error
       // Re-walk the loop once more to collect the pads (and their
       // terminators) that form the cycle for the diagnostic.
3035         Instruction *CyclePad = SuccPad;
3036         SmallVector<Instruction *, 8> CycleNodes;
3037         do {
3038           CycleNodes.push_back(CyclePad);
3039           Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
3040           if (CycleTerminator != CyclePad)
3041             CycleNodes.push_back(CycleTerminator);
3042           CyclePad = getSuccPad(CycleTerminator);
3043         } while (CyclePad != SuccPad);
3044         Check(false, "EH pads can't handle each other's exceptions",
3045               ArrayRef<Instruction *>(CycleNodes));
3046       }
3047       // Don't re-walk a node we've already checked
3048       if (!Visited.insert(SuccPad).second)
3049         break;
3050       // Walk to this successor if it has a map entry.
3051       PredPad = SuccPad;
3052       auto TermI = SiblingFuncletInfo.find(PredPad);
3053       if (TermI == SiblingFuncletInfo.end())
3054         break;
3055       Terminator = TermI->second;
3056       Active.insert(PredPad);
3057     } while (true);
3058     // Each node only has one successor, so we've walked all the active
3059     // nodes' successors.
3060     Active.clear();
3061   }
3062 }
3063
3064 // visitFunction - Verify that a function is ok.
3065 //
// Checks, in order: signature/type invariants, attribute placement, calling
// convention restrictions, per-argument types, metadata attachments,
// personality function, intrinsic-specific rules, and finally that every
// !dbg location in the body leads back to the function's DISubprogram.
3066 void Verifier::visitFunction(const Function &F) {
3067   visitGlobalValue(F);
3068
3069   // Check function arguments.
3070   FunctionType *FT = F.getFunctionType();
3071   unsigned NumArgs = F.arg_size();
3072
3073   Check(&Context == &F.getContext(),
3074         "Function context does not match Module context!", &F);
3075
3076   Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
3077   Check(FT->getNumParams() == NumArgs,
3078         "# formal arguments must match # of arguments for function type!", &F,
3079         FT);
3080   Check(F.getReturnType()->isFirstClassType() ||
3081             F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
3082         "Functions cannot return aggregate values!", &F);
3083
3084   Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
3085         "Invalid struct return type!", &F);
3086
3087   if (MaybeAlign A = F.getAlign()) {
3088     Check(A->value() <= Value::MaximumAlignment,
3089           "huge alignment values are unsupported", &F);
3090   }
3091
3092   AttributeList Attrs = F.getAttributes();
3093
3094   Check(verifyAttributeCount(Attrs, FT->getNumParams()),
3095         "Attribute after last parameter!", &F);
3096
3097   bool IsIntrinsic = F.isIntrinsic();
3098
3099   // Check function attributes.
3100   verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
3101
3102   // On function declarations/definitions, we do not support the builtin
3103   // attribute. We do not check this in VerifyFunctionAttrs since that is
3104   // checking for Attributes that can/can not ever be on functions.
3105   Check(!Attrs.hasFnAttr(Attribute::Builtin),
3106         "Attribute 'builtin' can only be applied to a callsite.", &F);
3107
3108   Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
3109         "Attribute 'elementtype' can only be applied to a callsite.", &F);
3110
3111   Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
3112         "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
3113
3114   if (Attrs.hasFnAttr(Attribute::Naked))
3115     for (const Argument &Arg : F.args())
3116       Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
3117
3118   // Check that this function meets the restrictions on this calling convention.
3119   // Sometimes varargs is used for perfectly forwarding thunks, so some of these
3120   // restrictions can be lifted.
3121   switch (F.getCallingConv()) {
3122   default:
3123   case CallingConv::C:
3124     break;
3125   case CallingConv::X86_INTR: {
3126     Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
3127           "Calling convention parameter requires byval", &F);
3128     break;
3129   }
3130   case CallingConv::AMDGPU_KERNEL:
3131   case CallingConv::SPIR_KERNEL:
3132   case CallingConv::AMDGPU_CS_Chain:
3133   case CallingConv::AMDGPU_CS_ChainPreserve:
3134     Check(F.getReturnType()->isVoidTy(),
3135           "Calling convention requires void return type", &F);
3136     [[fallthrough]];
3137   case CallingConv::AMDGPU_VS:
3138   case CallingConv::AMDGPU_HS:
3139   case CallingConv::AMDGPU_GS:
3140   case CallingConv::AMDGPU_PS:
3141   case CallingConv::AMDGPU_CS:
3142     Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
3143     if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3144       const unsigned StackAS = DL.getAllocaAddrSpace();
3145       unsigned i = 0;
3146       for (const Argument &Arg : F.args()) {
3147         Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3148               "Calling convention disallows byval", &F);
3149         Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3150               "Calling convention disallows preallocated", &F);
3151         Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3152               "Calling convention disallows inalloca", &F);
3153
3154         if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3155           // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3156           // value here.
3157           Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3158                 "Calling convention disallows stack byref", &F);
3159         }
3160
3161         ++i;
3162       }
3163     }
3164
3165     [[fallthrough]];
3166   case CallingConv::Fast:
3167   case CallingConv::Cold:
3168   case CallingConv::Intel_OCL_BI:
3169   case CallingConv::PTX_Kernel:
3170   case CallingConv::PTX_Device:
3171     Check(!F.isVarArg(),
3172           "Calling convention does not support varargs or "
3173           "perfect forwarding!",
3174           &F);
3175     break;
3176   case CallingConv::AMDGPU_Gfx_WholeWave:
3177     Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3178           "Calling convention requires first argument to be i1", &F);
3179     Check(!F.arg_begin()->hasInRegAttr(),
3180           "Calling convention requires first argument to not be inreg", &F);
3181     Check(!F.isVarArg(),
3182           "Calling convention does not support varargs or "
3183           "perfect forwarding!",
3184           &F);
3185     break;
3186   }
3187
3188   // Check that the argument values match the function type for this function...
3189   unsigned i = 0;
3190   for (const Argument &Arg : F.args()) {
3191     Check(Arg.getType() == FT->getParamType(i),
3192           "Argument value does not match function argument type!", &Arg,
3193           FT->getParamType(i));
3194     Check(Arg.getType()->isFirstClassType(),
3195           "Function arguments must have first-class types!", &Arg);
3196     if (!IsIntrinsic) {
       // metadata/token/x86_amx-typed parameters are intrinsic-only.
3197       Check(!Arg.getType()->isMetadataTy(),
3198             "Function takes metadata but isn't an intrinsic", &Arg, &F);
3199       Check(!Arg.getType()->isTokenLikeTy(),
3200             "Function takes token but isn't an intrinsic", &Arg, &F);
3201       Check(!Arg.getType()->isX86_AMXTy(),
3202             "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3203     }
3204
3205     // Check that swifterror argument is only used by loads and stores.
3206     if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3207       verifySwiftErrorValue(&Arg);
3208     }
3209     ++i;
3210   }
3211
3212   if (!IsIntrinsic) {
3213     Check(!F.getReturnType()->isTokenLikeTy(),
3214           "Function returns a token but isn't an intrinsic", &F);
3215     Check(!F.getReturnType()->isX86_AMXTy(),
3216           "Function returns a x86_amx but isn't an intrinsic", &F);
3217   }
3218
3219   // Get the function metadata attachments.
3221   F.getAllMetadata(MDs);
3222   assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3223   verifyFunctionMetadata(MDs);
3224
3225   // Check validity of the personality function
3226   if (F.hasPersonalityFn()) {
3227     auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3228     if (Per)
3229       Check(Per->getParent() == F.getParent(),
3230             "Referencing personality function in another module!", &F,
3231             F.getParent(), Per, Per->getParent());
3232   }
3233
3234   // EH funclet coloring can be expensive, recompute on-demand
3235   BlockEHFuncletColors.clear();
3236
   // Metadata rules differ for materializable functions, declarations, and
   // definitions; handle the three cases separately.
3237   if (F.isMaterializable()) {
3238     // Function has a body somewhere we can't see.
3239     Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3240           MDs.empty() ? nullptr : MDs.front().second);
3241   } else if (F.isDeclaration()) {
3242     for (const auto &I : MDs) {
3243       // This is used for call site debug information.
3244       CheckDI(I.first != LLVMContext::MD_dbg ||
3245                   !cast<DISubprogram>(I.second)->isDistinct(),
3246               "function declaration may only have a unique !dbg attachment",
3247               &F);
3248       Check(I.first != LLVMContext::MD_prof,
3249             "function declaration may not have a !prof attachment", &F);
3250
3251       // Verify the metadata itself.
3252       visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3253     }
3254     Check(!F.hasPersonalityFn(),
3255           "Function declaration shouldn't have a personality routine", &F);
3256   } else {
3257     // Verify that this function (which has a body) is not named "llvm.*".  It
3258     // is not legal to define intrinsics.
3259     Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3260
3261     // Check the entry node
3262     const BasicBlock *Entry = &F.getEntryBlock();
3263     Check(pred_empty(Entry),
3264           "Entry block to function must not have predecessors!", Entry);
3265
3266     // The address of the entry block cannot be taken, unless it is dead.
3267     if (Entry->hasAddressTaken()) {
3268       Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3269             "blockaddress may not be used with the entry block!", Entry);
3270     }
3271
3272     unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3273              NumKCFIAttachments = 0;
3274     // Visit metadata attachments.
3275     for (const auto &I : MDs) {
3276       // Verify that the attachment is legal.
3277       auto AllowLocs = AreDebugLocsAllowed::No;
3278       switch (I.first) {
3279       default:
3280         break;
3281       case LLVMContext::MD_dbg: {
3282         ++NumDebugAttachments;
3283         CheckDI(NumDebugAttachments == 1,
3284                 "function must have a single !dbg attachment", &F, I.second);
3285         CheckDI(isa<DISubprogram>(I.second),
3286                 "function !dbg attachment must be a subprogram", &F, I.second);
3287         CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3288                 "function definition may only have a distinct !dbg attachment",
3289                 &F);
3290
         // Each DISubprogram may serve at most one function.
3291         auto *SP = cast<DISubprogram>(I.second);
3292         const Function *&AttachedTo = DISubprogramAttachments[SP];
3293         CheckDI(!AttachedTo || AttachedTo == &F,
3294                 "DISubprogram attached to more than one function", SP, &F);
3295         AttachedTo = &F;
3296         AllowLocs = AreDebugLocsAllowed::Yes;
3297         break;
3298       }
3299       case LLVMContext::MD_prof:
3300         ++NumProfAttachments;
3301         Check(NumProfAttachments == 1,
3302               "function must have a single !prof attachment", &F, I.second);
3303         break;
3304       case LLVMContext::MD_kcfi_type:
3305         ++NumKCFIAttachments;
3306         Check(NumKCFIAttachments == 1,
3307               "function must have a single !kcfi_type attachment", &F,
3308               I.second);
3309         break;
3310       }
3311
3312       // Verify the metadata itself.
3313       visitMDNode(*I.second, AllowLocs);
3314     }
3315   }
3316
3317   // If this function is actually an intrinsic, verify that it is only used in
3318   // direct call/invokes, never having its "address taken".
3319   // Only do this if the module is materialized, otherwise we don't have all the
3320   // uses.
3321   if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3322     const User *U;
3323     if (F.hasAddressTaken(&U, false, true, false,
3324                           /*IgnoreARCAttachedCall=*/true))
3325       Check(false, "Invalid user of intrinsic instruction!", U);
3326   }
3327
3328   // Check intrinsics' signatures.
3329   switch (F.getIntrinsicID()) {
3330   case Intrinsic::experimental_gc_get_pointer_base: {
3331     FunctionType *FT = F.getFunctionType();
3332     Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3333     Check(isa<PointerType>(F.getReturnType()),
3334           "gc.get.pointer.base must return a pointer", F);
3335     Check(FT->getParamType(0) == F.getReturnType(),
3336           "gc.get.pointer.base operand and result must be of the same type", F);
3337     break;
3338   }
3339   case Intrinsic::experimental_gc_get_pointer_offset: {
3340     FunctionType *FT = F.getFunctionType();
3341     Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3342     Check(isa<PointerType>(FT->getParamType(0)),
3343           "gc.get.pointer.offset operand must be a pointer", F);
3344     Check(F.getReturnType()->isIntegerTy(),
3345           "gc.get.pointer.offset must return integer", F);
3346     break;
3347   }
3348   }
3349
3350   auto *N = F.getSubprogram();
3351   HasDebugInfo = (N != nullptr);
3352   if (!HasDebugInfo)
3353     return;
3354
3355   // Check that all !dbg attachments lead to back to N.
3356   //
3357   // FIXME: Check this incrementally while visiting !dbg attachments.
3358   // FIXME: Only check when N is the canonical subprogram for F.
3359   SmallPtrSet<const MDNode *, 32> Seen;
3360   auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3361     // Be careful about using DILocation here since we might be dealing with
3362     // broken code (this is the Verifier after all).
3363     const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3364     if (!DL)
3365       return;
3366     if (!Seen.insert(DL).second)
3367       return;
3368
3369     Metadata *Parent = DL->getRawScope();
3370     CheckDI(Parent && isa<DILocalScope>(Parent),
3371             "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3372
3373     DILocalScope *Scope = DL->getInlinedAtScope();
3374     Check(Scope, "Failed to find DILocalScope", DL);
3375
3376     if (!Seen.insert(Scope).second)
3377       return;
3378
3379     DISubprogram *SP = Scope->getSubprogram();
3380
3381     // Scope and SP could be the same MDNode and we don't want to skip
3382     // validation in that case
3383     if ((Scope != SP) && !Seen.insert(SP).second)
3384       return;
3385
3386     CheckDI(SP->describes(&F),
3387             "!dbg attachment points at wrong subprogram for function", N, &F,
3388             &I, DL, Scope, SP);
3389   };
3390   for (auto &BB : F)
3391     for (auto &I : BB) {
3392       VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3393       // The llvm.loop annotations also contain two DILocations.
3394       if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3395         for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3396           VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
       // Bail out of the walk as soon as broken debug info is detected.
3397       if (BrokenDebugInfo)
3398         return;
3399     }
3400 }
3401
3402 // verifyBasicBlock - Verify that a basic block is well formed...
3403 //
3404 void Verifier::visitBasicBlock(BasicBlock &BB) {
3405   InstsInThisBlock.clear();
3406   ConvergenceVerifyHelper.visit(BB);
3407
3408   // Ensure that basic blocks have terminators!
3409   Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3410
3411   // Check constraints that this basic block imposes on all of the PHI nodes in
3412   // it.
3413   if (isa<PHINode>(BB.front())) {
     // Sort both the predecessor list and each PHI's (block, value) pairs so
     // they can be compared positionally below.
3414     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3416     llvm::sort(Preds);
3417     for (const PHINode &PN : BB.phis()) {
3418       Check(PN.getNumIncomingValues() == Preds.size(),
3419             "PHINode should have one entry for each predecessor of its "
3420             "parent basic block!",
3421             &PN);
3422
3423       // Get and sort all incoming values in the PHI node...
3424       Values.clear();
3425       Values.reserve(PN.getNumIncomingValues());
3426       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3427         Values.push_back(
3428             std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3429       llvm::sort(Values);
3430
3431       for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3432         // Check to make sure that if there is more than one entry for a
3433         // particular basic block in this PHI node, that the incoming values are
3434         // all identical.
3435         //
3436         Check(i == 0 || Values[i].first != Values[i - 1].first ||
3437                   Values[i].second == Values[i - 1].second,
3438               "PHI node has multiple entries for the same basic block with "
3439               "different incoming values!",
3440               &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3441
3442         // Check to make sure that the predecessors and PHI node entries are
3443         // matched up.
3444         Check(Values[i].first == Preds[i],
3445               "PHI node entries do not match predecessors!", &PN,
3446               Values[i].first, Preds[i]);
3447       }
3448     }
3449   }
3450
3451   // Check that all instructions have their parent pointers set up correctly.
3452   for (auto &I : BB)
3453   {
3454     Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3455   }
3456
3457   // Confirm that no issues arise from the debug program.
3458   CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3459           &BB);
3460 }
3461
3462void Verifier::visitTerminator(Instruction &I) {
3463 // Ensure that terminators only exist at the end of the basic block.
3464 Check(&I == I.getParent()->getTerminator(),
3465 "Terminator found in the middle of a basic block!", I.getParent());
3466 visitInstruction(I);
3467}
3468
// A conditional branch's condition operand must have plain i1 type (the
// Check's condition is on a line not visible in this excerpt).
3469 void Verifier::visitCondBrInst(CondBrInst &BI) {
3471         "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3472   visitTerminator(BI);
3473 }
3474
3475void Verifier::visitReturnInst(ReturnInst &RI) {
3476 Function *F = RI.getParent()->getParent();
3477 unsigned N = RI.getNumOperands();
3478 if (F->getReturnType()->isVoidTy())
3479 Check(N == 0,
3480 "Found return instr that returns non-void in Function of void "
3481 "return type!",
3482 &RI, F->getReturnType());
3483 else
3484 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3485 "Function return type does not match operand "
3486 "type of return inst!",
3487 &RI, F->getReturnType());
3488
3489 // Check to make sure that the return value has necessary properties for
3490 // terminators...
3491 visitTerminator(RI);
3492}
3493
3494void Verifier::visitSwitchInst(SwitchInst &SI) {
3495 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3496 // Check to make sure that all of the constants in the switch instruction
3497 // have the same type as the switched-on value.
3498 Type *SwitchTy = SI.getCondition()->getType();
3499 SmallPtrSet<ConstantInt*, 32> Constants;
3500 for (auto &Case : SI.cases()) {
3501 Check(isa<ConstantInt>(Case.getCaseValue()),
3502 "Case value is not a constant integer.", &SI);
3503 Check(Case.getCaseValue()->getType() == SwitchTy,
3504 "Switch constants must all be same type as switch value!", &SI);
3505 Check(Constants.insert(Case.getCaseValue()).second,
3506 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3507 }
3508
3509 visitTerminator(SI);
3510}
3511
3512void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3514 "Indirectbr operand must have pointer type!", &BI);
3515 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3517 "Indirectbr destinations must all have pointer type!", &BI);
3518
3519 visitTerminator(BI);
3520}
3521
3522void Verifier::visitCallBrInst(CallBrInst &CBI) {
3523 if (!CBI.isInlineAsm()) {
3525 "Callbr: indirect function / invalid signature");
3526 Check(!CBI.hasOperandBundles(),
3527 "Callbr for intrinsics currently doesn't support operand bundles");
3528
3529 switch (CBI.getIntrinsicID()) {
3530 case Intrinsic::amdgcn_kill: {
3531 Check(CBI.getNumIndirectDests() == 1,
3532 "Callbr amdgcn_kill only supports one indirect dest");
3533 bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
3534 CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
3535 Check(Unreachable || (Call && Call->getIntrinsicID() ==
3536 Intrinsic::amdgcn_unreachable),
3537 "Callbr amdgcn_kill indirect dest needs to be unreachable");
3538 break;
3539 }
3540 default:
3541 CheckFailed(
3542 "Callbr currently only supports asm-goto and selected intrinsics");
3543 }
3544 visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
3545 } else {
3546 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3547 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3548
3549 verifyInlineAsmCall(CBI);
3550 }
3551 visitTerminator(CBI);
3552}
3553
3554void Verifier::visitSelectInst(SelectInst &SI) {
3555 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3556 SI.getOperand(2)),
3557 "Invalid operands for select instruction!", &SI);
3558
3559 Check(SI.getTrueValue()->getType() == SI.getType(),
3560 "Select values must have same type as select instruction!", &SI);
3561 visitInstruction(SI);
3562}
3563
3564/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3565/// a pass, if any exist, it's an error.
3566///
3567void Verifier::visitUserOp1(Instruction &I) {
3568 Check(false, "User-defined operators should not live outside of a pass!", &I);
3569}
3570
3571void Verifier::visitTruncInst(TruncInst &I) {
3572 // Get the source and destination types
3573 Type *SrcTy = I.getOperand(0)->getType();
3574 Type *DestTy = I.getType();
3575
3576 // Get the size of the types in bits, we'll need this later
3577 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3578 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3579
3580 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3581 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3582 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3583 "trunc source and destination must both be a vector or neither", &I);
3584 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3585
3586 visitInstruction(I);
3587}
3588
3589void Verifier::visitZExtInst(ZExtInst &I) {
3590 // Get the source and destination types
3591 Type *SrcTy = I.getOperand(0)->getType();
3592 Type *DestTy = I.getType();
3593
3594 // Get the size of the types in bits, we'll need this later
3595 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3596 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3597 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3598 "zext source and destination must both be a vector or neither", &I);
3599 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3600 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3601
3602 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3603
3604 visitInstruction(I);
3605}
3606
3607void Verifier::visitSExtInst(SExtInst &I) {
3608 // Get the source and destination types
3609 Type *SrcTy = I.getOperand(0)->getType();
3610 Type *DestTy = I.getType();
3611
3612 // Get the size of the types in bits, we'll need this later
3613 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3614 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3615
3616 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3617 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3618 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3619 "sext source and destination must both be a vector or neither", &I);
3620 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3621
3622 visitInstruction(I);
3623}
3624
3625void Verifier::visitFPTruncInst(FPTruncInst &I) {
3626 // Get the source and destination types
3627 Type *SrcTy = I.getOperand(0)->getType();
3628 Type *DestTy = I.getType();
3629 // Get the size of the types in bits, we'll need this later
3630 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3631 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3632
3633 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3634 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3635 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3636 "fptrunc source and destination must both be a vector or neither", &I);
3637 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3638
3639 visitInstruction(I);
3640}
3641
3642void Verifier::visitFPExtInst(FPExtInst &I) {
3643 // Get the source and destination types
3644 Type *SrcTy = I.getOperand(0)->getType();
3645 Type *DestTy = I.getType();
3646
3647 // Get the size of the types in bits, we'll need this later
3648 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3649 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3650
3651 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3652 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3653 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3654 "fpext source and destination must both be a vector or neither", &I);
3655 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3656
3657 visitInstruction(I);
3658}
3659
3660void Verifier::visitUIToFPInst(UIToFPInst &I) {
3661 // Get the source and destination types
3662 Type *SrcTy = I.getOperand(0)->getType();
3663 Type *DestTy = I.getType();
3664
3665 bool SrcVec = SrcTy->isVectorTy();
3666 bool DstVec = DestTy->isVectorTy();
3667
3668 Check(SrcVec == DstVec,
3669 "UIToFP source and dest must both be vector or scalar", &I);
3670 Check(SrcTy->isIntOrIntVectorTy(),
3671 "UIToFP source must be integer or integer vector", &I);
3672 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3673 &I);
3674
3675 if (SrcVec && DstVec)
3676 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3677 cast<VectorType>(DestTy)->getElementCount(),
3678 "UIToFP source and dest vector length mismatch", &I);
3679
3680 visitInstruction(I);
3681}
3682
3683void Verifier::visitSIToFPInst(SIToFPInst &I) {
3684 // Get the source and destination types
3685 Type *SrcTy = I.getOperand(0)->getType();
3686 Type *DestTy = I.getType();
3687
3688 bool SrcVec = SrcTy->isVectorTy();
3689 bool DstVec = DestTy->isVectorTy();
3690
3691 Check(SrcVec == DstVec,
3692 "SIToFP source and dest must both be vector or scalar", &I);
3693 Check(SrcTy->isIntOrIntVectorTy(),
3694 "SIToFP source must be integer or integer vector", &I);
3695 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3696 &I);
3697
3698 if (SrcVec && DstVec)
3699 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3700 cast<VectorType>(DestTy)->getElementCount(),
3701 "SIToFP source and dest vector length mismatch", &I);
3702
3703 visitInstruction(I);
3704}
3705
3706void Verifier::visitFPToUIInst(FPToUIInst &I) {
3707 // Get the source and destination types
3708 Type *SrcTy = I.getOperand(0)->getType();
3709 Type *DestTy = I.getType();
3710
3711 bool SrcVec = SrcTy->isVectorTy();
3712 bool DstVec = DestTy->isVectorTy();
3713
3714 Check(SrcVec == DstVec,
3715 "FPToUI source and dest must both be vector or scalar", &I);
3716 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3717 Check(DestTy->isIntOrIntVectorTy(),
3718 "FPToUI result must be integer or integer vector", &I);
3719
3720 if (SrcVec && DstVec)
3721 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3722 cast<VectorType>(DestTy)->getElementCount(),
3723 "FPToUI source and dest vector length mismatch", &I);
3724
3725 visitInstruction(I);
3726}
3727
3728void Verifier::visitFPToSIInst(FPToSIInst &I) {
3729 // Get the source and destination types
3730 Type *SrcTy = I.getOperand(0)->getType();
3731 Type *DestTy = I.getType();
3732
3733 bool SrcVec = SrcTy->isVectorTy();
3734 bool DstVec = DestTy->isVectorTy();
3735
3736 Check(SrcVec == DstVec,
3737 "FPToSI source and dest must both be vector or scalar", &I);
3738 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3739 Check(DestTy->isIntOrIntVectorTy(),
3740 "FPToSI result must be integer or integer vector", &I);
3741
3742 if (SrcVec && DstVec)
3743 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3744 cast<VectorType>(DestTy)->getElementCount(),
3745 "FPToSI source and dest vector length mismatch", &I);
3746
3747 visitInstruction(I);
3748}
3749
3750void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3751 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3752 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3753 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3754 V);
3755
3756 if (SrcTy->isVectorTy()) {
3757 auto *VSrc = cast<VectorType>(SrcTy);
3758 auto *VDest = cast<VectorType>(DestTy);
3759 Check(VSrc->getElementCount() == VDest->getElementCount(),
3760 "PtrToAddr vector length mismatch", V);
3761 }
3762
3763 Type *AddrTy = DL.getAddressType(SrcTy);
3764 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3765}
3766
3767void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3768 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3769 visitInstruction(I);
3770}
3771
3772void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3773 // Get the source and destination types
3774 Type *SrcTy = I.getOperand(0)->getType();
3775 Type *DestTy = I.getType();
3776
3777 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3778
3779 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3780 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3781 &I);
3782
3783 if (SrcTy->isVectorTy()) {
3784 auto *VSrc = cast<VectorType>(SrcTy);
3785 auto *VDest = cast<VectorType>(DestTy);
3786 Check(VSrc->getElementCount() == VDest->getElementCount(),
3787 "PtrToInt Vector length mismatch", &I);
3788 }
3789
3790 visitInstruction(I);
3791}
3792
3793void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3794 // Get the source and destination types
3795 Type *SrcTy = I.getOperand(0)->getType();
3796 Type *DestTy = I.getType();
3797
3798 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3799 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3800
3801 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3802 &I);
3803 if (SrcTy->isVectorTy()) {
3804 auto *VSrc = cast<VectorType>(SrcTy);
3805 auto *VDest = cast<VectorType>(DestTy);
3806 Check(VSrc->getElementCount() == VDest->getElementCount(),
3807 "IntToPtr Vector length mismatch", &I);
3808 }
3809 visitInstruction(I);
3810}
3811
3812void Verifier::visitBitCastInst(BitCastInst &I) {
3813 Check(
3814 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3815 "Invalid bitcast", &I);
3816 visitInstruction(I);
3817}
3818
3819void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3820 Type *SrcTy = I.getOperand(0)->getType();
3821 Type *DestTy = I.getType();
3822
3823 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3824 &I);
3825 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3826 &I);
3828 "AddrSpaceCast must be between different address spaces", &I);
3829 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3830 Check(SrcVTy->getElementCount() ==
3831 cast<VectorType>(DestTy)->getElementCount(),
3832 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3833 visitInstruction(I);
3834}
3835
3836/// visitPHINode - Ensure that a PHI node is well formed.
3837///
3838void Verifier::visitPHINode(PHINode &PN) {
3839 // Ensure that the PHI nodes are all grouped together at the top of the block.
3840 // This can be tested by checking whether the instruction before this is
3841 // either nonexistent (because this is begin()) or is a PHI node. If not,
3842 // then there is some other instruction before a PHI.
3843 Check(&PN == &PN.getParent()->front() ||
3845 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3846
3847 // Check that a PHI doesn't yield a Token.
3848 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3849
3850 // Check that all of the values of the PHI node have the same type as the
3851 // result.
3852 for (Value *IncValue : PN.incoming_values()) {
3853 Check(PN.getType() == IncValue->getType(),
3854 "PHI node operands are not the same type as the result!", &PN);
3855 }
3856
3857 // All other PHI node constraints are checked in the visitBasicBlock method.
3858
3859 visitInstruction(PN);
3860}
3861
3862void Verifier::visitCallBase(CallBase &Call) {
3864 "Called function must be a pointer!", Call);
3865 FunctionType *FTy = Call.getFunctionType();
3866
3867 // Verify that the correct number of arguments are being passed
3868 if (FTy->isVarArg())
3869 Check(Call.arg_size() >= FTy->getNumParams(),
3870 "Called function requires more parameters than were provided!", Call);
3871 else
3872 Check(Call.arg_size() == FTy->getNumParams(),
3873 "Incorrect number of arguments passed to called function!", Call);
3874
3875 // Verify that all arguments to the call match the function type.
3876 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3877 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3878 "Call parameter type does not match function signature!",
3879 Call.getArgOperand(i), FTy->getParamType(i), Call);
3880
3881 AttributeList Attrs = Call.getAttributes();
3882
3883 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3884 "Attribute after last parameter!", Call);
3885
3886 Function *Callee =
3888 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3889 if (IsIntrinsic)
3890 Check(Callee->getFunctionType() == FTy,
3891 "Intrinsic called with incompatible signature", Call);
3892
3893 // Verify if the calling convention of the callee is callable.
3895 "calling convention does not permit calls", Call);
3896
3897 // Disallow passing/returning values with alignment higher than we can
3898 // represent.
3899 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3900 // necessary.
3901 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3902 if (!Ty->isSized())
3903 return;
3904 Align ABIAlign = DL.getABITypeAlign(Ty);
3905 Check(ABIAlign.value() <= Value::MaximumAlignment,
3906 "Incorrect alignment of " + Message + " to called function!", Call);
3907 };
3908
3909 if (!IsIntrinsic) {
3910 VerifyTypeAlign(FTy->getReturnType(), "return type");
3911 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3912 Type *Ty = FTy->getParamType(i);
3913 VerifyTypeAlign(Ty, "argument passed");
3914 }
3915 }
3916
3917 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3918 // Don't allow speculatable on call sites, unless the underlying function
3919 // declaration is also speculatable.
3920 Check(Callee && Callee->isSpeculatable(),
3921 "speculatable attribute may not apply to call sites", Call);
3922 }
3923
3924 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3925 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3926 "preallocated as a call site attribute can only be on "
3927 "llvm.call.preallocated.arg");
3928 }
3929
3930 Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
3931 "denormal_fpenv attribute may not apply to call sites", Call);
3932
3933 // Verify call attributes.
3934 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3935
3936 // Conservatively check the inalloca argument.
3937 // We have a bug if we can find that there is an underlying alloca without
3938 // inalloca.
3939 if (Call.hasInAllocaArgument()) {
3940 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3941 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3942 Check(AI->isUsedWithInAlloca(),
3943 "inalloca argument for call has mismatched alloca", AI, Call);
3944 }
3945
3946 // For each argument of the callsite, if it has the swifterror argument,
3947 // make sure the underlying alloca/parameter it comes from has a swifterror as
3948 // well.
3949 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3950 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3951 Value *SwiftErrorArg = Call.getArgOperand(i);
3952 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3953 Check(AI->isSwiftError(),
3954 "swifterror argument for call has mismatched alloca", AI, Call);
3955 continue;
3956 }
3957 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3958 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3959 SwiftErrorArg, Call);
3960 Check(ArgI->hasSwiftErrorAttr(),
3961 "swifterror argument for call has mismatched parameter", ArgI,
3962 Call);
3963 }
3964
3965 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3966 // Don't allow immarg on call sites, unless the underlying declaration
3967 // also has the matching immarg.
3968 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3969 "immarg may not apply only to call sites", Call.getArgOperand(i),
3970 Call);
3971 }
3972
3973 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3974 Value *ArgVal = Call.getArgOperand(i);
3975 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3976 "immarg operand has non-immediate parameter", ArgVal, Call);
3977
3978 // If the imm-arg is an integer and also has a range attached,
3979 // check if the given value is within the range.
3980 if (Call.paramHasAttr(i, Attribute::Range)) {
3981 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3982 const ConstantRange &CR =
3983 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3984 Check(CR.contains(CI->getValue()),
3985 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3986 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3987 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3988 Call);
3989 }
3990 }
3991 }
3992
3993 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3994 Value *ArgVal = Call.getArgOperand(i);
3995 bool hasOB =
3997 bool isMustTail = Call.isMustTailCall();
3998 Check(hasOB != isMustTail,
3999 "preallocated operand either requires a preallocated bundle or "
4000 "the call to be musttail (but not both)",
4001 ArgVal, Call);
4002 }
4003 }
4004
4005 if (FTy->isVarArg()) {
4006 // FIXME? is 'nest' even legal here?
4007 bool SawNest = false;
4008 bool SawReturned = false;
4009
4010 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
4011 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
4012 SawNest = true;
4013 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
4014 SawReturned = true;
4015 }
4016
4017 // Check attributes on the varargs part.
4018 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
4019 Type *Ty = Call.getArgOperand(Idx)->getType();
4020 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
4021 verifyParameterAttrs(ArgAttrs, Ty, &Call);
4022
4023 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
4024 Check(!SawNest, "More than one parameter has attribute nest!", Call);
4025 SawNest = true;
4026 }
4027
4028 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
4029 Check(!SawReturned, "More than one parameter has attribute returned!",
4030 Call);
4031 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
4032 "Incompatible argument and return types for 'returned' "
4033 "attribute",
4034 Call);
4035 SawReturned = true;
4036 }
4037
4038 // Statepoint intrinsic is vararg but the wrapped function may be not.
4039 // Allow sret here and check the wrapped function in verifyStatepoint.
4040 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
4041 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
4042 "Attribute 'sret' cannot be used for vararg call arguments!",
4043 Call);
4044
4045 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
4046 Check(Idx == Call.arg_size() - 1,
4047 "inalloca isn't on the last argument!", Call);
4048 }
4049 }
4050
4051 // Verify that there's no metadata unless it's a direct call to an intrinsic.
4052 if (!IsIntrinsic) {
4053 for (Type *ParamTy : FTy->params()) {
4054 Check(!ParamTy->isMetadataTy(),
4055 "Function has metadata parameter but isn't an intrinsic", Call);
4056 Check(!ParamTy->isTokenLikeTy(),
4057 "Function has token parameter but isn't an intrinsic", Call);
4058 }
4059 }
4060
4061 // Verify that indirect calls don't return tokens.
4062 if (!Call.getCalledFunction()) {
4063 Check(!FTy->getReturnType()->isTokenLikeTy(),
4064 "Return type cannot be token for indirect call!");
4065 Check(!FTy->getReturnType()->isX86_AMXTy(),
4066 "Return type cannot be x86_amx for indirect call!");
4067 }
4068
4070 visitIntrinsicCall(ID, Call);
4071
4072 // Verify that a callsite has at most one "deopt", at most one "funclet", at
4073 // most one "gc-transition", at most one "cfguardtarget", at most one
4074 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
4075 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
4076 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
4077 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
4078 FoundPtrauthBundle = false, FoundKCFIBundle = false,
4079 FoundAttachedCallBundle = false;
4080 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
4081 OperandBundleUse BU = Call.getOperandBundleAt(i);
4082 uint32_t Tag = BU.getTagID();
4083 if (Tag == LLVMContext::OB_deopt) {
4084 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
4085 FoundDeoptBundle = true;
4086 } else if (Tag == LLVMContext::OB_gc_transition) {
4087 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
4088 Call);
4089 FoundGCTransitionBundle = true;
4090 } else if (Tag == LLVMContext::OB_funclet) {
4091 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
4092 FoundFuncletBundle = true;
4093 Check(BU.Inputs.size() == 1,
4094 "Expected exactly one funclet bundle operand", Call);
4095 Check(isa<FuncletPadInst>(BU.Inputs.front()),
4096 "Funclet bundle operands should correspond to a FuncletPadInst",
4097 Call);
4098 } else if (Tag == LLVMContext::OB_cfguardtarget) {
4099 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
4100 Call);
4101 FoundCFGuardTargetBundle = true;
4102 Check(BU.Inputs.size() == 1,
4103 "Expected exactly one cfguardtarget bundle operand", Call);
4104 } else if (Tag == LLVMContext::OB_ptrauth) {
4105 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
4106 FoundPtrauthBundle = true;
4107 Check(BU.Inputs.size() == 2,
4108 "Expected exactly two ptrauth bundle operands", Call);
4109 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4110 BU.Inputs[0]->getType()->isIntegerTy(32),
4111 "Ptrauth bundle key operand must be an i32 constant", Call);
4112 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
4113 "Ptrauth bundle discriminator operand must be an i64", Call);
4114 } else if (Tag == LLVMContext::OB_kcfi) {
4115 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
4116 FoundKCFIBundle = true;
4117 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
4118 Call);
4119 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4120 BU.Inputs[0]->getType()->isIntegerTy(32),
4121 "Kcfi bundle operand must be an i32 constant", Call);
4122 } else if (Tag == LLVMContext::OB_preallocated) {
4123 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
4124 Call);
4125 FoundPreallocatedBundle = true;
4126 Check(BU.Inputs.size() == 1,
4127 "Expected exactly one preallocated bundle operand", Call);
4128 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
4129 Check(Input &&
4130 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
4131 "\"preallocated\" argument must be a token from "
4132 "llvm.call.preallocated.setup",
4133 Call);
4134 } else if (Tag == LLVMContext::OB_gc_live) {
4135 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
4136 FoundGCLiveBundle = true;
4138 Check(!FoundAttachedCallBundle,
4139 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
4140 FoundAttachedCallBundle = true;
4141 verifyAttachedCallBundle(Call, BU);
4142 }
4143 }
4144
4145 // Verify that callee and callsite agree on whether to use pointer auth.
4146 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
4147 "Direct call cannot have a ptrauth bundle", Call);
4148
4149 // Verify that each inlinable callsite of a debug-info-bearing function in a
4150 // debug-info-bearing function has a debug location attached to it. Failure to
4151 // do so causes assertion failures when the inliner sets up inline scope info
4152 // (Interposable functions are not inlinable, neither are functions without
4153 // definitions.)
4159 "inlinable function call in a function with "
4160 "debug info must have a !dbg location",
4161 Call);
4162
4163 if (Call.isInlineAsm())
4164 verifyInlineAsmCall(Call);
4165
4166 ConvergenceVerifyHelper.visit(Call);
4167
4168 visitInstruction(Call);
4169}
4170
4171void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4172 StringRef Context) {
4173 Check(!Attrs.contains(Attribute::InAlloca),
4174 Twine("inalloca attribute not allowed in ") + Context);
4175 Check(!Attrs.contains(Attribute::InReg),
4176 Twine("inreg attribute not allowed in ") + Context);
4177 Check(!Attrs.contains(Attribute::SwiftError),
4178 Twine("swifterror attribute not allowed in ") + Context);
4179 Check(!Attrs.contains(Attribute::Preallocated),
4180 Twine("preallocated attribute not allowed in ") + Context);
4181 Check(!Attrs.contains(Attribute::ByRef),
4182 Twine("byref attribute not allowed in ") + Context);
4183}
4184
4185/// Two types are "congruent" if they are identical, or if they are both pointer
4186/// types with different pointee types and the same address space.
4187static bool isTypeCongruent(Type *L, Type *R) {
4188 if (L == R)
4189 return true;
4192 if (!PL || !PR)
4193 return false;
4194 return PL->getAddressSpace() == PR->getAddressSpace();
4195}
4196
4197static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4198 static const Attribute::AttrKind ABIAttrs[] = {
4199 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4200 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4201 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4202 Attribute::ByRef};
4203 AttrBuilder Copy(C);
4204 for (auto AK : ABIAttrs) {
4205 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4206 if (Attr.isValid())
4207 Copy.addAttribute(Attr);
4208 }
4209
4210 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4211 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4212 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4213 Attrs.hasParamAttr(I, Attribute::ByRef)))
4214 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4215 return Copy;
4216}
4217
4218void Verifier::verifyMustTailCall(CallInst &CI) {
4219 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4220
4221 Function *F = CI.getParent()->getParent();
4222 FunctionType *CallerTy = F->getFunctionType();
4223 FunctionType *CalleeTy = CI.getFunctionType();
4224 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4225 "cannot guarantee tail call due to mismatched varargs", &CI);
4226 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4227 "cannot guarantee tail call due to mismatched return types", &CI);
4228
4229 // - The calling conventions of the caller and callee must match.
4230 Check(F->getCallingConv() == CI.getCallingConv(),
4231 "cannot guarantee tail call due to mismatched calling conv", &CI);
4232
4233 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4234 // or a pointer bitcast followed by a ret instruction.
4235 // - The ret instruction must return the (possibly bitcasted) value
4236 // produced by the call or void.
4237 Value *RetVal = &CI;
4239
4240 // Handle the optional bitcast.
4241 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4242 Check(BI->getOperand(0) == RetVal,
4243 "bitcast following musttail call must use the call", BI);
4244 RetVal = BI;
4245 Next = BI->getNextNode();
4246 }
4247
4248 // Check the return.
4249 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4250 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4251 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4253 "musttail call result must be returned", Ret);
4254
4255 AttributeList CallerAttrs = F->getAttributes();
4256 AttributeList CalleeAttrs = CI.getAttributes();
4257 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4258 CI.getCallingConv() == CallingConv::Tail) {
4259 StringRef CCName =
4260 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4261
4262 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4263 // are allowed in swifttailcc call
4264 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4265 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4266 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4267 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4268 }
4269 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4270 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4271 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4272 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4273 }
4274 // - Varargs functions are not allowed
4275 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4276 " tail call for varargs function");
4277 return;
4278 }
4279
4280 // - The caller and callee prototypes must match. Pointer types of
4281 // parameters or return types may differ in pointee type, but not
4282 // address space.
4283 if (!CI.getIntrinsicID()) {
4284 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4285 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4286 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4287 Check(
4288 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4289 "cannot guarantee tail call due to mismatched parameter types", &CI);
4290 }
4291 }
4292
4293 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4294 // returned, preallocated, and inalloca, must match.
4295 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4296 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4297 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4298 Check(CallerABIAttrs == CalleeABIAttrs,
4299 "cannot guarantee tail call due to mismatched ABI impacting "
4300 "function attributes",
4301 &CI, CI.getOperand(I));
4302 }
4303}
4304
4305void Verifier::visitCallInst(CallInst &CI) {
4306 visitCallBase(CI);
4307
4308 if (CI.isMustTailCall())
4309 verifyMustTailCall(CI);
4310}
4311
4312void Verifier::visitInvokeInst(InvokeInst &II) {
4313 visitCallBase(II);
4314
4315 // Verify that the first non-PHI instruction of the unwind destination is an
4316 // exception handling instruction.
4317 Check(
4318 II.getUnwindDest()->isEHPad(),
4319 "The unwind destination does not have an exception handling instruction!",
4320 &II);
4321
4322 visitTerminator(II);
4323}
4324
4325/// visitUnaryOperator - Check the argument to the unary operator.
4326///
4327void Verifier::visitUnaryOperator(UnaryOperator &U) {
4328 Check(U.getType() == U.getOperand(0)->getType(),
4329 "Unary operators must have same type for"
4330 "operands and result!",
4331 &U);
4332
4333 switch (U.getOpcode()) {
4334 // Check that floating-point arithmetic operators are only used with
4335 // floating-point operands.
4336 case Instruction::FNeg:
4337 Check(U.getType()->isFPOrFPVectorTy(),
4338 "FNeg operator only works with float types!", &U);
4339 break;
4340 default:
4341 llvm_unreachable("Unknown UnaryOperator opcode!");
4342 }
4343
4344 visitInstruction(U);
4345}
4346
/// visitBinaryOperator - Check that both arguments to the binary operator are
/// of the same type!
///
/// Also checks per-opcode operand-type legality: integer ops and
/// logical/shift ops require integer (or integer-vector) operands, while the
/// FP ops require floating-point (or FP-vector) operands. Each Check that
/// fails reports a diagnostic and aborts verification of this instruction.
void Verifier::visitBinaryOperator(BinaryOperator &B) {
  // Operand/operand type agreement holds for every binary opcode.
  Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
        "Both operands to a binary operator are not of the same type!", &B);

  switch (B.getOpcode()) {
  // Check that integer arithmetic operators are only used with
  // integral operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Integer arithmetic operators only work with integral types!", &B);
    // Result type must match operand type (operand 0 == operand 1 above).
    Check(B.getType() == B.getOperand(0)->getType(),
          "Integer arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Check(B.getType()->isFPOrFPVectorTy(),
          "Floating-point arithmetic operators only work with "
          "floating-point types!",
          &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Floating-point arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Logical operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Logical operators must have same type for operands and result!", &B);
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Shifts only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Shift return type must be same as operands!", &B);
    break;
  default:
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}
4410
4411void Verifier::visitICmpInst(ICmpInst &IC) {
4412 // Check that the operands are the same type
4413 Type *Op0Ty = IC.getOperand(0)->getType();
4414 Type *Op1Ty = IC.getOperand(1)->getType();
4415 Check(Op0Ty == Op1Ty,
4416 "Both operands to ICmp instruction are not of the same type!", &IC);
4417 // Check that the operands are the right type
4418 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4419 "Invalid operand types for ICmp instruction", &IC);
4420 // Check that the predicate is valid.
4421 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4422
4423 visitInstruction(IC);
4424}
4425
4426void Verifier::visitFCmpInst(FCmpInst &FC) {
4427 // Check that the operands are the same type
4428 Type *Op0Ty = FC.getOperand(0)->getType();
4429 Type *Op1Ty = FC.getOperand(1)->getType();
4430 Check(Op0Ty == Op1Ty,
4431 "Both operands to FCmp instruction are not of the same type!", &FC);
4432 // Check that the operands are the right type
4433 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4434 &FC);
4435 // Check that the predicate is valid.
4436 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4437
4438 visitInstruction(FC);
4439}
4440
4441void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4443 "Invalid extractelement operands!", &EI);
4444 visitInstruction(EI);
4445}
4446
4447void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4448 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4449 IE.getOperand(2)),
4450 "Invalid insertelement operands!", &IE);
4451 visitInstruction(IE);
4452}
4453
4454void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4456 SV.getShuffleMask()),
4457 "Invalid shufflevector operands!", &SV);
4458 visitInstruction(SV);
4459}
4460
4461void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4462 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4463
4464 Check(isa<PointerType>(TargetTy),
4465 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4466 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4467
4468 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4469 Check(!STy->isScalableTy(),
4470 "getelementptr cannot target structure that contains scalable vector"
4471 "type",
4472 &GEP);
4473 }
4474
4475 SmallVector<Value *, 16> Idxs(GEP.indices());
4476 Check(
4477 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4478 "GEP indexes must be integers", &GEP);
4479 Type *ElTy =
4480 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4481 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4482
4483 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4484
4485 Check(PtrTy && GEP.getResultElementType() == ElTy,
4486 "GEP is not of right type for indices!", &GEP, ElTy);
4487
4488 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4489 // Additional checks for vector GEPs.
4490 ElementCount GEPWidth = GEPVTy->getElementCount();
4491 if (GEP.getPointerOperandType()->isVectorTy())
4492 Check(
4493 GEPWidth ==
4494 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4495 "Vector GEP result width doesn't match operand's", &GEP);
4496 for (Value *Idx : Idxs) {
4497 Type *IndexTy = Idx->getType();
4498 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4499 ElementCount IndexWidth = IndexVTy->getElementCount();
4500 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4501 }
4502 Check(IndexTy->isIntOrIntVectorTy(),
4503 "All GEP indices should be of integer type");
4504 }
4505 }
4506
4507 // Check that GEP does not index into a vector with non-byte-addressable
4508 // elements.
4510 GTI != GTE; ++GTI) {
4511 if (GTI.isVector()) {
4512 Type *ElemTy = GTI.getIndexedType();
4513 Check(DL.typeSizeEqualsStoreSize(ElemTy),
4514 "GEP into vector with non-byte-addressable element type", &GEP);
4515 }
4516 }
4517
4518 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4519 "GEP address space doesn't match type", &GEP);
4520
4521 visitInstruction(GEP);
4522}
4523
4524static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4525 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4526}
4527
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The node carries 2*N integer operands forming N [Low, High) pairs. Each
/// pair must be non-empty, the pairs must be sorted by signed lower bound,
/// pairwise disjoint, and non-contiguous (touching ranges must have been
/// merged by the producer). Because the list may wrap the signed domain,
/// the first and last pairs are compared separately when 3+ pairs exist.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // !noalias.addrspace bounds are address-space numbers, always i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // A full-set range (Low == High after wrap) is only legal for
    // !absolute_symbol.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  if (NumRanges > 2) {
    // The loop only compared adjacent pairs; with 3+ (possibly wrapping)
    // ranges, the first and last pairs must also be checked against each
    // other.
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4592
4593void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4594 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4595 "precondition violation");
4596 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4597}
4598
4599void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
4600 Type *Ty) {
4601 Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
4602 "nofpclass only applies to floating-point typed loads", I);
4603
4604 Check(NoFPClass->getNumOperands() == 1,
4605 "nofpclass must have exactly one entry", NoFPClass);
4606 ConstantInt *MaskVal =
4608 Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
4609 "nofpclass entry must be a constant i32", NoFPClass);
4610 uint32_t Val = MaskVal->getZExtValue();
4611 Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
4612 I);
4613
4614 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
4615 "Invalid value for 'nofpclass' test mask", NoFPClass, I);
4616}
4617
4618void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4619 Type *Ty) {
4620 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4621 "precondition violation");
4622 verifyRangeLikeMetadata(I, Range, Ty,
4623 RangeLikeMetadataKind::NoaliasAddrspace);
4624}
4625
4626void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4627 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4628 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4629 Check(!(Size & (Size - 1)),
4630 "atomic memory access' operand must have a power-of-two size", Ty, I);
4631}
4632
4633void Verifier::visitLoadInst(LoadInst &LI) {
4635 Check(PTy, "Load operand must be a pointer.", &LI);
4636 Type *ElTy = LI.getType();
4637 if (MaybeAlign A = LI.getAlign()) {
4638 Check(A->value() <= Value::MaximumAlignment,
4639 "huge alignment values are unsupported", &LI);
4640 }
4641 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4642 if (LI.isAtomic()) {
4643 Check(LI.getOrdering() != AtomicOrdering::Release &&
4644 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4645 "Load cannot have Release ordering", &LI);
4646 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4647 ElTy->getScalarType()->isByteTy() ||
4649 "atomic load operand must have integer, byte, pointer, floating "
4650 "point, or vector type!",
4651 ElTy, &LI);
4652
4653 checkAtomicMemAccessSize(ElTy, &LI);
4654 } else {
4656 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4657 }
4658
4659 visitInstruction(LI);
4660}
4661
4662void Verifier::visitStoreInst(StoreInst &SI) {
4663 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4664 Check(PTy, "Store operand must be a pointer.", &SI);
4665 Type *ElTy = SI.getOperand(0)->getType();
4666 if (MaybeAlign A = SI.getAlign()) {
4667 Check(A->value() <= Value::MaximumAlignment,
4668 "huge alignment values are unsupported", &SI);
4669 }
4670 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4671 if (SI.isAtomic()) {
4672 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4673 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4674 "Store cannot have Acquire ordering", &SI);
4675 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4676 ElTy->getScalarType()->isByteTy() ||
4678 "atomic store operand must have integer, byte, pointer, floating "
4679 "point, or vector type!",
4680 ElTy, &SI);
4681 checkAtomicMemAccessSize(ElTy, &SI);
4682 } else {
4683 Check(SI.getSyncScopeID() == SyncScope::System,
4684 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4685 }
4686 visitInstruction(SI);
4687}
4688
4689/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4690void Verifier::verifySwiftErrorCall(CallBase &Call,
4691 const Value *SwiftErrorVal) {
4692 for (const auto &I : llvm::enumerate(Call.args())) {
4693 if (I.value() == SwiftErrorVal) {
4694 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4695 "swifterror value when used in a callsite should be marked "
4696 "with swifterror attribute",
4697 SwiftErrorVal, Call);
4698 }
4699 }
4700}
4701
4702void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4703 // Check that swifterror value is only used by loads, stores, or as
4704 // a swifterror argument.
4705 for (const User *U : SwiftErrorVal->users()) {
4707 isa<InvokeInst>(U),
4708 "swifterror value can only be loaded and stored from, or "
4709 "as a swifterror argument!",
4710 SwiftErrorVal, U);
4711 // If it is used by a store, check it is the second operand.
4712 if (auto StoreI = dyn_cast<StoreInst>(U))
4713 Check(StoreI->getOperand(1) == SwiftErrorVal,
4714 "swifterror value should be the second operand when used "
4715 "by stores",
4716 SwiftErrorVal, U);
4717 if (auto *Call = dyn_cast<CallBase>(U))
4718 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4719 }
4720}
4721
4722void Verifier::visitAllocaInst(AllocaInst &AI) {
4723 Type *Ty = AI.getAllocatedType();
4724 SmallPtrSet<Type*, 4> Visited;
4725 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4726 // Check if it's a target extension type that disallows being used on the
4727 // stack.
4729 "Alloca has illegal target extension type", &AI);
4731 "Alloca array size must have integer type", &AI);
4732 if (MaybeAlign A = AI.getAlign()) {
4733 Check(A->value() <= Value::MaximumAlignment,
4734 "huge alignment values are unsupported", &AI);
4735 }
4736
4737 if (AI.isSwiftError()) {
4738 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4740 "swifterror alloca must not be array allocation", &AI);
4741 verifySwiftErrorValue(&AI);
4742 }
4743
4744 if (TT.isAMDGPU()) {
4746 "alloca on amdgpu must be in addrspace(5)", &AI);
4747 }
4748
4749 visitInstruction(AI);
4750}
4751
4752void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4753 Type *ElTy = CXI.getOperand(1)->getType();
4754 Check(ElTy->isIntOrPtrTy(),
4755 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4756 checkAtomicMemAccessSize(ElTy, &CXI);
4757 visitInstruction(CXI);
4758}
4759
4760void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4761 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4762 "atomicrmw instructions cannot be unordered.", &RMWI);
4763 auto Op = RMWI.getOperation();
4764 Type *ElTy = RMWI.getOperand(1)->getType();
4765 if (Op == AtomicRMWInst::Xchg) {
4766 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4767 ElTy->isPointerTy(),
4768 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4769 " operand must have integer or floating point type!",
4770 &RMWI, ElTy);
4771 } else if (AtomicRMWInst::isFPOperation(Op)) {
4773 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4774 " operand must have floating-point or fixed vector of floating-point "
4775 "type!",
4776 &RMWI, ElTy);
4777 } else {
4778 Check(ElTy->isIntegerTy(),
4779 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4780 " operand must have integer type!",
4781 &RMWI, ElTy);
4782 }
4783 checkAtomicMemAccessSize(ElTy, &RMWI);
4785 "Invalid binary operation!", &RMWI);
4786 visitInstruction(RMWI);
4787}
4788
4789void Verifier::visitFenceInst(FenceInst &FI) {
4790 const AtomicOrdering Ordering = FI.getOrdering();
4791 Check(Ordering == AtomicOrdering::Acquire ||
4792 Ordering == AtomicOrdering::Release ||
4793 Ordering == AtomicOrdering::AcquireRelease ||
4794 Ordering == AtomicOrdering::SequentiallyConsistent,
4795 "fence instructions may only have acquire, release, acq_rel, or "
4796 "seq_cst ordering.",
4797 &FI);
4798 visitInstruction(FI);
4799}
4800
4801void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4803 EVI.getIndices()) == EVI.getType(),
4804 "Invalid ExtractValueInst operands!", &EVI);
4805
4806 visitInstruction(EVI);
4807}
4808
4809void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4811 IVI.getIndices()) ==
4812 IVI.getOperand(1)->getType(),
4813 "Invalid InsertValueInst operands!", &IVI);
4814
4815 visitInstruction(IVI);
4816}
4817
4818static Value *getParentPad(Value *EHPad) {
4819 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4820 return FPI->getParentPad();
4821
4822 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4823}
4824
// Verify that every predecessor of an EH pad's block reaches it through a
// legal edge: landing pads only via invoke unwind edges, catchpads only from
// their owning catchswitch, and other pads via unwind edges whose source
// funclet legally exits to this pad's parent (no cycles, no skipping pads).
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad may only be entered from its owning catchswitch, and that
    // catchswitch must not also unwind to it.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Non-throwing intrinsic invokes can't actually unwind here, so their
      // funclet affiliation is irrelevant.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      // The invoke's source funclet is named by its "funclet" bundle, or it
      // unwinds from function scope (token none).
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4907
4908void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4909 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4910 // isn't a cleanup.
4911 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4912 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4913
4914 visitEHPadPredecessors(LPI);
4915
4916 if (!LandingPadResultTy)
4917 LandingPadResultTy = LPI.getType();
4918 else
4919 Check(LandingPadResultTy == LPI.getType(),
4920 "The landingpad instruction should have a consistent result type "
4921 "inside a function.",
4922 &LPI);
4923
4924 Function *F = LPI.getParent()->getParent();
4925 Check(F->hasPersonalityFn(),
4926 "LandingPadInst needs to be in a function with a personality.", &LPI);
4927
4928 // The landingpad instruction must be the first non-PHI instruction in the
4929 // block.
4930 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4931 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4932
4933 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4934 Constant *Clause = LPI.getClause(i);
4935 if (LPI.isCatch(i)) {
4936 Check(isa<PointerType>(Clause->getType()),
4937 "Catch operand does not have pointer type!", &LPI);
4938 } else {
4939 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4941 "Filter operand is not an array of constants!", &LPI);
4942 }
4943 }
4944
4945 visitInstruction(LPI);
4946}
4947
4948void Verifier::visitResumeInst(ResumeInst &RI) {
4950 "ResumeInst needs to be in a function with a personality.", &RI);
4951
4952 if (!LandingPadResultTy)
4953 LandingPadResultTy = RI.getValue()->getType();
4954 else
4955 Check(LandingPadResultTy == RI.getValue()->getType(),
4956 "The resume instruction should have a consistent result type "
4957 "inside a function.",
4958 &RI);
4959
4960 visitTerminator(RI);
4961}
4962
4963void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4964 BasicBlock *BB = CPI.getParent();
4965
4966 Function *F = BB->getParent();
4967 Check(F->hasPersonalityFn(),
4968 "CatchPadInst needs to be in a function with a personality.", &CPI);
4969
4971 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4972 CPI.getParentPad());
4973
4974 // The catchpad instruction must be the first non-PHI instruction in the
4975 // block.
4976 Check(&*BB->getFirstNonPHIIt() == &CPI,
4977 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4978
4980 [](Use &U) {
4981 auto *V = U.get();
4982 return isa<Constant>(V) || isa<AllocaInst>(V);
4983 }),
4984 "Argument operand must be alloca or constant.", &CPI);
4985
4986 visitEHPadPredecessors(CPI);
4987 visitFuncletPadInst(CPI);
4988}
4989
4990void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4991 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4992 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4993 CatchReturn.getOperand(0));
4994
4995 visitTerminator(CatchReturn);
4996}
4997
4998void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4999 BasicBlock *BB = CPI.getParent();
5000
5001 Function *F = BB->getParent();
5002 Check(F->hasPersonalityFn(),
5003 "CleanupPadInst needs to be in a function with a personality.", &CPI);
5004
5005 // The cleanuppad instruction must be the first non-PHI instruction in the
5006 // block.
5007 Check(&*BB->getFirstNonPHIIt() == &CPI,
5008 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
5009
5010 auto *ParentPad = CPI.getParentPad();
5011 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5012 "CleanupPadInst has an invalid parent.", &CPI);
5013
5014 visitEHPadPredecessors(CPI);
5015 visitFuncletPadInst(CPI);
5016}
5017
// Verify the unwind-destination consistency of a funclet pad: every unwind
// edge that exits FPI must agree on a single destination, nested cleanup
// pads are explored via a worklist to discover where they unwind, and a
// catchpad's unwind destination must match its parent catchswitch's. Also
// populates SiblingFuncletInfo for verifySiblingFuncletUnwinds.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Classify the use and extract the unwind destination it implies.
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catchpad's agreed unwind destination must also match its parent
    // catchswitch's unwind destination (or "caller" if it has none).
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5177
5178void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5179 BasicBlock *BB = CatchSwitch.getParent();
5180
5181 Function *F = BB->getParent();
5182 Check(F->hasPersonalityFn(),
5183 "CatchSwitchInst needs to be in a function with a personality.",
5184 &CatchSwitch);
5185
5186 // The catchswitch instruction must be the first non-PHI instruction in the
5187 // block.
5188 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5189 "CatchSwitchInst not the first non-PHI instruction in the block.",
5190 &CatchSwitch);
5191
5192 auto *ParentPad = CatchSwitch.getParentPad();
5193 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5194 "CatchSwitchInst has an invalid parent.", ParentPad);
5195
5196 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5197 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5198 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5199 "CatchSwitchInst must unwind to an EH block which is not a "
5200 "landingpad.",
5201 &CatchSwitch);
5202
5203 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5204 if (getParentPad(&*I) == ParentPad)
5205 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5206 }
5207
5208 Check(CatchSwitch.getNumHandlers() != 0,
5209 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5210
5211 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5212 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5213 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5214 }
5215
5216 visitEHPadPredecessors(CatchSwitch);
5217 visitTerminator(CatchSwitch);
5218}
5219
// Verify a cleanupret: per the message below, its operand must be a
// cleanuppad, and any unwind successor must start with a non-landingpad
// EH pad (funclet-style EH only).
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    // The unwind successor must begin (after PHIs) with an EH pad that is
    // not a landingpad.
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5235
5236void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5237 Instruction *Op = cast<Instruction>(I.getOperand(i));
5238 // If the we have an invalid invoke, don't try to compute the dominance.
5239 // We already reject it in the invoke specific checks and the dominance
5240 // computation doesn't handle multiple edges.
5241 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5242 if (II->getNormalDest() == II->getUnwindDest())
5243 return;
5244 }
5245
5246 // Quick check whether the def has already been encountered in the same block.
5247 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5248 // uses are defined to happen on the incoming edge, not at the instruction.
5249 //
5250 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5251 // wrapping an SSA value, assert that we've already encountered it. See
5252 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5253 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5254 return;
5255
5256 const Use &U = I.getOperandUse(i);
5257 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5258}
5259
// Validate !dereferenceable / !dereferenceable_or_null attachments: the
// result must be pointer-typed, the node carries exactly one operand, and
// that operand must be an i64 constant.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The single operand must be an i64 constant.
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5279
5280void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5281 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5282 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5283 &I);
5284 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5285}
5286
5287void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5288 auto GetBranchingTerminatorNumOperands = [&]() {
5289 unsigned ExpectedNumOperands = 0;
5290 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
5291 ExpectedNumOperands = BI->getNumSuccessors();
5292 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5293 ExpectedNumOperands = SI->getNumSuccessors();
5294 else if (isa<CallInst>(&I))
5295 ExpectedNumOperands = 1;
5296 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5297 ExpectedNumOperands = IBI->getNumDestinations();
5298 else if (isa<SelectInst>(&I))
5299 ExpectedNumOperands = 2;
5300 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5301 ExpectedNumOperands = CI->getNumSuccessors();
5302 return ExpectedNumOperands;
5303 };
5304 Check(MD->getNumOperands() >= 1,
5305 "!prof annotations should have at least 1 operand", MD);
5306 // Check first operand.
5307 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5309 "expected string with name of the !prof annotation", MD);
5310 MDString *MDS = cast<MDString>(MD->getOperand(0));
5311 StringRef ProfName = MDS->getString();
5312
5314 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5315 "'unknown' !prof should only appear on instructions on which "
5316 "'branch_weights' would",
5317 MD);
5318 verifyUnknownProfileMetadata(MD);
5319 return;
5320 }
5321
5322 Check(MD->getNumOperands() >= 2,
5323 "!prof annotations should have no less than 2 operands", MD);
5324
5325 // Check consistency of !prof branch_weights metadata.
5326 if (ProfName == MDProfLabels::BranchWeights) {
5327 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5328 if (isa<InvokeInst>(&I)) {
5329 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5330 "Wrong number of InvokeInst branch_weights operands", MD);
5331 } else {
5332 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5333 if (ExpectedNumOperands == 0)
5334 CheckFailed("!prof branch_weights are not allowed for this instruction",
5335 MD);
5336
5337 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5338 MD);
5339 }
5340 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5341 ++i) {
5342 auto &MDO = MD->getOperand(i);
5343 Check(MDO, "second operand should not be null", MD);
5345 "!prof brunch_weights operand is not a const int");
5346 }
5347 } else if (ProfName == MDProfLabels::ValueProfile) {
5348 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5349 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5350 Check(KindInt, "VP !prof missing kind argument", MD);
5351
5352 auto Kind = KindInt->getZExtValue();
5353 Check(Kind >= InstrProfValueKind::IPVK_First &&
5354 Kind <= InstrProfValueKind::IPVK_Last,
5355 "Invalid VP !prof kind", MD);
5356 Check(MD->getNumOperands() % 2 == 1,
5357 "VP !prof should have an even number "
5358 "of arguments after 'VP'",
5359 MD);
5360 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5361 Kind == InstrProfValueKind::IPVK_MemOPSize)
5363 "VP !prof indirect call or memop size expected to be applied to "
5364 "CallBase instructions only",
5365 MD);
5366 } else {
5367 CheckFailed("expected either branch_weights or VP profile name", MD);
5368 }
5369}
5370
// Verify a !DIAssignID attachment and all users of the ID node: intrinsic
// users must be llvm.dbg.assign in the same function, and debug-record users
// must be assign-kind DbgVariableRecords in the same function.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
          "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
          MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // The same rules apply to the non-intrinsic debug-record users of the ID.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5402
// Validate an !mmra (memory model relaxation annotation) attachment: it must
// be either a single MMRA tag or a tuple whose operands are all MMRA tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag: must be a tuple of tags.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5420
// Shared validation for call-stack metadata nodes (used by both !memprof
// MIBs and !callsite attachments).
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  // Per the message below, each operand must be a constant integer.
  for (const auto &Op : MD->operands())
        "call stack metadata operand should be constant integer", Op);
}
5431
// Verify the !memprof attachment: a non-empty list of MemInfoBlock (MIB)
// nodes, each holding a call stack followed by tag/size information.
void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
  Check(MD->getNumOperands() >= 1,
        "!memprof annotations should have at least 1 metadata operand "
        "(MemInfoBlock)",
        MD);

  // Check each MIB
  for (auto &MIBOp : MD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // NOTE(review): MIB is dereferenced below without a null check — a
    // non-MDNode operand would crash the verifier rather than report a
    // failure; confirm whether MIB operands are guaranteed to be MDNodes.
    // The first operand of an MIB should be the call stack metadata.
    // The rest of the operands should be MDString tags, and there should be
    // at least one.
    Check(MIB->getNumOperands() >= 2,
          "Each !memprof MemInfoBlock should have at least 2 operands", MIB);

    // Check call stack metadata (first operand).
    Check(MIB->getOperand(0) != nullptr,
          "!memprof MemInfoBlock first operand should not be null", MIB);
    Check(isa<MDNode>(MIB->getOperand(0)),
          "!memprof MemInfoBlock first operand should be an MDNode", MIB);
    MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
    visitCallStackMetadata(StackMD);

    // The second MIB operand should be MDString.
          "!memprof MemInfoBlock second operand should be an MDString", MIB);

    // Any remaining should be MDNode that are pairs of integers
    for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
      MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
      Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
            MIB);
      Check(OpNode->getNumOperands() == 2,
            "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
            "operands",
            MIB);
      // Check that all of Op's operands are ConstantInt.
      Check(llvm::all_of(OpNode->operands(),
                         [](const MDOperand &Op) {
                           return mdconst::hasa<ConstantInt>(Op);
                         }),
            "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
            "ConstantInt operands",
            MIB);
    }
  }
}
5480
5481void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5482 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5483 // Verify the partial callstack annotated from memprof profiles. This callsite
5484 // is a part of a profiled allocation callstack.
5485 visitCallStackMetadata(MD);
5486}
5487
5488static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5489 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5490 return isa<ConstantInt>(VAL->getValue());
5491 return false;
5492}
5493
// Verify the !callee_type attachment: calls only; every operand must be a
// generalized type metadata node of the form !{i64 0, !"<generalized id>"}.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // The first operand must be the integer constant zero.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5514
5515void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5516 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5517 Check(Annotation->getNumOperands() >= 1,
5518 "annotation must have at least one operand");
5519 for (const MDOperand &Op : Annotation->operands()) {
5520 bool TupleOfStrings =
5521 isa<MDTuple>(Op.get()) &&
5522 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5523 return isa<MDString>(Annotation.get());
5524 });
5525 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5526 "operands must be a string or a tuple of strings");
5527 }
5528}
5529
// Validate one alias scope node: two or three operands, a self-reference or
// string as the head, an optional string name, and a well-formed domain node
// as the second operand.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
        "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // The domain follows the same head convention: self-reference or string,
  // plus an optional string name.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5553
5554void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5555 for (const MDOperand &Op : MD->operands()) {
5556 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5557 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5558 visitAliasScopeMetadata(OpMD);
5559 }
5560}
5561
5562void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5563 auto IsValidAccessScope = [](const MDNode *MD) {
5564 return MD->getNumOperands() == 0 && MD->isDistinct();
5565 };
5566
5567 // It must be either an access scope itself...
5568 if (IsValidAccessScope(MD))
5569 return;
5570
5571 // ...or a list of access scopes.
5572 for (const MDOperand &Op : MD->operands()) {
5573 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5574 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5575 Check(IsValidAccessScope(OpMD),
5576 "Access scope list contains invalid access scope", MD);
5577 }
5578}
5579
5580void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5581 static const char *ValidArgs[] = {"address_is_null", "address",
5582 "read_provenance", "provenance"};
5583
5584 auto *SI = dyn_cast<StoreInst>(&I);
5585 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5586 Check(SI->getValueOperand()->getType()->isPointerTy(),
5587 "!captures metadata can only be applied to store with value operand of "
5588 "pointer type",
5589 &I);
5590 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5591 &I);
5592
5593 for (Metadata *Op : Captures->operands()) {
5594 auto *Str = dyn_cast<MDString>(Op);
5595 Check(Str, "!captures metadata must be a list of strings", &I);
5596 Check(is_contained(ValidArgs, Str->getString()),
5597 "invalid entry in !captures metadata", &I, Str);
5598 }
5599}
5600
// Validate an !alloc_token attachment: calls only, exactly two operands — a
// string followed (per the message below) by an integer constant.
void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
  Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
  Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
        "expected integer constant", MD);
}
5608
/// verifyInstruction - Verify that an instruction is well formed.
///
/// Performs the structural checks common to every instruction (parent block,
/// self-reference, operand legality, cross-function/module references) and
/// dispatches validation of all generic metadata attachments.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only the whitelisted intrinsics below may appear as an invoke/callbr
      // callee; everything else must use a plain call.
      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
      visitConstantExprsRecursively(C);
    }
  }

  // Validate the generic metadata attachments this instruction may carry.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      // The accuracy operand must be a finite, positive float constant.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
    Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
    visitNoFPClassMetadata(I, MD, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    // The operand must be an i64 constant that is a power of two and does
    // not exceed the implementation's maximum alignment.
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
    visitAllocTokenMetadata(I, MD);

  // The !dbg attachment must be a DILocation; a location that uses an atom
  // group additionally requires Key Instructions to be enabled on the
  // enclosing DISubprogram.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Visit every remaining attachment; debug locations are only permitted
  // inside !dbg and !loop attachments.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Record this definition for verifyDominatesUse's same-block fast path.
  InstsInThisBlock.insert(&I);
}
5868
5869/// Allow intrinsics to be verified in different ways.
5870void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5872 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5873 IF);
5874
5875 // Verify that the intrinsic prototype lines up with what the .td files
5876 // describe.
5877 FunctionType *IFTy = IF->getFunctionType();
5878 bool IsVarArg = IFTy->isVarArg();
5879
5883
5884 // Walk the descriptors to extract overloaded types.
5889 "Intrinsic has incorrect return type!", IF);
5891 "Intrinsic has incorrect argument type!", IF);
5892
5893 // Verify if the intrinsic call matches the vararg property.
5894 if (IsVarArg)
5896 "Intrinsic was not defined with variable arguments!", IF);
5897 else
5899 "Callsite was not defined with variable arguments!", IF);
5900
5901 // All descriptors should be absorbed by now.
5902 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5903
5904 // Now that we have the intrinsic ID and the actual argument types (and we
5905 // know they are legal for the intrinsic!) get the intrinsic name through the
5906 // usual means. This allows us to verify the mangling of argument types into
5907 // the name.
5908 const std::string ExpectedName =
5909 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5910 Check(ExpectedName == IF->getName(),
5911 "Intrinsic name not mangled correctly for type arguments! "
5912 "Should be: " +
5913 ExpectedName,
5914 IF);
5915
5916 // If the intrinsic takes MDNode arguments, verify that they are either global
5917 // or are local to *this* function.
5918 for (Value *V : Call.args()) {
5919 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5920 visitMetadataAsValue(*MD, Call.getCaller());
5921 if (auto *Const = dyn_cast<Constant>(V))
5922 Check(!Const->getType()->isX86_AMXTy(),
5923 "const x86_amx is not allowed in argument!");
5924 }
5925
5926 switch (ID) {
5927 default:
5928 break;
5929 case Intrinsic::assume: {
5930 if (Call.hasOperandBundles()) {
5932 Check(Cond && Cond->isOne(),
5933 "assume with operand bundles must have i1 true condition", Call);
5934 }
5935 for (auto &Elem : Call.bundle_op_infos()) {
5936 unsigned ArgCount = Elem.End - Elem.Begin;
5937 // Separate storage assumptions are special insofar as they're the only
5938 // operand bundles allowed on assumes that aren't parameter attributes.
5939 if (Elem.Tag->getKey() == "separate_storage") {
5940 Check(ArgCount == 2,
5941 "separate_storage assumptions should have 2 arguments", Call);
5942 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5943 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5944 "arguments to separate_storage assumptions should be pointers",
5945 Call);
5946 continue;
5947 }
5948 Check(Elem.Tag->getKey() == "ignore" ||
5949 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5950 "tags must be valid attribute names", Call);
5951 Attribute::AttrKind Kind =
5952 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5953 if (Kind == Attribute::Alignment) {
5954 Check(ArgCount <= 3 && ArgCount >= 2,
5955 "alignment assumptions should have 2 or 3 arguments", Call);
5956 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5957 "first argument should be a pointer", Call);
5958 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5959 "second argument should be an integer", Call);
5960 if (ArgCount == 3)
5961 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5962 "third argument should be an integer if present", Call);
5963 continue;
5964 }
5965 if (Kind == Attribute::Dereferenceable) {
5966 Check(ArgCount == 2,
5967 "dereferenceable assumptions should have 2 arguments", Call);
5968 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5969 "first argument should be a pointer", Call);
5970 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5971 "second argument should be an integer", Call);
5972 continue;
5973 }
5974 Check(ArgCount <= 2, "too many arguments", Call);
5975 if (Kind == Attribute::None)
5976 break;
5977 if (Attribute::isIntAttrKind(Kind)) {
5978 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5979 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5980 "the second argument should be a constant integral value", Call);
5981 } else if (Attribute::canUseAsParamAttr(Kind)) {
5982 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5983 } else if (Attribute::canUseAsFnAttr(Kind)) {
5984 Check((ArgCount) == 0, "this attribute has no argument", Call);
5985 }
5986 }
5987 break;
5988 }
5989 case Intrinsic::ucmp:
5990 case Intrinsic::scmp: {
5991 Type *SrcTy = Call.getOperand(0)->getType();
5992 Type *DestTy = Call.getType();
5993
5994 Check(DestTy->getScalarSizeInBits() >= 2,
5995 "result type must be at least 2 bits wide", Call);
5996
5997 bool IsDestTypeVector = DestTy->isVectorTy();
5998 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5999 "ucmp/scmp argument and result types must both be either vector or "
6000 "scalar types",
6001 Call);
6002 if (IsDestTypeVector) {
6003 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
6004 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
6005 Check(SrcVecLen == DestVecLen,
6006 "return type and arguments must have the same number of "
6007 "elements",
6008 Call);
6009 }
6010 break;
6011 }
6012 case Intrinsic::coro_id: {
6013 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
6014 if (isa<ConstantPointerNull>(InfoArg))
6015 break;
6016 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6017 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6018 "info argument of llvm.coro.id must refer to an initialized "
6019 "constant");
6020 Constant *Init = GV->getInitializer();
6022 "info argument of llvm.coro.id must refer to either a struct or "
6023 "an array");
6024 break;
6025 }
6026 case Intrinsic::is_fpclass: {
6027 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6028 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6029 "unsupported bits for llvm.is.fpclass test mask");
6030 break;
6031 }
6032 case Intrinsic::fptrunc_round: {
6033 // Check the rounding mode
6034 Metadata *MD = nullptr;
6036 if (MAV)
6037 MD = MAV->getMetadata();
6038
6039 Check(MD != nullptr, "missing rounding mode argument", Call);
6040
6041 Check(isa<MDString>(MD),
6042 ("invalid value for llvm.fptrunc.round metadata operand"
6043 " (the operand should be a string)"),
6044 MD);
6045
6046 std::optional<RoundingMode> RoundMode =
6047 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6048 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6049 "unsupported rounding mode argument", Call);
6050 break;
6051 }
6052 case Intrinsic::convert_to_arbitrary_fp: {
6053 // Check that vector element counts are consistent.
6054 Type *ValueTy = Call.getArgOperand(0)->getType();
6055 Type *IntTy = Call.getType();
6056
6057 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6058 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6059 Check(IntVecTy,
6060 "if floating-point operand is a vector, integer operand must also "
6061 "be a vector",
6062 Call);
6063 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6064 "floating-point and integer vector operands must have the same "
6065 "element count",
6066 Call);
6067 }
6068
6069 // Check interpretation metadata (argoperand 1).
6070 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6071 Check(InterpMAV, "missing interpretation metadata operand", Call);
6072 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6073 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6074 StringRef Interp = InterpStr->getString();
6075
6076 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6077 Call);
6078
6079 // Valid interpretation strings: mini-float format names.
6081 "unsupported interpretation metadata string", Call);
6082
6083 // Check rounding mode metadata (argoperand 2).
6084 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6085 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6086 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6087 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6088
6089 std::optional<RoundingMode> RM =
6090 convertStrToRoundingMode(RoundingStr->getString());
6091 Check(RM && *RM != RoundingMode::Dynamic,
6092 "unsupported rounding mode argument", Call);
6093 break;
6094 }
6095 case Intrinsic::convert_from_arbitrary_fp: {
6096 // Check that vector element counts are consistent.
6097 Type *IntTy = Call.getArgOperand(0)->getType();
6098 Type *ValueTy = Call.getType();
6099
6100 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6101 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6102 Check(IntVecTy,
6103 "if floating-point operand is a vector, integer operand must also "
6104 "be a vector",
6105 Call);
6106 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6107 "floating-point and integer vector operands must have the same "
6108 "element count",
6109 Call);
6110 }
6111
6112 // Check interpretation metadata (argoperand 1).
6113 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6114 Check(InterpMAV, "missing interpretation metadata operand", Call);
6115 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6116 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6117 StringRef Interp = InterpStr->getString();
6118
6119 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6120 Call);
6121
6122 // Valid interpretation strings: mini-float format names.
6124 "unsupported interpretation metadata string", Call);
6125 break;
6126 }
6127#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6128#include "llvm/IR/VPIntrinsics.def"
6129#undef BEGIN_REGISTER_VP_INTRINSIC
6130 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6131 break;
6132#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6133 case Intrinsic::INTRINSIC:
6134#include "llvm/IR/ConstrainedOps.def"
6135#undef INSTRUCTION
6136 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6137 break;
6138 case Intrinsic::dbg_declare: // llvm.dbg.declare
6139 case Intrinsic::dbg_value: // llvm.dbg.value
6140 case Intrinsic::dbg_assign: // llvm.dbg.assign
6141 case Intrinsic::dbg_label: // llvm.dbg.label
6142     // We no longer interpret debug intrinsics (the old variable-location
6143     // design). They're meaningless as far as LLVM is concerned; we could make
6144     // it an error for them to appear, but it's possible we'll have users
6145     // converting back to intrinsics for the foreseeable future (such as DXIL),
6146     // so tolerate their existence.
6147 break;
6148 case Intrinsic::memcpy:
6149 case Intrinsic::memcpy_inline:
6150 case Intrinsic::memmove:
6151 case Intrinsic::memset:
6152 case Intrinsic::memset_inline:
6153 break;
6154 case Intrinsic::experimental_memset_pattern: {
6155 const auto Memset = cast<MemSetPatternInst>(&Call);
6156 Check(Memset->getValue()->getType()->isSized(),
6157 "unsized types cannot be used as memset patterns", Call);
6158 break;
6159 }
6160 case Intrinsic::memcpy_element_unordered_atomic:
6161 case Intrinsic::memmove_element_unordered_atomic:
6162 case Intrinsic::memset_element_unordered_atomic: {
6163 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6164
6165 ConstantInt *ElementSizeCI =
6166 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6167 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6168 Check(ElementSizeVal.isPowerOf2(),
6169 "element size of the element-wise atomic memory intrinsic "
6170 "must be a power of 2",
6171 Call);
6172
6173 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6174 return Alignment && ElementSizeVal.ule(Alignment->value());
6175 };
6176 Check(IsValidAlignment(AMI->getDestAlign()),
6177 "incorrect alignment of the destination argument", Call);
6178 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6179 Check(IsValidAlignment(AMT->getSourceAlign()),
6180 "incorrect alignment of the source argument", Call);
6181 }
6182 break;
6183 }
6184 case Intrinsic::call_preallocated_setup: {
6185 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6186 bool FoundCall = false;
6187 for (User *U : Call.users()) {
6188 auto *UseCall = dyn_cast<CallBase>(U);
6189 Check(UseCall != nullptr,
6190 "Uses of llvm.call.preallocated.setup must be calls");
6191 Intrinsic::ID IID = UseCall->getIntrinsicID();
6192 if (IID == Intrinsic::call_preallocated_arg) {
6193 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6194 Check(AllocArgIndex != nullptr,
6195 "llvm.call.preallocated.alloc arg index must be a constant");
6196 auto AllocArgIndexInt = AllocArgIndex->getValue();
6197 Check(AllocArgIndexInt.sge(0) &&
6198 AllocArgIndexInt.slt(NumArgs->getValue()),
6199 "llvm.call.preallocated.alloc arg index must be between 0 and "
6200 "corresponding "
6201 "llvm.call.preallocated.setup's argument count");
6202 } else if (IID == Intrinsic::call_preallocated_teardown) {
6203 // nothing to do
6204 } else {
6205 Check(!FoundCall, "Can have at most one call corresponding to a "
6206 "llvm.call.preallocated.setup");
6207 FoundCall = true;
6208 size_t NumPreallocatedArgs = 0;
6209 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6210 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6211 ++NumPreallocatedArgs;
6212 }
6213 }
6214 Check(NumPreallocatedArgs != 0,
6215 "cannot use preallocated intrinsics on a call without "
6216 "preallocated arguments");
6217 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6218 "llvm.call.preallocated.setup arg size must be equal to number "
6219 "of preallocated arguments "
6220 "at call site",
6221 Call, *UseCall);
6222         // getOperandBundle() cannot be called if more than one operand bundle
6223         // of the given type exists. There is already a check elsewhere for
6224         // this, so skip here if we see more than one.
6225 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6226 1) {
6227 return;
6228 }
6229 auto PreallocatedBundle =
6230 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6231 Check(PreallocatedBundle,
6232 "Use of llvm.call.preallocated.setup outside intrinsics "
6233 "must be in \"preallocated\" operand bundle");
6234 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6235 "preallocated bundle must have token from corresponding "
6236 "llvm.call.preallocated.setup");
6237 }
6238 }
6239 break;
6240 }
6241 case Intrinsic::call_preallocated_arg: {
6242 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6243 Check(Token &&
6244 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6245 "llvm.call.preallocated.arg token argument must be a "
6246 "llvm.call.preallocated.setup");
6247 Check(Call.hasFnAttr(Attribute::Preallocated),
6248 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6249 "call site attribute");
6250 break;
6251 }
6252 case Intrinsic::call_preallocated_teardown: {
6253 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6254 Check(Token &&
6255 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6256 "llvm.call.preallocated.teardown token argument must be a "
6257 "llvm.call.preallocated.setup");
6258 break;
6259 }
6260 case Intrinsic::gcroot:
6261 case Intrinsic::gcwrite:
6262 case Intrinsic::gcread:
6263 if (ID == Intrinsic::gcroot) {
6264 AllocaInst *AI =
6266 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6268 "llvm.gcroot parameter #2 must be a constant.", Call);
6269 if (!AI->getAllocatedType()->isPointerTy()) {
6271 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6272 "or argument #2 must be a non-null constant.",
6273 Call);
6274 }
6275 }
6276
6277 Check(Call.getParent()->getParent()->hasGC(),
6278 "Enclosing function does not use GC.", Call);
6279 break;
6280 case Intrinsic::init_trampoline:
6282 "llvm.init_trampoline parameter #2 must resolve to a function.",
6283 Call);
6284 break;
6285 case Intrinsic::prefetch:
6286 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6287 "rw argument to llvm.prefetch must be 0-1", Call);
6288 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6289 "locality argument to llvm.prefetch must be 0-3", Call);
6290 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6291 "cache type argument to llvm.prefetch must be 0-1", Call);
6292 break;
6293 case Intrinsic::reloc_none: {
6295 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6296 "llvm.reloc.none argument must be a metadata string", &Call);
6297 break;
6298 }
6299 case Intrinsic::stackprotector:
6301 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6302 break;
6303 case Intrinsic::localescape: {
6304 BasicBlock *BB = Call.getParent();
6305 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6306 Call);
6307 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6308 Call);
6309 for (Value *Arg : Call.args()) {
6310 if (isa<ConstantPointerNull>(Arg))
6311 continue; // Null values are allowed as placeholders.
6312 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6313 Check(AI && AI->isStaticAlloca(),
6314 "llvm.localescape only accepts static allocas", Call);
6315 }
6316 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6317 SawFrameEscape = true;
6318 break;
6319 }
6320 case Intrinsic::localrecover: {
6322 Function *Fn = dyn_cast<Function>(FnArg);
6323 Check(Fn && !Fn->isDeclaration(),
6324 "llvm.localrecover first "
6325 "argument must be function defined in this module",
6326 Call);
6327 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6328 auto &Entry = FrameEscapeInfo[Fn];
6329 Entry.second = unsigned(
6330 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6331 break;
6332 }
6333
6334 case Intrinsic::experimental_gc_statepoint:
6335 if (auto *CI = dyn_cast<CallInst>(&Call))
6336 Check(!CI->isInlineAsm(),
6337 "gc.statepoint support for inline assembly unimplemented", CI);
6338 Check(Call.getParent()->getParent()->hasGC(),
6339 "Enclosing function does not use GC.", Call);
6340
6341 verifyStatepoint(Call);
6342 break;
6343 case Intrinsic::experimental_gc_result: {
6344 Check(Call.getParent()->getParent()->hasGC(),
6345 "Enclosing function does not use GC.", Call);
6346
6347 auto *Statepoint = Call.getArgOperand(0);
6348 if (isa<UndefValue>(Statepoint))
6349 break;
6350
6351 // Are we tied to a statepoint properly?
6352 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6353 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6354 Intrinsic::experimental_gc_statepoint,
6355 "gc.result operand #1 must be from a statepoint", Call,
6356 Call.getArgOperand(0));
6357
6358 // Check that result type matches wrapped callee.
6359 auto *TargetFuncType =
6360 cast<FunctionType>(StatepointCall->getParamElementType(2));
6361 Check(Call.getType() == TargetFuncType->getReturnType(),
6362 "gc.result result type does not match wrapped callee", Call);
6363 break;
6364 }
6365 case Intrinsic::experimental_gc_relocate: {
6366 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6367
6369 "gc.relocate must return a pointer or a vector of pointers", Call);
6370
6371 // Check that this relocate is correctly tied to the statepoint
6372
6373 // This is case for relocate on the unwinding path of an invoke statepoint
6374 if (LandingPadInst *LandingPad =
6376
6377 const BasicBlock *InvokeBB =
6378 LandingPad->getParent()->getUniquePredecessor();
6379
6380 // Landingpad relocates should have only one predecessor with invoke
6381 // statepoint terminator
6382 Check(InvokeBB, "safepoints should have unique landingpads",
6383 LandingPad->getParent());
6384 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6385 InvokeBB);
6387 "gc relocate should be linked to a statepoint", InvokeBB);
6388 } else {
6389 // In all other cases relocate should be tied to the statepoint directly.
6390 // This covers relocates on a normal return path of invoke statepoint and
6391 // relocates of a call statepoint.
6392 auto *Token = Call.getArgOperand(0);
6394 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6395 }
6396
6397 // Verify rest of the relocate arguments.
6398 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6399
6400 // Both the base and derived must be piped through the safepoint.
6403 "gc.relocate operand #2 must be integer offset", Call);
6404
6405 Value *Derived = Call.getArgOperand(2);
6406 Check(isa<ConstantInt>(Derived),
6407 "gc.relocate operand #3 must be integer offset", Call);
6408
6409 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6410 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6411
6412 // Check the bounds
6413 if (isa<UndefValue>(StatepointCall))
6414 break;
6415 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6416 .getOperandBundle(LLVMContext::OB_gc_live)) {
6417 Check(BaseIndex < Opt->Inputs.size(),
6418 "gc.relocate: statepoint base index out of bounds", Call);
6419 Check(DerivedIndex < Opt->Inputs.size(),
6420 "gc.relocate: statepoint derived index out of bounds", Call);
6421 }
6422
6423 // Relocated value must be either a pointer type or vector-of-pointer type,
6424 // but gc_relocate does not need to return the same pointer type as the
6425 // relocated pointer. It can be casted to the correct type later if it's
6426 // desired. However, they must have the same address space and 'vectorness'
6427 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6428 auto *ResultType = Call.getType();
6429 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6430 auto *BaseType = Relocate.getBasePtr()->getType();
6431
6432 Check(BaseType->isPtrOrPtrVectorTy(),
6433 "gc.relocate: relocated value must be a pointer", Call);
6434 Check(DerivedType->isPtrOrPtrVectorTy(),
6435 "gc.relocate: relocated value must be a pointer", Call);
6436
6437 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6438 "gc.relocate: vector relocates to vector and pointer to pointer",
6439 Call);
6440 Check(
6441 ResultType->getPointerAddressSpace() ==
6442 DerivedType->getPointerAddressSpace(),
6443 "gc.relocate: relocating a pointer shouldn't change its address space",
6444 Call);
6445
6446 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6447 Check(GC, "gc.relocate: calling function must have GCStrategy",
6448 Call.getFunction());
6449 if (GC) {
6450 auto isGCPtr = [&GC](Type *PTy) {
6451 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6452 };
6453 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6454 Check(isGCPtr(BaseType),
6455 "gc.relocate: relocated value must be a gc pointer", Call);
6456 Check(isGCPtr(DerivedType),
6457 "gc.relocate: relocated value must be a gc pointer", Call);
6458 }
6459 break;
6460 }
6461 case Intrinsic::experimental_patchpoint: {
6462 if (Call.getCallingConv() == CallingConv::AnyReg) {
6464 "patchpoint: invalid return type used with anyregcc", Call);
6465 }
6466 break;
6467 }
6468 case Intrinsic::eh_exceptioncode:
6469 case Intrinsic::eh_exceptionpointer: {
6471 "eh.exceptionpointer argument must be a catchpad", Call);
6472 break;
6473 }
6474 case Intrinsic::get_active_lane_mask: {
6476 "get_active_lane_mask: must return a "
6477 "vector",
6478 Call);
6479 auto *ElemTy = Call.getType()->getScalarType();
6480 Check(ElemTy->isIntegerTy(1),
6481 "get_active_lane_mask: element type is not "
6482 "i1",
6483 Call);
6484 break;
6485 }
6486 case Intrinsic::experimental_get_vector_length: {
6487 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6488 Check(!VF->isNegative() && !VF->isZero(),
6489 "get_vector_length: VF must be positive", Call);
6490 break;
6491 }
6492 case Intrinsic::masked_load: {
6493 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6494 Call);
6495
6497 Value *PassThru = Call.getArgOperand(2);
6498 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6499 Call);
6500 Check(PassThru->getType() == Call.getType(),
6501 "masked_load: pass through and return type must match", Call);
6502 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6503 cast<VectorType>(Call.getType())->getElementCount(),
6504 "masked_load: vector mask must be same length as return", Call);
6505 break;
6506 }
6507 case Intrinsic::masked_store: {
6508 Value *Val = Call.getArgOperand(0);
6510 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6511 Call);
6512 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6513 cast<VectorType>(Val->getType())->getElementCount(),
6514 "masked_store: vector mask must be same length as value", Call);
6515 break;
6516 }
6517
6518 case Intrinsic::experimental_guard: {
6519 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6521 "experimental_guard must have exactly one "
6522 "\"deopt\" operand bundle");
6523 break;
6524 }
6525
6526 case Intrinsic::experimental_deoptimize: {
6527 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6528 Call);
6530 "experimental_deoptimize must have exactly one "
6531 "\"deopt\" operand bundle");
6533 "experimental_deoptimize return type must match caller return type");
6534
6535 if (isa<CallInst>(Call)) {
6537 Check(RI,
6538 "calls to experimental_deoptimize must be followed by a return");
6539
6540 if (!Call.getType()->isVoidTy() && RI)
6541 Check(RI->getReturnValue() == &Call,
6542 "calls to experimental_deoptimize must be followed by a return "
6543 "of the value computed by experimental_deoptimize");
6544 }
6545
6546 break;
6547 }
6548 case Intrinsic::vastart: {
6550 "va_start called in a non-varargs function");
6551 break;
6552 }
6553 case Intrinsic::get_dynamic_area_offset: {
6554 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6555 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6556 IntTy->getBitWidth(),
6557 "get_dynamic_area_offset result type must be scalar integer matching "
6558 "alloca address space width",
6559 Call);
6560 break;
6561 }
6562 case Intrinsic::vector_reduce_and:
6563 case Intrinsic::vector_reduce_or:
6564 case Intrinsic::vector_reduce_xor:
6565 case Intrinsic::vector_reduce_add:
6566 case Intrinsic::vector_reduce_mul:
6567 case Intrinsic::vector_reduce_smax:
6568 case Intrinsic::vector_reduce_smin:
6569 case Intrinsic::vector_reduce_umax:
6570 case Intrinsic::vector_reduce_umin: {
6571 Type *ArgTy = Call.getArgOperand(0)->getType();
6572 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6573 "Intrinsic has incorrect argument type!");
6574 break;
6575 }
6576 case Intrinsic::vector_reduce_fmax:
6577 case Intrinsic::vector_reduce_fmin: {
6578 Type *ArgTy = Call.getArgOperand(0)->getType();
6579 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6580 "Intrinsic has incorrect argument type!");
6581 break;
6582 }
6583 case Intrinsic::vector_reduce_fadd:
6584 case Intrinsic::vector_reduce_fmul: {
6585 // Unlike the other reductions, the first argument is a start value. The
6586 // second argument is the vector to be reduced.
6587 Type *ArgTy = Call.getArgOperand(1)->getType();
6588 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6589 "Intrinsic has incorrect argument type!");
6590 break;
6591 }
6592 case Intrinsic::smul_fix:
6593 case Intrinsic::smul_fix_sat:
6594 case Intrinsic::umul_fix:
6595 case Intrinsic::umul_fix_sat:
6596 case Intrinsic::sdiv_fix:
6597 case Intrinsic::sdiv_fix_sat:
6598 case Intrinsic::udiv_fix:
6599 case Intrinsic::udiv_fix_sat: {
6600 Value *Op1 = Call.getArgOperand(0);
6601 Value *Op2 = Call.getArgOperand(1);
6603 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6604 "vector of ints");
6606 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6607 "vector of ints");
6608
6609 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6610 Check(Op3->getType()->isIntegerTy(),
6611 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6612 Check(Op3->getBitWidth() <= 32,
6613 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6614
6615 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6616 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6617 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6618 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6619 "the operands");
6620 } else {
6621 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6622 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6623 "to the width of the operands");
6624 }
6625 break;
6626 }
6627 case Intrinsic::lrint:
6628 case Intrinsic::llrint:
6629 case Intrinsic::lround:
6630 case Intrinsic::llround: {
6631 Type *ValTy = Call.getArgOperand(0)->getType();
6632 Type *ResultTy = Call.getType();
6633 auto *VTy = dyn_cast<VectorType>(ValTy);
6634 auto *RTy = dyn_cast<VectorType>(ResultTy);
6635 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6636 ExpectedName + ": argument must be floating-point or vector "
6637 "of floating-points, and result must be integer or "
6638 "vector of integers",
6639 &Call);
6640 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6641 ExpectedName + ": argument and result disagree on vector use", &Call);
6642 if (VTy) {
6643 Check(VTy->getElementCount() == RTy->getElementCount(),
6644 ExpectedName + ": argument must be same length as result", &Call);
6645 }
6646 break;
6647 }
6648 case Intrinsic::bswap: {
6649 Type *Ty = Call.getType();
6650 unsigned Size = Ty->getScalarSizeInBits();
6651 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6652 break;
6653 }
6654 case Intrinsic::invariant_start: {
6655 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6656 Check(InvariantSize &&
6657 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6658 "invariant_start parameter must be -1, 0 or a positive number",
6659 &Call);
6660 break;
6661 }
6662 case Intrinsic::matrix_multiply:
6663 case Intrinsic::matrix_transpose:
6664 case Intrinsic::matrix_column_major_load:
6665 case Intrinsic::matrix_column_major_store: {
6667 ConstantInt *Stride = nullptr;
6668 ConstantInt *NumRows;
6669 ConstantInt *NumColumns;
6670 VectorType *ResultTy;
6671 Type *Op0ElemTy = nullptr;
6672 Type *Op1ElemTy = nullptr;
6673 switch (ID) {
6674 case Intrinsic::matrix_multiply: {
6675 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6676 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6677 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6679 ->getNumElements() ==
6680 NumRows->getZExtValue() * N->getZExtValue(),
6681 "First argument of a matrix operation does not match specified "
6682 "shape!");
6684 ->getNumElements() ==
6685 N->getZExtValue() * NumColumns->getZExtValue(),
6686 "Second argument of a matrix operation does not match specified "
6687 "shape!");
6688
6689 ResultTy = cast<VectorType>(Call.getType());
6690 Op0ElemTy =
6691 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6692 Op1ElemTy =
6693 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6694 break;
6695 }
6696 case Intrinsic::matrix_transpose:
6697 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6698 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6699 ResultTy = cast<VectorType>(Call.getType());
6700 Op0ElemTy =
6701 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6702 break;
6703 case Intrinsic::matrix_column_major_load: {
6705 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6706 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6707 ResultTy = cast<VectorType>(Call.getType());
6708 break;
6709 }
6710 case Intrinsic::matrix_column_major_store: {
6712 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6713 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6714 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6715 Op0ElemTy =
6716 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6717 break;
6718 }
6719 default:
6720 llvm_unreachable("unexpected intrinsic");
6721 }
6722
6723 Check(ResultTy->getElementType()->isIntegerTy() ||
6724 ResultTy->getElementType()->isFloatingPointTy(),
6725 "Result type must be an integer or floating-point type!", IF);
6726
6727 if (Op0ElemTy)
6728 Check(ResultTy->getElementType() == Op0ElemTy,
6729 "Vector element type mismatch of the result and first operand "
6730 "vector!",
6731 IF);
6732
6733 if (Op1ElemTy)
6734 Check(ResultTy->getElementType() == Op1ElemTy,
6735 "Vector element type mismatch of the result and second operand "
6736 "vector!",
6737 IF);
6738
6740 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6741 "Result of a matrix operation does not fit in the returned vector!");
6742
6743 if (Stride) {
6744 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6745 IF);
6746 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6747 "Stride must be greater or equal than the number of rows!", IF);
6748 }
6749
6750 break;
6751 }
6752 case Intrinsic::stepvector: {
6754 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6755 VecTy->getScalarSizeInBits() >= 8,
6756 "stepvector only supported for vectors of integers "
6757 "with a bitwidth of at least 8.",
6758 &Call);
6759 break;
6760 }
6761 case Intrinsic::experimental_vector_match: {
6762 Value *Op1 = Call.getArgOperand(0);
6763 Value *Op2 = Call.getArgOperand(1);
6765
6766 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6767 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6768 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6769
6770 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6772 "Second operand must be a fixed length vector.", &Call);
6773 Check(Op1Ty->getElementType()->isIntegerTy(),
6774 "First operand must be a vector of integers.", &Call);
6775 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6776 "First two operands must have the same element type.", &Call);
6777 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6778 "First operand and mask must have the same number of elements.",
6779 &Call);
6780 Check(MaskTy->getElementType()->isIntegerTy(1),
6781 "Mask must be a vector of i1's.", &Call);
6782 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6783 &Call);
6784 break;
6785 }
6786 case Intrinsic::vector_insert: {
6787 Value *Vec = Call.getArgOperand(0);
6788 Value *SubVec = Call.getArgOperand(1);
6789 Value *Idx = Call.getArgOperand(2);
6790 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6791
6792 VectorType *VecTy = cast<VectorType>(Vec->getType());
6793 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6794
6795 ElementCount VecEC = VecTy->getElementCount();
6796 ElementCount SubVecEC = SubVecTy->getElementCount();
6797 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6798 "vector_insert parameters must have the same element "
6799 "type.",
6800 &Call);
6801 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6802 "vector_insert index must be a constant multiple of "
6803 "the subvector's known minimum vector length.");
6804
6805 // If this insertion is not the 'mixed' case where a fixed vector is
6806 // inserted into a scalable vector, ensure that the insertion of the
6807 // subvector does not overrun the parent vector.
6808 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6809 Check(IdxN < VecEC.getKnownMinValue() &&
6810 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6811 "subvector operand of vector_insert would overrun the "
6812 "vector being inserted into.");
6813 }
6814 break;
6815 }
6816 case Intrinsic::vector_extract: {
6817 Value *Vec = Call.getArgOperand(0);
6818 Value *Idx = Call.getArgOperand(1);
6819 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6820
6821 VectorType *ResultTy = cast<VectorType>(Call.getType());
6822 VectorType *VecTy = cast<VectorType>(Vec->getType());
6823
6824 ElementCount VecEC = VecTy->getElementCount();
6825 ElementCount ResultEC = ResultTy->getElementCount();
6826
6827 Check(ResultTy->getElementType() == VecTy->getElementType(),
6828 "vector_extract result must have the same element "
6829 "type as the input vector.",
6830 &Call);
6831 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6832 "vector_extract index must be a constant multiple of "
6833 "the result type's known minimum vector length.");
6834
6835 // If this extraction is not the 'mixed' case where a fixed vector is
6836 // extracted from a scalable vector, ensure that the extraction does not
6837 // overrun the parent vector.
6838 if (VecEC.isScalable() == ResultEC.isScalable()) {
6839 Check(IdxN < VecEC.getKnownMinValue() &&
6840 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6841 "vector_extract would overrun.");
6842 }
6843 break;
6844 }
6845 case Intrinsic::vector_partial_reduce_fadd:
6846 case Intrinsic::vector_partial_reduce_add: {
6849
6850 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6851 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6852
6853 Check((VecWidth % AccWidth) == 0,
6854 "Invalid vector widths for partial "
6855 "reduction. The width of the input vector "
6856 "must be a positive integer multiple of "
6857 "the width of the accumulator vector.");
6858 break;
6859 }
6860 case Intrinsic::experimental_noalias_scope_decl: {
6861 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6862 break;
6863 }
6864 case Intrinsic::preserve_array_access_index:
6865 case Intrinsic::preserve_struct_access_index:
6866 case Intrinsic::aarch64_ldaxr:
6867 case Intrinsic::aarch64_ldxr:
6868 case Intrinsic::arm_ldaex:
6869 case Intrinsic::arm_ldrex: {
6870 Type *ElemTy = Call.getParamElementType(0);
6871 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6872 &Call);
6873 break;
6874 }
6875 case Intrinsic::aarch64_stlxr:
6876 case Intrinsic::aarch64_stxr:
6877 case Intrinsic::arm_stlex:
6878 case Intrinsic::arm_strex: {
6879 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6880 Check(ElemTy,
6881 "Intrinsic requires elementtype attribute on second argument.",
6882 &Call);
6883 break;
6884 }
6885 case Intrinsic::aarch64_prefetch: {
6886 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6887 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6888 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6889 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6890 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6891 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6892 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6893 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6894 break;
6895 }
6896 case Intrinsic::aarch64_range_prefetch: {
6897 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6898 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6899 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6900 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6901 Call);
6902 break;
6903 }
6904 case Intrinsic::aarch64_stshh_atomic_store: {
6905 uint64_t Order = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6906 Check(Order == static_cast<uint64_t>(AtomicOrderingCABI::relaxed) ||
6907 Order == static_cast<uint64_t>(AtomicOrderingCABI::release) ||
6908 Order == static_cast<uint64_t>(AtomicOrderingCABI::seq_cst),
6909 "order argument to llvm.aarch64.stshh.atomic.store must be 0, 3 or 5",
6910 Call);
6911
6912 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6913 "policy argument to llvm.aarch64.stshh.atomic.store must be 0 or 1",
6914 Call);
6915
6916 uint64_t Size = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6917 Check(Size == 8 || Size == 16 || Size == 32 || Size == 64,
6918 "size argument to llvm.aarch64.stshh.atomic.store must be 8, 16, "
6919 "32 or 64",
6920 Call);
6921 break;
6922 }
6923 case Intrinsic::callbr_landingpad: {
6924 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6925 Check(CBR, "intrinstic requires callbr operand", &Call);
6926 if (!CBR)
6927 break;
6928
6929 const BasicBlock *LandingPadBB = Call.getParent();
6930 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6931 if (!PredBB) {
6932 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6933 break;
6934 }
6935 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6936 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6937 &Call);
6938 break;
6939 }
6940 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6941 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6942 "block in indirect destination list",
6943 &Call);
6944 const Instruction &First = *LandingPadBB->begin();
6945 Check(&First == &Call, "No other instructions may proceed intrinsic",
6946 &Call);
6947 break;
6948 }
6949 case Intrinsic::structured_gep: {
6950 // Parser should refuse those 2 cases.
6951 assert(Call.arg_size() >= 1);
6953
6954 Check(Call.paramHasAttr(0, Attribute::ElementType),
6955 "Intrinsic first parameter is missing an ElementType attribute",
6956 &Call);
6957
6958 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
6959 for (unsigned I = 1; I < Call.arg_size(); ++I) {
6961 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
6962 Check(Index->getType()->isIntegerTy(),
6963 "Index operand type must be an integer", &Call);
6964
6965 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
6966 T = AT->getElementType();
6967 } else if (StructType *ST = dyn_cast<StructType>(T)) {
6968 Check(CI, "Indexing into a struct requires a constant int", &Call);
6969 Check(CI->getZExtValue() < ST->getNumElements(),
6970 "Indexing in a struct should be inbounds", &Call);
6971 T = ST->getElementType(CI->getZExtValue());
6972 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
6973 T = VT->getElementType();
6974 } else {
6975 CheckFailed("Reached a non-composite type with more indices to process",
6976 &Call);
6977 }
6978 }
6979 break;
6980 }
6981 case Intrinsic::structured_alloca:
6982 Check(Call.hasRetAttr(Attribute::ElementType),
6983 "@llvm.structured.alloca calls require elementtype attribute.",
6984 &Call);
6985 break;
6986 case Intrinsic::amdgcn_cs_chain: {
6987 auto CallerCC = Call.getCaller()->getCallingConv();
6988 switch (CallerCC) {
6989 case CallingConv::AMDGPU_CS:
6990 case CallingConv::AMDGPU_CS_Chain:
6991 case CallingConv::AMDGPU_CS_ChainPreserve:
6992 case CallingConv::AMDGPU_ES:
6993 case CallingConv::AMDGPU_GS:
6994 case CallingConv::AMDGPU_HS:
6995 case CallingConv::AMDGPU_LS:
6996 case CallingConv::AMDGPU_VS:
6997 break;
6998 default:
6999 CheckFailed("Intrinsic cannot be called from functions with this "
7000 "calling convention",
7001 &Call);
7002 break;
7003 }
7004
7005 Check(Call.paramHasAttr(2, Attribute::InReg),
7006 "SGPR arguments must have the `inreg` attribute", &Call);
7007 Check(!Call.paramHasAttr(3, Attribute::InReg),
7008 "VGPR arguments must not have the `inreg` attribute", &Call);
7009
7010 auto *Next = Call.getNextNode();
7011 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7012 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7013 Intrinsic::amdgcn_unreachable;
7014 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7015 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7016 break;
7017 }
7018 case Intrinsic::amdgcn_init_exec_from_input: {
7019 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7020 Check(Arg && Arg->hasInRegAttr(),
7021 "only inreg arguments to the parent function are valid as inputs to "
7022 "this intrinsic",
7023 &Call);
7024 break;
7025 }
7026 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7027 auto CallerCC = Call.getCaller()->getCallingConv();
7028 switch (CallerCC) {
7029 case CallingConv::AMDGPU_CS_Chain:
7030 case CallingConv::AMDGPU_CS_ChainPreserve:
7031 break;
7032 default:
7033 CheckFailed("Intrinsic can only be used from functions with the "
7034 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7035 "calling conventions",
7036 &Call);
7037 break;
7038 }
7039
7040 unsigned InactiveIdx = 1;
7041 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7042 "Value for inactive lanes must not have the `inreg` attribute",
7043 &Call);
7044 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7045 "Value for inactive lanes must be a function argument", &Call);
7046 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7047 "Value for inactive lanes must be a VGPR function argument", &Call);
7048 break;
7049 }
7050 case Intrinsic::amdgcn_call_whole_wave: {
7052 Check(F, "Indirect whole wave calls are not allowed", &Call);
7053
7054 CallingConv::ID CC = F->getCallingConv();
7055 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7056 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7057 &Call);
7058
7059 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7060
7061 Check(Call.arg_size() == F->arg_size(),
7062 "Call argument count must match callee argument count", &Call);
7063
7064 // The first argument of the call is the callee, and the first argument of
7065 // the callee is the active mask. The rest of the arguments must match.
7066 Check(F->arg_begin()->getType()->isIntegerTy(1),
7067 "Callee must have i1 as its first argument", &Call);
7068 for (auto [CallArg, FuncArg] :
7069 drop_begin(zip_equal(Call.args(), F->args()))) {
7070 Check(CallArg->getType() == FuncArg.getType(),
7071 "Argument types must match", &Call);
7072
7073 // Check that inreg attributes match between call site and function
7074 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7075 FuncArg.hasInRegAttr(),
7076 "Argument inreg attributes must match", &Call);
7077 }
7078 break;
7079 }
7080 case Intrinsic::amdgcn_s_prefetch_data: {
7081 Check(
7084 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7085 break;
7086 }
7087 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7088 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7089 Value *Src0 = Call.getArgOperand(0);
7090 Value *Src1 = Call.getArgOperand(1);
7091
7092 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7093 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7094 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7095 Call.getArgOperand(3));
7096 Check(BLGP <= 4, "invalid value for blgp format", Call,
7097 Call.getArgOperand(4));
7098
7099 // AMDGPU::MFMAScaleFormats values
7100 auto getFormatNumRegs = [](unsigned FormatVal) {
7101 switch (FormatVal) {
7102 case 0:
7103 case 1:
7104 return 8u;
7105 case 2:
7106 case 3:
7107 return 6u;
7108 case 4:
7109 return 4u;
7110 default:
7111 llvm_unreachable("invalid format value");
7112 }
7113 };
7114
7115 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7116 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7117 return false;
7118 unsigned NumElts = Ty->getNumElements();
7119 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7120 };
7121
7122 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7123 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7124 Check(isValidSrcASrcBVector(Src0Ty),
7125 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7126 Check(isValidSrcASrcBVector(Src1Ty),
7127 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7128
7129 // Permit excess registers for the format.
7130 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7131 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7132 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7133 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7134 break;
7135 }
7136 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7137 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7138 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7139 Value *Src0 = Call.getArgOperand(1);
7140 Value *Src1 = Call.getArgOperand(3);
7141
7142 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7143 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7144 Check(FmtA <= 4, "invalid value for matrix format", Call,
7145 Call.getArgOperand(0));
7146 Check(FmtB <= 4, "invalid value for matrix format", Call,
7147 Call.getArgOperand(2));
7148
7149 // AMDGPU::MatrixFMT values
7150 auto getFormatNumRegs = [](unsigned FormatVal) {
7151 switch (FormatVal) {
7152 case 0:
7153 case 1:
7154 return 16u;
7155 case 2:
7156 case 3:
7157 return 12u;
7158 case 4:
7159 return 8u;
7160 default:
7161 llvm_unreachable("invalid format value");
7162 }
7163 };
7164
7165 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7166 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7167 return false;
7168 unsigned NumElts = Ty->getNumElements();
7169 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7170 };
7171
7172 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7173 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7174 Check(isValidSrcASrcBVector(Src0Ty),
7175 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7176 Check(isValidSrcASrcBVector(Src1Ty),
7177 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7178
7179 // Permit excess registers for the format.
7180 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7181 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7182 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7183 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7184 break;
7185 }
7186 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7187 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7188 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7189 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7190 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7191 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7192 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7193 Value *PtrArg = Call.getArgOperand(0);
7194 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7196 "cooperative atomic intrinsics require a generic or global pointer",
7197 &Call, PtrArg);
7198
7199 // Last argument must be a MD string
7201 MDNode *MD = cast<MDNode>(Op->getMetadata());
7202 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7203 "cooperative atomic intrinsics require that the last argument is a "
7204 "metadata string",
7205 &Call, Op);
7206 break;
7207 }
7208 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7209 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7210 Value *V = Call.getArgOperand(0);
7211 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7212 Check(RegCount % 8 == 0,
7213 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7214 break;
7215 }
7216 case Intrinsic::experimental_convergence_entry:
7217 case Intrinsic::experimental_convergence_anchor:
7218 break;
7219 case Intrinsic::experimental_convergence_loop:
7220 break;
7221 case Intrinsic::ptrmask: {
7222 Type *Ty0 = Call.getArgOperand(0)->getType();
7223 Type *Ty1 = Call.getArgOperand(1)->getType();
7225 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7226 "of pointers",
7227 &Call);
7228 Check(
7229 Ty0->isVectorTy() == Ty1->isVectorTy(),
7230 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7231 &Call);
7232 if (Ty0->isVectorTy())
7233 Check(cast<VectorType>(Ty0)->getElementCount() ==
7234 cast<VectorType>(Ty1)->getElementCount(),
7235 "llvm.ptrmask intrinsic arguments must have the same number of "
7236 "elements",
7237 &Call);
7238 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7239 "llvm.ptrmask intrinsic second argument bitwidth must match "
7240 "pointer index type size of first argument",
7241 &Call);
7242 break;
7243 }
7244 case Intrinsic::thread_pointer: {
7246 DL.getDefaultGlobalsAddressSpace(),
7247 "llvm.thread.pointer intrinsic return type must be for the globals "
7248 "address space",
7249 &Call);
7250 break;
7251 }
7252 case Intrinsic::threadlocal_address: {
7253 const Value &Arg0 = *Call.getArgOperand(0);
7254 Check(isa<GlobalValue>(Arg0),
7255 "llvm.threadlocal.address first argument must be a GlobalValue");
7256 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7257 "llvm.threadlocal.address operand isThreadLocal() must be true");
7258 break;
7259 }
7260 case Intrinsic::lifetime_start:
7261 case Intrinsic::lifetime_end: {
7262 Value *Ptr = Call.getArgOperand(0);
7263 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Ptr);
7264 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr) ||
7265 (II && II->getIntrinsicID() == Intrinsic::structured_alloca),
7266 "llvm.lifetime.start/end can only be used on alloca or poison",
7267 &Call);
7268 break;
7269 }
7270 case Intrinsic::sponentry: {
7271 const unsigned StackAS = DL.getAllocaAddrSpace();
7272 const Type *RetTy = Call.getFunctionType()->getReturnType();
7273 Check(RetTy->getPointerAddressSpace() == StackAS,
7274 "llvm.sponentry must return a pointer to the stack", &Call);
7275 break;
7276 }
7277 };
7278
7279 // Verify that there aren't any unmediated control transfers between funclets.
7281 Function *F = Call.getParent()->getParent();
7282 if (F->hasPersonalityFn() &&
7283 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7284 // Run EH funclet coloring on-demand and cache results for other intrinsic
7285 // calls in this function
7286 if (BlockEHFuncletColors.empty())
7287 BlockEHFuncletColors = colorEHFunclets(*F);
7288
7289 // Check for catch-/cleanup-pad in first funclet block
7290 bool InEHFunclet = false;
7291 BasicBlock *CallBB = Call.getParent();
7292 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7293 assert(CV.size() > 0 && "Uncolored block");
7294 for (BasicBlock *ColorFirstBB : CV)
7295 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7296 It != ColorFirstBB->end())
7298 InEHFunclet = true;
7299
7300 // Check for funclet operand bundle
7301 bool HasToken = false;
7302 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7304 HasToken = true;
7305
7306 // This would cause silent code truncation in WinEHPrepare
7307 if (InEHFunclet)
7308 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7309 }
7310 }
7311}
7312
7313/// Carefully grab the subprogram from a local scope.
7314///
7315/// This carefully grabs the subprogram from a local scope, avoiding the
7316/// built-in assertions that would typically fire.
7318 if (!LocalScope)
7319 return nullptr;
7320
7321 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7322 return SP;
7323
7324 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7325 return getSubprogram(LB->getRawScope());
7326
7327 // Just return null; broken scope chains are checked elsewhere.
7328 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7329 return nullptr;
7330}
7331
/// Verify a single #dbg_label debug record.
void Verifier::visit(DbgLabelRecord &DLR) {
  // The raw label operand of the record must be a DILabel.
        "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr; // getParent() may be null.

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);

  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  // Broken scope chains are diagnosed elsewhere; nothing more to check here.
  if (!LabelSP || !LocSP)
    return;

  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment",
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
7359
/// Verify a single #dbg_value / #dbg_declare / #dbg_assign debug record.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  // Only the four known record kinds are valid.
  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value", &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    visitValueAsMetadata(*VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
    visitDIArgList(*AL, F);
  }

  // Validate the variable operand.
          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);

  // Validate the expression operand.
          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);

  if (DVR.isDbgAssign()) {
    // #dbg_assign records additionally carry a DIAssignID plus an address and
    // an address expression; validate each.
            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
                AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
      visitValueAsMetadata(*VAM, F);

            "invalid #dbg_assign address expression", &DVR,
            DVR.getRawAddressExpression(), BB, F);
    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(&DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
          BB, F);

  // Every debug record requires a valid DILocation !dbg attachment.
  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation",
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  verifyFnArgs(DVR);
}
7451
7452void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7453 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7454 auto *RetTy = cast<VectorType>(VPCast->getType());
7455 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7456 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7457 "VP cast intrinsic first argument and result vector lengths must be "
7458 "equal",
7459 *VPCast);
7460
7461 switch (VPCast->getIntrinsicID()) {
7462 default:
7463 llvm_unreachable("Unknown VP cast intrinsic");
7464 case Intrinsic::vp_trunc:
7465 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7466 "llvm.vp.trunc intrinsic first argument and result element type "
7467 "must be integer",
7468 *VPCast);
7469 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7470 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7471 "larger than the bit size of the return type",
7472 *VPCast);
7473 break;
7474 case Intrinsic::vp_zext:
7475 case Intrinsic::vp_sext:
7476 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7477 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7478 "element type must be integer",
7479 *VPCast);
7480 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7481 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7482 "argument must be smaller than the bit size of the return type",
7483 *VPCast);
7484 break;
7485 case Intrinsic::vp_fptoui:
7486 case Intrinsic::vp_fptosi:
7487 case Intrinsic::vp_lrint:
7488 case Intrinsic::vp_llrint:
7489 Check(
7490 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7491 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7492 "type must be floating-point and result element type must be integer",
7493 *VPCast);
7494 break;
7495 case Intrinsic::vp_uitofp:
7496 case Intrinsic::vp_sitofp:
7497 Check(
7498 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7499 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7500 "type must be integer and result element type must be floating-point",
7501 *VPCast);
7502 break;
7503 case Intrinsic::vp_fptrunc:
7504 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7505 "llvm.vp.fptrunc intrinsic first argument and result element type "
7506 "must be floating-point",
7507 *VPCast);
7508 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7509 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7510 "larger than the bit size of the return type",
7511 *VPCast);
7512 break;
7513 case Intrinsic::vp_fpext:
7514 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7515 "llvm.vp.fpext intrinsic first argument and result element type "
7516 "must be floating-point",
7517 *VPCast);
7518 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7519 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7520 "smaller than the bit size of the return type",
7521 *VPCast);
7522 break;
7523 case Intrinsic::vp_ptrtoint:
7524 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7525 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7526 "pointer and result element type must be integer",
7527 *VPCast);
7528 break;
7529 case Intrinsic::vp_inttoptr:
7530 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7531 "llvm.vp.inttoptr intrinsic first argument element type must be "
7532 "integer and result element type must be pointer",
7533 *VPCast);
7534 break;
7535 }
7536 }
7537
7538 switch (VPI.getIntrinsicID()) {
7539 case Intrinsic::vp_fcmp: {
7540 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7542 "invalid predicate for VP FP comparison intrinsic", &VPI);
7543 break;
7544 }
7545 case Intrinsic::vp_icmp: {
7546 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7548 "invalid predicate for VP integer comparison intrinsic", &VPI);
7549 break;
7550 }
7551 case Intrinsic::vp_is_fpclass: {
7552 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7553 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7554 "unsupported bits for llvm.vp.is.fpclass test mask");
7555 break;
7556 }
7557 case Intrinsic::experimental_vp_splice: {
7558 VectorType *VecTy = cast<VectorType>(VPI.getType());
7559 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7560 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7561 if (VPI.getParent() && VPI.getParent()->getParent()) {
7562 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7563 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7564 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7565 }
7566 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7567 (Idx >= 0 && Idx < KnownMinNumElements),
7568 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7569 "known minimum number of elements in the vector. For scalable "
7570 "vectors the minimum number of elements is determined from "
7571 "vscale_range.",
7572 &VPI);
7573 break;
7574 }
7575 }
7576}
7577
7578void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7579 unsigned NumOperands = FPI.getNonMetadataArgCount();
7580 bool HasRoundingMD =
7582
7583 // Add the expected number of metadata operands.
7584 NumOperands += (1 + HasRoundingMD);
7585
7586 // Compare intrinsics carry an extra predicate metadata operand.
7588 NumOperands += 1;
7589 Check((FPI.arg_size() == NumOperands),
7590 "invalid arguments for constrained FP intrinsic", &FPI);
7591
7592 switch (FPI.getIntrinsicID()) {
7593 case Intrinsic::experimental_constrained_lrint:
7594 case Intrinsic::experimental_constrained_llrint: {
7595 Type *ValTy = FPI.getArgOperand(0)->getType();
7596 Type *ResultTy = FPI.getType();
7597 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7598 "Intrinsic does not support vectors", &FPI);
7599 break;
7600 }
7601
7602 case Intrinsic::experimental_constrained_lround:
7603 case Intrinsic::experimental_constrained_llround: {
7604 Type *ValTy = FPI.getArgOperand(0)->getType();
7605 Type *ResultTy = FPI.getType();
7606 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7607 "Intrinsic does not support vectors", &FPI);
7608 break;
7609 }
7610
7611 case Intrinsic::experimental_constrained_fcmp:
7612 case Intrinsic::experimental_constrained_fcmps: {
7613 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7615 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7616 break;
7617 }
7618
7619 case Intrinsic::experimental_constrained_fptosi:
7620 case Intrinsic::experimental_constrained_fptoui: {
7621 Value *Operand = FPI.getArgOperand(0);
7622 ElementCount SrcEC;
7623 Check(Operand->getType()->isFPOrFPVectorTy(),
7624 "Intrinsic first argument must be floating point", &FPI);
7625 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7626 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7627 }
7628
7629 Operand = &FPI;
7630 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7631 "Intrinsic first argument and result disagree on vector use", &FPI);
7632 Check(Operand->getType()->isIntOrIntVectorTy(),
7633 "Intrinsic result must be an integer", &FPI);
7634 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7635 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7636 "Intrinsic first argument and result vector lengths must be equal",
7637 &FPI);
7638 }
7639 break;
7640 }
7641
7642 case Intrinsic::experimental_constrained_sitofp:
7643 case Intrinsic::experimental_constrained_uitofp: {
7644 Value *Operand = FPI.getArgOperand(0);
7645 ElementCount SrcEC;
7646 Check(Operand->getType()->isIntOrIntVectorTy(),
7647 "Intrinsic first argument must be integer", &FPI);
7648 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7649 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7650 }
7651
7652 Operand = &FPI;
7653 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7654 "Intrinsic first argument and result disagree on vector use", &FPI);
7655 Check(Operand->getType()->isFPOrFPVectorTy(),
7656 "Intrinsic result must be a floating point", &FPI);
7657 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7658 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7659 "Intrinsic first argument and result vector lengths must be equal",
7660 &FPI);
7661 }
7662 break;
7663 }
7664
7665 case Intrinsic::experimental_constrained_fptrunc:
7666 case Intrinsic::experimental_constrained_fpext: {
7667 Value *Operand = FPI.getArgOperand(0);
7668 Type *OperandTy = Operand->getType();
7669 Value *Result = &FPI;
7670 Type *ResultTy = Result->getType();
7671 Check(OperandTy->isFPOrFPVectorTy(),
7672 "Intrinsic first argument must be FP or FP vector", &FPI);
7673 Check(ResultTy->isFPOrFPVectorTy(),
7674 "Intrinsic result must be FP or FP vector", &FPI);
7675 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7676 "Intrinsic first argument and result disagree on vector use", &FPI);
7677 if (OperandTy->isVectorTy()) {
7678 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7679 cast<VectorType>(ResultTy)->getElementCount(),
7680 "Intrinsic first argument and result vector lengths must be equal",
7681 &FPI);
7682 }
7683 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7684 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7685 "Intrinsic first argument's type must be larger than result type",
7686 &FPI);
7687 } else {
7688 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7689 "Intrinsic first argument's type must be smaller than result type",
7690 &FPI);
7691 }
7692 break;
7693 }
7694
7695 default:
7696 break;
7697 }
7698
7699 // If a non-metadata argument is passed in a metadata slot then the
7700 // error will be caught earlier when the incorrect argument doesn't
7701 // match the specification in the intrinsic call table. Thus, no
7702 // argument type check is needed here.
7703
7704 Check(FPI.getExceptionBehavior().has_value(),
7705 "invalid exception behavior argument", &FPI);
7706 if (HasRoundingMD) {
7707 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7708 &FPI);
7709 }
7710}
7711
7712void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7713 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7714 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7715
7716 // We don't know whether this intrinsic verified correctly.
7717 if (!V || !E || !E->isValid())
7718 return;
7719
7720 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7721 auto Fragment = E->getFragmentInfo();
7722 if (!Fragment)
7723 return;
7724
7725 // The frontend helps out GDB by emitting the members of local anonymous
7726 // unions as artificial local variables with shared storage. When SROA splits
7727 // the storage for artificial local variables that are smaller than the entire
7728 // union, the overhang piece will be outside of the allotted space for the
7729 // variable and this check fails.
7730 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7731 if (V->isArtificial())
7732 return;
7733
7734 verifyFragmentExpression(*V, *Fragment, &DVR);
7735}
7736
7737template <typename ValueOrMetadata>
7738void Verifier::verifyFragmentExpression(const DIVariable &V,
7740 ValueOrMetadata *Desc) {
7741 // If there's no size, the type is broken, but that should be checked
7742 // elsewhere.
7743 auto VarSize = V.getSizeInBits();
7744 if (!VarSize)
7745 return;
7746
7747 unsigned FragSize = Fragment.SizeInBits;
7748 unsigned FragOffset = Fragment.OffsetInBits;
7749 CheckDI(FragSize + FragOffset <= *VarSize,
7750 "fragment is larger than or outside of variable", Desc, &V);
7751 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7752}
7753
7754void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7755 // This function does not take the scope of noninlined function arguments into
7756 // account. Don't run it if current function is nodebug, because it may
7757 // contain inlined debug intrinsics.
7758 if (!HasDebugInfo)
7759 return;
7760
7761 // For performance reasons only check non-inlined ones.
7762 if (DVR.getDebugLoc()->getInlinedAt())
7763 return;
7764
7765 DILocalVariable *Var = DVR.getVariable();
7766 CheckDI(Var, "#dbg record without variable");
7767
7768 unsigned ArgNo = Var->getArg();
7769 if (!ArgNo)
7770 return;
7771
7772 // Verify there are no duplicate function argument debug info entries.
7773 // These will cause hard-to-debug assertions in the DWARF backend.
7774 if (DebugFnArgs.size() < ArgNo)
7775 DebugFnArgs.resize(ArgNo, nullptr);
7776
7777 auto *Prev = DebugFnArgs[ArgNo - 1];
7778 DebugFnArgs[ArgNo - 1] = Var;
7779 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7780 Prev, Var);
7781}
7782
7783void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7784 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7785
7786 // We don't know whether this intrinsic verified correctly.
7787 if (!E || !E->isValid())
7788 return;
7789
7791 Value *VarValue = DVR.getVariableLocationOp(0);
7792 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7793 return;
7794 // We allow EntryValues for swift async arguments, as they have an
7795 // ABI-guarantee to be turned into a specific register.
7796 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7797 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7798 return;
7799 }
7800
7801 CheckDI(!E->isEntryValue(),
7802 "Entry values are only allowed in MIR unless they target a "
7803 "swiftasync Argument",
7804 &DVR);
7805}
7806
7807void Verifier::verifyCompileUnits() {
7808 // When more than one Module is imported into the same context, such as during
7809 // an LTO build before linking the modules, ODR type uniquing may cause types
7810 // to point to a different CU. This check does not make sense in this case.
7811 if (M.getContext().isODRUniquingDebugTypes())
7812 return;
7813 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7814 SmallPtrSet<const Metadata *, 2> Listed;
7815 if (CUs)
7816 Listed.insert_range(CUs->operands());
7817 for (const auto *CU : CUVisited)
7818 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7819 CUVisited.clear();
7820}
7821
7822void Verifier::verifyDeoptimizeCallingConvs() {
7823 if (DeoptimizeDeclarations.empty())
7824 return;
7825
7826 const Function *First = DeoptimizeDeclarations[0];
7827 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7828 Check(First->getCallingConv() == F->getCallingConv(),
7829 "All llvm.experimental.deoptimize declarations must have the same "
7830 "calling convention",
7831 First, F);
7832 }
7833}
7834
7835void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7836 const OperandBundleUse &BU) {
7837 FunctionType *FTy = Call.getFunctionType();
7838
7839 Check((FTy->getReturnType()->isPointerTy() ||
7840 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7841 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7842 "function returning a pointer or a non-returning function that has a "
7843 "void return type",
7844 Call);
7845
7846 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7847 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7848 "an argument",
7849 Call);
7850
7851 auto *Fn = cast<Function>(BU.Inputs.front());
7852 Intrinsic::ID IID = Fn->getIntrinsicID();
7853
7854 if (IID) {
7855 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7856 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7857 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7858 "invalid function argument", Call);
7859 } else {
7860 StringRef FnName = Fn->getName();
7861 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7862 FnName == "objc_claimAutoreleasedReturnValue" ||
7863 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7864 "invalid function argument", Call);
7865 }
7866}
7867
7868void Verifier::verifyNoAliasScopeDecl() {
7869 if (NoAliasScopeDecls.empty())
7870 return;
7871
7872 // only a single scope must be declared at a time.
7873 for (auto *II : NoAliasScopeDecls) {
7874 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7875 "Not a llvm.experimental.noalias.scope.decl ?");
7876 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7878 Check(ScopeListMV != nullptr,
7879 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7880 "argument",
7881 II);
7882
7883 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7884 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7885 Check(ScopeListMD->getNumOperands() == 1,
7886 "!id.scope.list must point to a list with a single scope", II);
7887 visitAliasScopeListMetadata(ScopeListMD);
7888 }
7889
7890 // Only check the domination rule when requested. Once all passes have been
7891 // adapted this option can go away.
7893 return;
7894
7895 // Now sort the intrinsics based on the scope MDNode so that declarations of
7896 // the same scopes are next to each other.
7897 auto GetScope = [](IntrinsicInst *II) {
7898 const auto *ScopeListMV = cast<MetadataAsValue>(
7900 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7901 };
7902
7903 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7904 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7905 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7906 return GetScope(Lhs) < GetScope(Rhs);
7907 };
7908
7909 llvm::sort(NoAliasScopeDecls, Compare);
7910
7911 // Go over the intrinsics and check that for the same scope, they are not
7912 // dominating each other.
7913 auto ItCurrent = NoAliasScopeDecls.begin();
7914 while (ItCurrent != NoAliasScopeDecls.end()) {
7915 auto CurScope = GetScope(*ItCurrent);
7916 auto ItNext = ItCurrent;
7917 do {
7918 ++ItNext;
7919 } while (ItNext != NoAliasScopeDecls.end() &&
7920 GetScope(*ItNext) == CurScope);
7921
7922 // [ItCurrent, ItNext) represents the declarations for the same scope.
7923 // Ensure they are not dominating each other.. but only if it is not too
7924 // expensive.
7925 if (ItNext - ItCurrent < 32)
7926 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7927 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7928 if (I != J)
7929 Check(!DT.dominates(I, J),
7930 "llvm.experimental.noalias.scope.decl dominates another one "
7931 "with the same scope",
7932 I);
7933 ItCurrent = ItNext;
7934 }
7935}
7936
7937//===----------------------------------------------------------------------===//
7938// Implement the public interfaces to this file...
7939//===----------------------------------------------------------------------===//
7940
7942 Function &F = const_cast<Function &>(f);
7943
7944 // Don't use a raw_null_ostream. Printing IR is expensive.
7945 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7946
7947 // Note that this function's return value is inverted from what you would
7948 // expect of a function called "verify".
7949 return !V.verify(F);
7950}
7951
7953 bool *BrokenDebugInfo) {
7954 // Don't use a raw_null_ostream. Printing IR is expensive.
7955 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7956
7957 bool Broken = false;
7958 for (const Function &F : M)
7959 Broken |= !V.verify(F);
7960
7961 Broken |= !V.verify();
7962 if (BrokenDebugInfo)
7963 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7964 // Note that this function's return value is inverted from what you would
7965 // expect of a function called "verify".
7966 return Broken;
7967}
7968
7969namespace {
7970
7971struct VerifierLegacyPass : public FunctionPass {
7972 static char ID;
7973
7974 std::unique_ptr<Verifier> V;
7975 bool FatalErrors = true;
7976
7977 VerifierLegacyPass() : FunctionPass(ID) {}
7978 explicit VerifierLegacyPass(bool FatalErrors)
7979 : FunctionPass(ID), FatalErrors(FatalErrors) {}
7980
7981 bool doInitialization(Module &M) override {
7982 V = std::make_unique<Verifier>(
7983 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7984 return false;
7985 }
7986
7987 bool runOnFunction(Function &F) override {
7988 if (!V->verify(F) && FatalErrors) {
7989 errs() << "in function " << F.getName() << '\n';
7990 report_fatal_error("Broken function found, compilation aborted!");
7991 }
7992 return false;
7993 }
7994
7995 bool doFinalization(Module &M) override {
7996 bool HasErrors = false;
7997 for (Function &F : M)
7998 if (F.isDeclaration())
7999 HasErrors |= !V->verify(F);
8000
8001 HasErrors |= !V->verify();
8002 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
8003 report_fatal_error("Broken module found, compilation aborted!");
8004 return false;
8005 }
8006
8007 void getAnalysisUsage(AnalysisUsage &AU) const override {
8008 AU.setPreservesAll();
8009 }
8010};
8011
8012} // end anonymous namespace
8013
8014/// Helper to issue failure from the TBAA verification
8015template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
8016 if (Diagnostic)
8017 return Diagnostic->CheckFailed(Args...);
8018}
8019
// Check a TBAA-specific condition; on failure, report the remaining arguments
// via CheckFailed and make the enclosing function return false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
8027
8028/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8029/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8030/// struct-type node describing an aggregate data structure (like a struct).
8031TBAAVerifier::TBAABaseNodeSummary
8032TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8033 bool IsNewFormat) {
8034 if (BaseNode->getNumOperands() < 2) {
8035 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8036 return {true, ~0u};
8037 }
8038
8039 auto Itr = TBAABaseNodes.find(BaseNode);
8040 if (Itr != TBAABaseNodes.end())
8041 return Itr->second;
8042
8043 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8044 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8045 (void)InsertResult;
8046 assert(InsertResult.second && "We just checked!");
8047 return Result;
8048}
8049
8050TBAAVerifier::TBAABaseNodeSummary
8051TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
8052 const MDNode *BaseNode, bool IsNewFormat) {
8053 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
8054
8055 if (BaseNode->getNumOperands() == 2) {
8056 // Scalar nodes can only be accessed at offset 0.
8057 return isValidScalarTBAANode(BaseNode)
8058 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
8059 : InvalidNode;
8060 }
8061
8062 if (IsNewFormat) {
8063 if (BaseNode->getNumOperands() % 3 != 0) {
8064 CheckFailed("Access tag nodes must have the number of operands that is a "
8065 "multiple of 3!", BaseNode);
8066 return InvalidNode;
8067 }
8068 } else {
8069 if (BaseNode->getNumOperands() % 2 != 1) {
8070 CheckFailed("Struct tag nodes must have an odd number of operands!",
8071 BaseNode);
8072 return InvalidNode;
8073 }
8074 }
8075
8076 // Check the type size field.
8077 if (IsNewFormat) {
8078 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8079 BaseNode->getOperand(1));
8080 if (!TypeSizeNode) {
8081 CheckFailed("Type size nodes must be constants!", I, BaseNode);
8082 return InvalidNode;
8083 }
8084 }
8085
8086 // Check the type name field. In the new format it can be anything.
8087 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
8088 CheckFailed("Struct tag nodes have a string as their first operand",
8089 BaseNode);
8090 return InvalidNode;
8091 }
8092
8093 bool Failed = false;
8094
8095 std::optional<APInt> PrevOffset;
8096 unsigned BitWidth = ~0u;
8097
8098 // We've already checked that BaseNode is not a degenerate root node with one
8099 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
8100 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8101 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8102 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8103 Idx += NumOpsPerField) {
8104 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8105 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8106 if (!isa<MDNode>(FieldTy)) {
8107 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8108 Failed = true;
8109 continue;
8110 }
8111
8112 auto *OffsetEntryCI =
8114 if (!OffsetEntryCI) {
8115 CheckFailed("Offset entries must be constants!", I, BaseNode);
8116 Failed = true;
8117 continue;
8118 }
8119
8120 if (BitWidth == ~0u)
8121 BitWidth = OffsetEntryCI->getBitWidth();
8122
8123 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8124 CheckFailed(
8125 "Bitwidth between the offsets and struct type entries must match", I,
8126 BaseNode);
8127 Failed = true;
8128 continue;
8129 }
8130
8131 // NB! As far as I can tell, we generate a non-strictly increasing offset
8132 // sequence only from structs that have zero size bit fields. When
8133 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8134 // pick the field lexically the latest in struct type metadata node. This
8135 // mirrors the actual behavior of the alias analysis implementation.
8136 bool IsAscending =
8137 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8138
8139 if (!IsAscending) {
8140 CheckFailed("Offsets must be increasing!", I, BaseNode);
8141 Failed = true;
8142 }
8143
8144 PrevOffset = OffsetEntryCI->getValue();
8145
8146 if (IsNewFormat) {
8147 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8148 BaseNode->getOperand(Idx + 2));
8149 if (!MemberSizeNode) {
8150 CheckFailed("Member size entries must be constants!", I, BaseNode);
8151 Failed = true;
8152 continue;
8153 }
8154 }
8155 }
8156
8157 return Failed ? InvalidNode
8158 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8159}
8160
8161static bool IsRootTBAANode(const MDNode *MD) {
8162 return MD->getNumOperands() < 2;
8163}
8164
8165static bool IsScalarTBAANodeImpl(const MDNode *MD,
8167 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8168 return false;
8169
8170 if (!isa<MDString>(MD->getOperand(0)))
8171 return false;
8172
8173 if (MD->getNumOperands() == 3) {
8175 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8176 return false;
8177 }
8178
8179 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8180 return Parent && Visited.insert(Parent).second &&
8181 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8182}
8183
8184bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8185 auto ResultIt = TBAAScalarNodes.find(MD);
8186 if (ResultIt != TBAAScalarNodes.end())
8187 return ResultIt->second;
8188
8189 SmallPtrSet<const MDNode *, 4> Visited;
8190 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8191 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8192 (void)InsertResult;
8193 assert(InsertResult.second && "Just checked!");
8194
8195 return Result;
8196}
8197
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Fields are stored as (type, offset[, size]) groups with offsets in
  // ascending order; find the first field whose offset exceeds Offset — the
  // access then falls in the field just before it.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      // Offset is below the very first field's offset: no containing field.
      if (Idx == FirstFieldOpNo) {
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // Rebase Offset to be relative to the start of the matched field.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Every field offset is <= Offset, so the access falls in the last field.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
8241
8243 if (!Type || Type->getNumOperands() < 3)
8244 return false;
8245
8246 // In the new format type nodes shall have a reference to the parent type as
8247 // its first operand.
8248 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8249}
8250
8252 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8253 MD);
8254
8255 if (I)
8259 "This instruction shall not have a TBAA access tag!", I);
8260
8261 bool IsStructPathTBAA =
8262 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8263
8264 CheckTBAA(IsStructPathTBAA,
8265 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8266 I);
8267
8268 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8269 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8270
8271 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8272
8273 if (IsNewFormat) {
8274 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8275 "Access tag metadata must have either 4 or 5 operands", I, MD);
8276 } else {
8277 CheckTBAA(MD->getNumOperands() < 5,
8278 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8279 }
8280
8281 // Check the access size field.
8282 if (IsNewFormat) {
8283 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8284 MD->getOperand(3));
8285 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8286 }
8287
8288 // Check the immutability flag.
8289 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8290 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8291 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8292 MD->getOperand(ImmutabilityFlagOpNo));
8293 CheckTBAA(IsImmutableCI,
8294 "Immutability tag on struct tag metadata must be a constant", I,
8295 MD);
8296 CheckTBAA(
8297 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8298 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8299 MD);
8300 }
8301
8302 CheckTBAA(BaseNode && AccessType,
8303 "Malformed struct tag metadata: base and access-type "
8304 "should be non-null and point to Metadata nodes",
8305 I, MD, BaseNode, AccessType);
8306
8307 if (!IsNewFormat) {
8308 CheckTBAA(isValidScalarTBAANode(AccessType),
8309 "Access type node must be a valid scalar type", I, MD,
8310 AccessType);
8311 }
8312
8314 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8315
8316 APInt Offset = OffsetCI->getValue();
8317 bool SeenAccessTypeInPath = false;
8318
8319 SmallPtrSet<MDNode *, 4> StructPath;
8320
8321 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8322 BaseNode =
8323 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8324 if (!StructPath.insert(BaseNode).second) {
8325 CheckFailed("Cycle detected in struct path", I, MD);
8326 return false;
8327 }
8328
8329 bool Invalid;
8330 unsigned BaseNodeBitWidth;
8331 std::tie(Invalid, BaseNodeBitWidth) =
8332 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8333
8334 // If the base node is invalid in itself, then we've already printed all the
8335 // errors we wanted to print.
8336 if (Invalid)
8337 return false;
8338
8339 SeenAccessTypeInPath |= BaseNode == AccessType;
8340
8341 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8342 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8343 MD, &Offset);
8344
8345 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8346 (BaseNodeBitWidth == 0 && Offset == 0) ||
8347 (IsNewFormat && BaseNodeBitWidth == ~0u),
8348 "Access bit-width not the same as description bit-width", I, MD,
8349 BaseNodeBitWidth, Offset.getBitWidth());
8350
8351 if (IsNewFormat && SeenAccessTypeInPath)
8352 break;
8353 }
8354
8355 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8356 MD);
8357 return true;
8358}
8359
// Registration of the legacy verifier pass with the legacy pass manager.
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8362
8364 return new VerifierLegacyPass(FatalErrors);
8365}
8366
// Unique identity token for VerifierAnalysis in the new pass manager.
AnalysisKey VerifierAnalysis::Key;
8374
8379
8381 auto Res = AM.getResult<VerifierAnalysis>(M);
8382 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8383 report_fatal_error("Broken module found, compilation aborted!");
8384
8385 return PreservedAnalyses::all();
8386}
8387
8389 auto res = AM.getResult<VerifierAnalysis>(F);
8390 if (res.IRBroken && FatalErrors)
8391 report_fatal_error("Broken function found, compilation aborted!");
8392
8393 return PreservedAnalyses::all();
8394}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:687
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:728
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5990
bool isFiniteNonZero() const
Definition APFloat.h:1526
bool isNegative() const
Definition APFloat.h:1516
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
Return the number of 'handlers' in this catchswitch instruction, excluding the default handler.
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1239
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
op_range arg_operands()
arg_operands - iteration adapter for range-for loops.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:682
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:577
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:331
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:110
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:563
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
Return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:421
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:717
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:824
iterator_range< user_iterator > users()
Definition Value.h:427
bool materialized_use_empty() const
Definition Value.h:352
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:257
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:258
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:307
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:156
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:150
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:300
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:152
LLVMContext & Context
Definition Verifier.cpp:147
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:154
void CheckFailed(const Twine &Message)
A check failed, so print out the condition and the message.
Definition Verifier.cpp:289
const Module & M
Definition Verifier.cpp:143
const DataLayout & DL
Definition Verifier.cpp:146
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:316
const Triple & TT
Definition Verifier.cpp:145
ModuleSlotTracker MST
Definition Verifier.cpp:144