LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
 30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
 32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
141 const Module &M;
143 const Triple &TT;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
153
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
// NOTE(review): the function signature (original line 185) and all of the
// `case` labels (original lines 187, 190, 193, 196, 199) were dropped by the
// HTML extraction — only the embedded numbering jumps reveal them. Judging by
// the printed strings this is presumably the printer for a debug-record
// location-kind enum; restore the missing lines from the original file.
 186 switch (Type) {
 188 *OS << "value";
 189 break;
 191 *OS << "declare";
 192 break;
 194 *OS << "assign";
 195 break;
 197 *OS << "end";
 198 break;
 200 *OS << "any";
 201 break;
 202 };
 203 }
204
205 void Write(const Metadata *MD) {
206 if (!MD)
207 return;
208 MD->print(*OS, MST, &M);
209 *OS << '\n';
210 }
211
 212 // Unwrap a typed MDTuple wrapper and print the underlying tuple via the
 212 // Metadata overload above.
 212 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
 213 Write(MD.get());
 214 }
215
216 void Write(const NamedMDNode *NMD) {
217 if (!NMD)
218 return;
219 NMD->print(*OS, MST);
220 *OS << '\n';
221 }
222
223 void Write(Type *T) {
224 if (!T)
225 return;
226 *OS << ' ' << *T;
227 }
228
229 void Write(const Comdat *C) {
230 if (!C)
231 return;
232 *OS << *C;
233 }
234
235 void Write(const APInt *AI) {
236 if (!AI)
237 return;
238 *OS << *AI << '\n';
239 }
240
241 void Write(const unsigned i) { *OS << i << '\n'; }
242
243 // NOLINTNEXTLINE(readability-identifier-naming)
244 void Write(const Attribute *A) {
245 if (!A)
246 return;
247 *OS << A->getAsString() << '\n';
248 }
249
250 // NOLINTNEXTLINE(readability-identifier-naming)
251 void Write(const AttributeSet *AS) {
252 if (!AS)
253 return;
254 *OS << AS->getAsString() << '\n';
255 }
256
257 // NOLINTNEXTLINE(readability-identifier-naming)
258 void Write(const AttributeList *AL) {
259 if (!AL)
260 return;
261 AL->print(*OS);
262 }
263
264 void Write(Printable P) { *OS << P << '\n'; }
265
266 template <typename T> void Write(ArrayRef<T> Vs) {
267 for (const T &V : Vs)
268 Write(V);
269 }
270
271 template <typename T1, typename... Ts>
272 void WriteTs(const T1 &V1, const Ts &... Vs) {
273 Write(V1);
274 WriteTs(Vs...);
275 }
276
277 template <typename... Ts> void WriteTs() {}
278
279public:
 280 /// A check failed, so print out the condition and the message.
 281 ///
 282 /// This provides a nice place to put a breakpoint if you want to see why
 283 /// something is not correct.
 284 void CheckFailed(const Twine &Message) {
 285 if (OS)
 286 *OS << Message << '\n';
 287 Broken = true;
// Note: Broken latches — it is only reset by the verify() entry points.
 288 }
289
 290 /// A check failed (with values to print).
 291 ///
 292 /// This calls the Message-only overload above so that it stays the single
 293 /// place to set a breakpoint on.
// The offending values are then dumped through the matching Write overloads.
 294 template <typename T1, typename... Ts>
 295 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
 296 CheckFailed(Message);
 297 if (OS)
 298 WriteTs(V1, Vs...);
 299 }
300
 301 /// A debug info check failed.
 302 void DebugInfoCheckFailed(const Twine &Message) {
 303 if (OS)
 304 *OS << Message << '\n';
// NOTE(review): the embedded numbering jumps 304 -> 306, so one statement was
// dropped by the extraction here — presumably the one that also latches
// Broken when broken debug info is treated as an error (there is a
// TreatBrokenDebugInfoAsError flag in this struct). Confirm against the
// original file before relying on this copy.
 306 BrokenDebugInfo = true;
 307 }
308
 309 /// A debug info check failed (with values to print).
// Mirrors the variadic CheckFailed: report the message first, then dump the
// offending values through the Write overloads.
 310 template <typename T1, typename... Ts>
 311 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
 312 const Ts &... Vs) {
 313 DebugInfoCheckFailed(Message);
 314 if (OS)
 315 WriteTs(V1, Vs...);
 316 }
317};
318
319namespace {
320
// NOTE(review): this listing was recovered from an HTML-rendered page; the
// leading digits on each line are the original file's line numbers fused into
// the text. Wherever that numbering jumps (e.g. 332 -> 334) the intervening
// line — usually a member declaration — was dropped by the extraction and
// must be restored from the original Verifier.cpp.
321class Verifier : public InstVisitor<Verifier>, VerifierSupport {
322 friend class InstVisitor<Verifier>;
323 DominatorTree DT;
324
325 /// When verifying a basic block, keep track of all of the
326 /// instructions we have seen so far.
327 ///
328 /// This allows us to do efficient dominance checks for the case when an
329 /// instruction has an operand that is an instruction in the same block.
330 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
331
332 /// Keep track of the metadata nodes that have been checked already.
// NOTE(review): member declaration dropped here (numbering jumps 332 -> 334).
334
335 /// Keep track which DISubprogram is attached to which function.
// NOTE(review): declaration dropped (335 -> 337); the name
// DISubprogramAttachments is used in verify() below.
337
338 /// Track all DICompileUnits visited.
// NOTE(review): member declaration dropped here (338 -> 340).
340
341 /// The result type for a landingpad.
342 Type *LandingPadResultTy;
343
344 /// Whether we've seen a call to @llvm.localescape in this function
345 /// already.
346 bool SawFrameEscape;
347
348 /// Whether the current function has a DISubprogram attached to it.
349 bool HasDebugInfo = false;
350
351 /// Stores the count of how many objects were passed to llvm.localescape for a
352 /// given function and the largest index passed to llvm.localrecover.
// NOTE(review): member declaration dropped here (352 -> 354).
354
355 // Maps catchswitches and cleanuppads that unwind to siblings to the
356 // terminators that indicate the unwind, used to detect cycles therein.
// NOTE(review): declaration dropped (356 -> 358); the name SiblingFuncletInfo
// is used in verify(const Function &) below.
358
359 /// Cache which blocks are in which funclet, if an EH funclet personality is
360 /// in use. Otherwise empty.
361 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
362
363 /// Cache of constants visited in search of ConstantExprs.
364 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
365
366 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
367 SmallVector<const Function *, 4> DeoptimizeDeclarations;
368
369 /// Cache of attribute lists verified.
370 SmallPtrSet<const void *, 32> AttributeListsVisited;
371
372 // Verify that this GlobalValue is only used in this module.
373 // This map is used to avoid visiting uses twice. We can arrive at a user
374 // twice, if they have multiple operands. In particular for very large
375 // constant expressions, we can arrive at a particular user many times.
376 SmallPtrSet<const Value *, 32> GlobalValueVisited;
377
378 // Keeps track of duplicate function argument debug info.
// NOTE(review): declaration dropped (378 -> 380); the name DebugFnArgs is
// used in verify(const Function &) below.
380
381 TBAAVerifier TBAAVerifyHelper;
382 ConvergenceVerifier ConvergenceVerifyHelper;
383
384 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
385
386 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
387
388public:
389 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
390 const Module &M)
391 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
392 SawFrameEscape(false), TBAAVerifyHelper(this) {
393 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
394 }
395
396 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
397
// Verify a single function: checks basic-block well-formedness first, then
// recomputes dominance locally and runs the InstVisitor-based checks.
// Per-function scratch state is cleared before returning.
398 bool verify(const Function &F) {
399 llvm::TimeTraceScope timeScope("Verifier");
400 assert(F.getParent() == &M &&
401 "An instance of this class only works with a specific module!");
402
403 // First ensure the function is well-enough formed to compute dominance
404 // information, and directly compute a dominance tree. We don't rely on the
405 // pass manager to provide this as it isolates us from a potentially
406 // out-of-date dominator tree and makes it significantly more complex to run
407 // this code outside of a pass manager.
408 // FIXME: It's really gross that we have to cast away constness here.
409 if (!F.empty())
410 DT.recalculate(const_cast<Function &>(F));
411
412 for (const BasicBlock &BB : F) {
413 if (!BB.empty() && BB.back().isTerminator())
414 continue;
415
416 if (OS) {
417 *OS << "Basic Block in function '" << F.getName()
418 << "' does not have terminator!\n";
419 BB.printAsOperand(*OS, true, MST);
420 *OS << "\n";
421 }
422 return false;
423 }
424
425 auto FailureCB = [this](const Twine &Message) {
426 this->CheckFailed(Message);
427 };
428 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
429
430 Broken = false;
431 // FIXME: We strip const here because the inst visitor strips const.
432 visit(const_cast<Function &>(F));
433 verifySiblingFuncletUnwinds();
434
435 if (ConvergenceVerifyHelper.sawTokens())
436 ConvergenceVerifyHelper.verify(DT);
437
438 InstsInThisBlock.clear();
439 DebugFnArgs.clear();
440 LandingPadResultTy = nullptr;
441 SawFrameEscape = false;
442 SiblingFuncletInfo.clear();
443 verifyNoAliasScopeDecl();
444 NoAliasScopeDecls.clear();
445
446 return !Broken;
447 }
448
449 /// Verify the module that this instance of \c Verifier was initialized with.
450 bool verify() {
451 Broken = false;
452
453 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
454 for (const Function &F : M)
455 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
456 DeoptimizeDeclarations.push_back(&F);
457
458 // Now that we've visited every function, verify that we never asked to
459 // recover a frame index that wasn't escaped.
460 verifyFrameRecoverIndices();
461 for (const GlobalVariable &GV : M.globals())
462 visitGlobalVariable(GV);
463
464 for (const GlobalAlias &GA : M.aliases())
465 visitGlobalAlias(GA);
466
467 for (const GlobalIFunc &GI : M.ifuncs())
468 visitGlobalIFunc(GI);
469
470 for (const NamedMDNode &NMD : M.named_metadata())
471 visitNamedMDNode(NMD);
472
473 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
474 visitComdat(SMEC.getValue());
475
476 visitModuleFlags();
477 visitModuleIdents();
478 visitModuleCommandLines();
479 visitModuleErrnoTBAA();
480
481 verifyCompileUnits();
482
483 verifyDeoptimizeCallingConvs();
484 DISubprogramAttachments.clear();
485 return !Broken;
486 }
487
488private:
489 /// Whether a metadata node is allowed to be, or contain, a DILocation.
490 enum class AreDebugLocsAllowed { No, Yes };
491
492 /// Metadata that should be treated as a range, with slightly different
493 /// requirements.
494 enum class RangeLikeMetadataKind {
495 Range, // MD_range
496 AbsoluteSymbol, // MD_absolute_symbol
497 NoaliasAddrspace // MD_noalias_addrspace
498 };
499
500 // Verification methods...
501 void visitGlobalValue(const GlobalValue &GV);
502 void visitGlobalVariable(const GlobalVariable &GV);
503 void visitGlobalAlias(const GlobalAlias &GA);
504 void visitGlobalIFunc(const GlobalIFunc &GI);
505 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
506 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
507 const GlobalAlias &A, const Constant &C);
508 void visitNamedMDNode(const NamedMDNode &NMD);
509 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
510 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
511 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
512 void visitDIArgList(const DIArgList &AL, Function *F);
513 void visitComdat(const Comdat &C);
514 void visitModuleIdents();
515 void visitModuleCommandLines();
516 void visitModuleErrnoTBAA();
517 void visitModuleFlags();
518 void visitModuleFlag(const MDNode *Op,
519 DenseMap<const MDString *, const MDNode *> &SeenIDs,
520 SmallVectorImpl<const MDNode *> &Requirements);
521 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
522 void visitFunction(const Function &F);
523 void visitBasicBlock(BasicBlock &BB);
524 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
525 RangeLikeMetadataKind Kind);
526 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
527 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitNofreeMetadata(Instruction &I, MDNode *MD);
530 void visitProfMetadata(Instruction &I, MDNode *MD);
531 void visitCallStackMetadata(MDNode *MD);
532 void visitMemProfMetadata(Instruction &I, MDNode *MD);
533 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
534 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
535 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
536 void visitMMRAMetadata(Instruction &I, MDNode *MD);
537 void visitAnnotationMetadata(MDNode *Annotation);
538 void visitAliasScopeMetadata(const MDNode *MD);
539 void visitAliasScopeListMetadata(const MDNode *MD);
540 void visitAccessGroupMetadata(const MDNode *MD);
541 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
542 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
543
544 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
545#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
546#include "llvm/IR/Metadata.def"
547 void visitDIScope(const DIScope &N);
548 void visitDIVariable(const DIVariable &N);
549 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
550 void visitDITemplateParameter(const DITemplateParameter &N);
551
552 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
553
554 void visit(DbgLabelRecord &DLR);
555 void visit(DbgVariableRecord &DVR);
556 // InstVisitor overrides...
557 using InstVisitor<Verifier>::visit;
558 void visitDbgRecords(Instruction &I);
559 void visit(Instruction &I);
560
561 void visitTruncInst(TruncInst &I);
562 void visitZExtInst(ZExtInst &I);
563 void visitSExtInst(SExtInst &I);
564 void visitFPTruncInst(FPTruncInst &I);
565 void visitFPExtInst(FPExtInst &I);
566 void visitFPToUIInst(FPToUIInst &I);
567 void visitFPToSIInst(FPToSIInst &I);
568 void visitUIToFPInst(UIToFPInst &I);
569 void visitSIToFPInst(SIToFPInst &I);
570 void visitIntToPtrInst(IntToPtrInst &I);
571 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
572 void visitPtrToAddrInst(PtrToAddrInst &I);
573 void visitPtrToIntInst(PtrToIntInst &I);
574 void visitBitCastInst(BitCastInst &I);
575 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
576 void visitPHINode(PHINode &PN);
577 void visitCallBase(CallBase &Call);
578 void visitUnaryOperator(UnaryOperator &U);
579 void visitBinaryOperator(BinaryOperator &B);
580 void visitICmpInst(ICmpInst &IC);
581 void visitFCmpInst(FCmpInst &FC);
582 void visitExtractElementInst(ExtractElementInst &EI);
583 void visitInsertElementInst(InsertElementInst &EI);
584 void visitShuffleVectorInst(ShuffleVectorInst &EI);
585 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
586 void visitCallInst(CallInst &CI);
587 void visitInvokeInst(InvokeInst &II);
588 void visitGetElementPtrInst(GetElementPtrInst &GEP);
589 void visitLoadInst(LoadInst &LI);
590 void visitStoreInst(StoreInst &SI);
591 void verifyDominatesUse(Instruction &I, unsigned i);
592 void visitInstruction(Instruction &I);
593 void visitTerminator(Instruction &I);
594 void visitBranchInst(BranchInst &BI);
595 void visitReturnInst(ReturnInst &RI);
596 void visitSwitchInst(SwitchInst &SI);
597 void visitIndirectBrInst(IndirectBrInst &BI);
598 void visitCallBrInst(CallBrInst &CBI);
599 void visitSelectInst(SelectInst &SI);
600 void visitUserOp1(Instruction &I);
601 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
602 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
603 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
604 void visitVPIntrinsic(VPIntrinsic &VPI);
605 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
606 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
607 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
608 void visitFenceInst(FenceInst &FI);
609 void visitAllocaInst(AllocaInst &AI);
610 void visitExtractValueInst(ExtractValueInst &EVI);
611 void visitInsertValueInst(InsertValueInst &IVI);
612 void visitEHPadPredecessors(Instruction &I);
613 void visitLandingPadInst(LandingPadInst &LPI);
614 void visitResumeInst(ResumeInst &RI);
615 void visitCatchPadInst(CatchPadInst &CPI);
616 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
617 void visitCleanupPadInst(CleanupPadInst &CPI);
618 void visitFuncletPadInst(FuncletPadInst &FPI);
619 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
620 void visitCleanupReturnInst(CleanupReturnInst &CRI);
621
622 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
623 void verifySwiftErrorValue(const Value *SwiftErrorVal);
624 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
625 void verifyMustTailCall(CallInst &CI);
626 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
627 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
628 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
629 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
630 const Value *V);
631 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
632 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
633 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
634 void verifyUnknownProfileMetadata(MDNode *MD);
635 void visitConstantExprsRecursively(const Constant *EntryC);
636 void visitConstantExpr(const ConstantExpr *CE);
637 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
638 void verifyInlineAsmCall(const CallBase &Call);
639 void verifyStatepoint(const CallBase &Call);
640 void verifyFrameRecoverIndices();
641 void verifySiblingFuncletUnwinds();
642
643 void verifyFragmentExpression(const DbgVariableRecord &I);
644 template <typename ValueOrMetadata>
645 void verifyFragmentExpression(const DIVariable &V,
// NOTE(review): a parameter line was dropped here (numbering jumps 645 -> 647).
647 ValueOrMetadata *Desc);
648 void verifyFnArgs(const DbgVariableRecord &DVR);
649 void verifyNotEntryValue(const DbgVariableRecord &I);
650
651 /// Module-level debug info verification...
652 void verifyCompileUnits();
653
654 /// Module-level verification that all @llvm.experimental.deoptimize
655 /// declarations share the same calling convention.
656 void verifyDeoptimizeCallingConvs();
657
658 void verifyAttachedCallBundle(const CallBase &Call,
659 const OperandBundleUse &BU);
660
661 /// Verify the llvm.experimental.noalias.scope.decl declarations
662 void verifyNoAliasScopeDecl();
663};
664
665} // end anonymous namespace
666
667/// We know that cond should be true, if not print an error message.
// Reports the failure through CheckFailed(...) and then returns from the
// enclosing visit method, so each visitor stops at its first failed check.
// The do/while(false) wrapper makes the macro behave as a single statement.
668#define Check(C, ...) \
669 do { \
670 if (!(C)) { \
671 CheckFailed(__VA_ARGS__); \
672 return; \
673 } \
674 } while (false)
675
676/// We know that a debug info condition should be true, if not print
677/// an error message.
// Same shape as Check, but routes through DebugInfoCheckFailed so the
// failure is recorded as broken *debug info* rather than a broken module.
678#define CheckDI(C, ...) \
679 do { \
680 if (!(C)) { \
681 DebugInfoCheckFailed(__VA_ARGS__); \
682 return; \
683 } \
684 } while (false)
685
// Verify the debug records attached to instruction I: marker consistency,
// the no-records-on-PHIs rule, and each record's location/contents.
686void Verifier::visitDbgRecords(Instruction &I) {
687 if (!I.DebugMarker)
688 return;
689 CheckDI(I.DebugMarker->MarkedInstr == &I,
690 "Instruction has invalid DebugMarker", &I);
691 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
692 "PHI Node must not have any attached DbgRecords", &I);
693 for (DbgRecord &DR : I.getDbgRecordRange()) {
694 CheckDI(DR.getMarker() == I.DebugMarker,
695 "DbgRecord had invalid DebugMarker", &I, &DR);
696 if (auto *Loc =
// NOTE(review): the extraction dropped original line 697 here — the
// initializer completing this `if`, which presumably obtains the record's
// debug location as a DILocation. Confirm against the original file.
698 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
699 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
700 visit(*DVR);
701 // These have to appear after `visit` for consistency with existing
702 // intrinsic behaviour.
703 verifyFragmentExpression(*DVR);
704 verifyNotEntryValue(*DVR);
705 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
706 visit(*DLR);
707 }
708 }
709}
710
// Entry point for per-instruction verification: check the attached debug
// records and operands, then dispatch to the specific visitor.
711void Verifier::visit(Instruction &I) {
712 visitDbgRecords(I);
713 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
714 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
// NOTE(review): original line 715 was dropped by the extraction — presumably
// the delegation into InstVisitor<Verifier>::visit(I). Confirm against the
// original file.
716}
717
718// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
// Worklist-based traversal of a value's (transitive) users; the Visited set
// prevents revisiting a user reachable through multiple operands.
719static void forEachUser(const Value *User,
// NOTE(review): the extraction dropped original line 720 here — the
// `Visited` set parameter referenced in the body below.
721 llvm::function_ref<bool(const Value *)> Callback) {
722 if (!Visited.insert(User).second)
723 return;
724
// NOTE(review): the extraction dropped original line 725 here — the
// declaration/initialization of the WorkList used below (seeded from the
// users of `User`). Confirm against the original file.
726 while (!WorkList.empty()) {
727 const Value *Cur = WorkList.pop_back_val();
728 if (!Visited.insert(Cur).second)
729 continue;
730 if (Callback(Cur))
731 append_range(WorkList, Cur->materialized_users());
732 }
733}
734
// Verify properties common to all GlobalValues: linkage/visibility rules,
// !associated and !absolute_symbol metadata, DLL storage classes, and that
// every user of the global lives in this module.
// NOTE(review): recovered from an HTML listing; several Check(...) opener
// lines were dropped by the extraction wherever the embedded numbering jumps
// (735->737, 770->772, 783->785, 788->790, 794->797). Restore them from the
// original file — only their message/argument lines survive below.
735void Verifier::visitGlobalValue(const GlobalValue &GV) {
 737 "Global is external, but doesn't have external or weak linkage!", &GV);
 738
 739 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
 740 if (const MDNode *Associated =
 741 GO->getMetadata(LLVMContext::MD_associated)) {
 742 Check(Associated->getNumOperands() == 1,
 743 "associated metadata must have one operand", &GV, Associated);
 744 const Metadata *Op = Associated->getOperand(0).get();
 745 Check(Op, "associated metadata must have a global value", GO, Associated);
 746
 747 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
 748 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
 749 if (VM) {
 750 Check(isa<PointerType>(VM->getValue()->getType()),
 751 "associated value must be pointer typed", GV, Associated);
 752
 753 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
 754 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
 755 "associated metadata must point to a GlobalObject", GO, Stripped);
 756 Check(Stripped != GO,
 757 "global values should not associate to themselves", GO,
 758 Associated);
 759 }
 760 }
 761
 762 // FIXME: Why is getMetadata on GlobalValue protected?
 763 if (const MDNode *AbsoluteSymbol =
 764 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
 765 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
 766 DL.getIntPtrType(GO->getType()),
 767 RangeLikeMetadataKind::AbsoluteSymbol);
 768 }
 769 }
 770
 772 "Only global variables can have appending linkage!", &GV);
 773
 774 if (GV.hasAppendingLinkage()) {
 775 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
 776 Check(GVar && GVar->getValueType()->isArrayTy(),
 777 "Only global arrays can have appending linkage!", GVar);
 778 }
 779
 780 if (GV.isDeclarationForLinker())
 781 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
 782
 783 if (GV.hasDLLExportStorageClass()) {
 785 "dllexport GlobalValue must have default or protected visibility",
 786 &GV);
 787 }
 788 if (GV.hasDLLImportStorageClass()) {
 790 "dllimport GlobalValue must have default visibility", &GV);
 791 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
 792 &GV);
 793
 794 Check((GV.isDeclaration() &&
 797 "Global is marked as dllimport, but not external", &GV);
 798 }
 799
 800 if (GV.isImplicitDSOLocal())
 801 Check(GV.isDSOLocal(),
 802 "GlobalValue with local linkage or non-default "
 803 "visibility must be dso_local!",
 804 &GV);
 805
 806 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
 807 if (const Instruction *I = dyn_cast<Instruction>(V)) {
 808 if (!I->getParent() || !I->getParent()->getParent())
 809 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
 810 I);
 811 else if (I->getParent()->getParent()->getParent() != &M)
 812 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
 813 I->getParent()->getParent(),
 814 I->getParent()->getParent()->getParent());
 815 return false;
 816 } else if (const Function *F = dyn_cast<Function>(V)) {
 817 if (F->getParent() != &M)
 818 CheckFailed("Global is used by function in a different module", &GV, &M,
 819 F, F->getParent());
 820 return false;
 821 }
 822 return true;
 823 });
824}
825
// Verify a GlobalVariable: alignment limits, initializer/type agreement,
// 'common' linkage rules, the special llvm.global_ctors/dtors and
// llvm.used/llvm.compiler.used layouts, !dbg attachments, and that the type
// is neither scalable nor a disallowed target extension type.
// NOTE(review): recovered from an HTML listing; lines were dropped wherever
// the embedded numbering jumps (838->840, 844->846, 854->856->858,
// 880->882->884, 895->898, 906->908, 922->924) — mostly Check(...) openers
// plus the local MDs vector declaration before line 908. Restore them from
// the original file.
826void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
 827 Type *GVType = GV.getValueType();
 828
 829 if (MaybeAlign A = GV.getAlign()) {
 830 Check(A->value() <= Value::MaximumAlignment,
 831 "huge alignment values are unsupported", &GV);
 832 }
 833
 834 if (GV.hasInitializer()) {
 835 Check(GV.getInitializer()->getType() == GVType,
 836 "Global variable initializer type does not match global "
 837 "variable type!",
 838 &GV);
 840 "Global variable initializer must be sized", &GV);
 841 visitConstantExprsRecursively(GV.getInitializer());
 842 // If the global has common linkage, it must have a zero initializer and
 843 // cannot be constant.
 844 if (GV.hasCommonLinkage()) {
 846 "'common' global must have a zero initializer!", &GV);
 847 Check(!GV.isConstant(), "'common' global may not be marked constant!",
 848 &GV);
 849 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
 850 }
 851 }
 852
 853 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
 854 GV.getName() == "llvm.global_dtors")) {
 856 "invalid linkage for intrinsic global variable", &GV);
 858 "invalid uses of intrinsic global variable", &GV);
 859
 860 // Don't worry about emitting an error for it not being an array,
 861 // visitGlobalValue will complain on appending non-array.
 862 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
 863 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
 864 PointerType *FuncPtrTy =
 865 PointerType::get(Context, DL.getProgramAddressSpace());
 866 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
 867 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
 868 STy->getTypeAtIndex(1) == FuncPtrTy,
 869 "wrong type for intrinsic global variable", &GV);
 870 Check(STy->getNumElements() == 3,
 871 "the third field of the element type is mandatory, "
 872 "specify ptr null to migrate from the obsoleted 2-field form");
 873 Type *ETy = STy->getTypeAtIndex(2);
 874 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
 875 &GV);
 876 }
 877 }
 878
 879 if (GV.hasName() && (GV.getName() == "llvm.used" ||
 880 GV.getName() == "llvm.compiler.used")) {
 882 "invalid linkage for intrinsic global variable", &GV);
 884 "invalid uses of intrinsic global variable", &GV);
 885
 886 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
 887 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
 888 Check(PTy, "wrong type for intrinsic global variable", &GV);
 889 if (GV.hasInitializer()) {
 890 const Constant *Init = GV.getInitializer();
 891 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
 892 Check(InitArray, "wrong initializer for intrinsic global variable",
 893 Init);
 894 for (Value *Op : InitArray->operands()) {
 895 Value *V = Op->stripPointerCasts();
 898 Twine("invalid ") + GV.getName() + " member", V);
 899 Check(V->hasName(),
 900 Twine("members of ") + GV.getName() + " must be named", V);
 901 }
 902 }
 903 }
 904 }
 905
 906 // Visit any debug info attachments.
 908 GV.getMetadata(LLVMContext::MD_dbg, MDs);
 909 for (auto *MD : MDs) {
 910 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
 911 visitDIGlobalVariableExpression(*GVE);
 912 else
 913 CheckDI(false, "!dbg attachment of global variable must be a "
 914 "DIGlobalVariableExpression");
 915 }
 916
 917 // Scalable vectors cannot be global variables, since we don't know
 918 // the runtime size.
 919 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
 920
 921 // Check if it is or contains a target extension type that disallows being
 922 // used as a global.
 924 "Global @" + GV.getName() + " has illegal target extension type",
 925 GVType);
 926
 927 if (!GV.hasInitializer()) {
 928 visitGlobalValue(GV);
 929 return;
 930 }
 931
 932 // Walk any aggregate initializers looking for bitcasts between address spaces
 933 visitConstantExprsRecursively(GV.getInitializer());
 934
 935 visitGlobalValue(GV);
936}
937
938void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
939 SmallPtrSet<const GlobalAlias*, 4> Visited;
940 Visited.insert(&GA);
941 visitAliaseeSubExpr(Visited, GA, C);
942}
943
// Recursively verify an aliasee expression: available_externally matching,
// no aliases to bare declarations, no alias cycles, no aliases to
// interposable aliases; recurses through ConstantExpr operands.
944void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
 945 const GlobalAlias &GA, const Constant &C) {
// NOTE(review): the extraction dropped original lines 946-947 here — the
// opening of the available_externally condition and its Check(...) call;
// only the argument/message lines survive. Restore from the original file.
 948 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
 949 "available_externally alias must point to available_externally "
 950 "global value",
 951 &GA);
 952 }
 953 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
// NOTE(review): original line 954 was dropped here — a guard condition
// preceding this Check. Confirm against the original file.
 955 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
 956 &GA);
 957 }
 958
 959 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
 960 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
 961
 962 Check(!GA2->isInterposable(),
 963 "Alias cannot point to an interposable alias", &GA);
 964 } else {
 965 // Only continue verifying subexpressions of GlobalAliases.
 966 // Do not recurse into global initializers.
 967 return;
 968 }
 969 }
 970
 971 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
 972 visitConstantExprsRecursively(CE);
 973
 974 for (const Use &U : C.operands()) {
 975 Value *V = &*U;
 976 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
 977 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
 978 else if (const auto *C2 = dyn_cast<Constant>(V))
 979 visitAliaseeSubExpr(Visited, GA, *C2);
 980 }
981}
982
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  // Verify the alias's linkage, its aliasee's presence/type, then walk the
  // aliasee expression for cycles and interposable targets.
  // NOTE(review): the Check(...) opener for the linkage test is elided in
  // this excerpt.
          "Alias should have private, internal, linkonce, weak, linkonce_odr, "
          "weak_odr, external, or available_externally linkage!",
          &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  // The alias must carry exactly the aliasee's type.
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Recursively verify the aliasee expression (cycle detection etc.).
  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
1000
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  // An ifunc is verified as a global value, then its metadata attachments,
  // linkage, and resolver are checked.
  visitGlobalValue(GI);

  // NOTE(review): the declaration of the MDs vector is elided in this
  // excerpt.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // ifuncs may carry metadata, but !dbg and !prof are explicitly banned.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

  // NOTE(review): the Check(...) opener for the linkage test is elided in
  // this excerpt.
          "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
          "weak_odr, or external linkage!",
          &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

  // NOTE(review): the Check(...) opener for the pointer-return test is elided
  // in this excerpt.
        "IFunc resolver must return a pointer", &GI);

  // The resolver's immediate type must be a pointer in the ifunc's own
  // address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1035
1036void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1037 // There used to be various other llvm.dbg.* nodes, but we don't support
1038 // upgrading them and we want to reserve the namespace for future uses.
1039 if (NMD.getName().starts_with("llvm.dbg."))
1040 CheckDI(NMD.getName() == "llvm.dbg.cu",
1041 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1042 for (const MDNode *MD : NMD.operands()) {
1043 if (NMD.getName() == "llvm.dbg.cu")
1044 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1045
1046 if (!MD)
1047 continue;
1048
1049 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1050 }
1051}
1052
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each concrete MDNode subclass;
  // plain tuples need no subclass-specific checks.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Generic operand checks: no function-local values at global scope, and
  // DILocations only where the caller allows them.
  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  // NOTE(review): the name-match condition and the extraction of `Count`
  // are elided in this excerpt.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1107
1108void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1109 Check(MD.getValue(), "Expected valid value", &MD);
1110 Check(!MD.getValue()->getType()->isMetadataTy(),
1111 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1112
1113 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1114 if (!L)
1115 return;
1116
1117 Check(F, "function-local metadata used outside a function", L);
1118
1119 // If this was an instruction, bb, or argument, verify that it is in the
1120 // function that we expect.
1121 Function *ActualF = nullptr;
1122 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1123 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1124 ActualF = I->getParent()->getParent();
1125 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1126 ActualF = BB->getParent();
1127 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1128 ActualF = A->getParent();
1129 assert(ActualF && "Unimplemented function local metadata case!");
1130
1131 Check(ActualF == F, "function-local metadata used in wrong function", L);
1132}
1133
1134void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1135 for (const ValueAsMetadata *VAM : AL.getArgs())
1136 visitValueAsMetadata(*VAM, F);
1137}
1138
1139void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1140 Metadata *MD = MDV.getMetadata();
1141 if (auto *N = dyn_cast<MDNode>(MD)) {
1142 visitMDNode(*N, AreDebugLocsAllowed::No);
1143 return;
1144 }
1145
1146 // Only visit each node once. Metadata can be mutually recursive, so this
1147 // avoids infinite recursion here, as well as being an optimization.
1148 if (!MDNodes.insert(MD).second)
1149 return;
1150
1151 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1152 visitValueAsMetadata(*V, F);
1153
1154 if (auto *AL = dyn_cast<DIArgList>(MD))
1155 visitDIArgList(*AL, F);
1156}
1157
1158static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1159static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1160static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1161
1162void Verifier::visitDILocation(const DILocation &N) {
1163 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1164 "location requires a valid scope", &N, N.getRawScope());
1165 if (auto *IA = N.getRawInlinedAt())
1166 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1167 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1168 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1169}
1170
1171void Verifier::visitGenericDINode(const GenericDINode &N) {
1172 CheckDI(N.getTag(), "invalid tag", &N);
1173}
1174
1175void Verifier::visitDIScope(const DIScope &N) {
1176 if (auto *F = N.getRawFile())
1177 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1178}
1179
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  // Bounds, stride, and bias may each be absent, a constant, a DIVariable,
  // or a DIExpression.
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  // NOTE(review): the CheckDI opener for the size test is elided in this
  // excerpt.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1207
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are mutually exclusive.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  // A constant count must be >= -1 (-1 encodes "unknown").
  // NOTE(review): the first part of this condition is elided in this excerpt.
  auto Count = N.getCount();
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1235
1236void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1237 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1238 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1239 "GenericSubrange can have any one of count or upperBound", &N);
1240 auto *CBound = N.getRawCountNode();
1241 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1242 "Count must be signed constant or DIVariable or DIExpression", &N);
1243 auto *LBound = N.getRawLowerBound();
1244 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1245 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1246 "LowerBound must be signed constant or DIVariable or DIExpression",
1247 &N);
1248 auto *UBound = N.getRawUpperBound();
1249 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1250 "UpperBound must be signed constant or DIVariable or DIExpression",
1251 &N);
1252 auto *Stride = N.getRawStride();
1253 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1254 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1255 "Stride must be signed constant or DIVariable or DIExpression", &N);
1256}
1257
1258void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1259 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1260}
1261
void Verifier::visitDIBasicType(const DIBasicType &N) {
  // Base, unspecified, and string types all share this visitor.
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  // NOTE(review): the CheckDI opener for the size test is elided in this
  // excerpt.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1272
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Fixed-point types are DW_TAG_base_type nodes with fixed-point encodings;
  // run the common basic-type checks first.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
  // NOTE(review): the kind check and the condition openers for the
  // rational/non-rational consistency tests are elided in this excerpt.
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1291
1292void Verifier::visitDIStringType(const DIStringType &N) {
1293 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1294 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1295 &N);
1296}
1297
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  // Derived types cover typedefs, pointers/references, qualifiers, members,
  // static members, inheritance, friends, set types, and template aliases.
  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // Pointer-to-member types keep the class type in extraData.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  }

  // Set types may only be built over enumerations, subranges, or a small set
  // of basic-type encodings.
  // NOTE(review): the dyn_cast lines producing Enum/Subrange/Basic are elided
  // in this excerpt.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // A DWARF address space only makes sense on pointer/reference types.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  // NOTE(review): the CheckDI opener for the size test is elided in this
  // excerpt.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1359
1360/// Detect mutually exclusive flags.
1361static bool hasConflictingReferenceFlags(unsigned Flags) {
1362 return ((Flags & DINode::FlagLValueReference) &&
1363 (Flags & DINode::FlagRValueReference)) ||
1364 ((Flags & DINode::FlagTypePassByValue) &&
1365 (Flags & DINode::FlagTypePassByReference));
1366}
1367
1368void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1369 auto *Params = dyn_cast<MDTuple>(&RawParams);
1370 CheckDI(Params, "invalid template params", &N, &RawParams);
1371 for (Metadata *Op : Params->operands()) {
1372 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1373 &N, Params, Op);
1374 }
1375}
1376
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
  // NOTE(review): the CheckDI opener (reference-flag conflict test) is elided
  // in this excerpt.
          "invalid reference flags", &N);
  // DIBlockByRefStruct occupied bit 4 of the flags; the flag was retired and
  // is rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): this message says "DISubprogram" although the node is a
  // DICompositeType — looks like copy/paste in the diagnostic text; confirm
  // upstream before changing.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // A vector is an array with exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank are array-type-only properties.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  // NOTE(review): the CheckDI opener for the size test is elided in this
  // excerpt.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1451
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  // The type array, when present, must be a tuple whose entries are all type
  // refs (null entries are accepted by isType, e.g. for void).
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  // NOTE(review): the CheckDI opener (reference-flag conflict test) is elided
  // in this excerpt.
          "invalid reference flags", &N);
}
1463
1464void Verifier::visitDIFile(const DIFile &N) {
1465 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1466 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1467 if (Checksum) {
1468 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1469 "invalid checksum kind", &N);
1470 size_t Size;
1471 switch (Checksum->Kind) {
1472 case DIFile::CSK_MD5:
1473 Size = 32;
1474 break;
1475 case DIFile::CSK_SHA1:
1476 Size = 40;
1477 break;
1478 case DIFile::CSK_SHA256:
1479 Size = 64;
1480 break;
1481 }
1482 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1483 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1484 "invalid checksum", &N);
1485 }
1486}
1487
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  // Each list below is optional; when present it must be an MDTuple whose
  // operands all have the expected node kind.
  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    // NOTE(review): the dyn_cast producing `Enum` is elided in this excerpt.
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained subprograms must be declarations, never definitions.
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    // NOTE(review): the per-operand CheckDI opener is elided in this excerpt.
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this CU; the set is checked elsewhere against llvm.dbg.cu.
  CUVisited.insert(&N);
}
1541
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    // A line number without a file makes no sense.
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration operand must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  // NOTE(review): the per-operand CheckDI opener for the retained-nodes check
  // is elided in this excerpt.
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);
    }
  }
  // NOTE(review): the CheckDI opener (reference-flag conflict test) is elided
  // in this excerpt.
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1606
1607void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1608 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1609 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1610 "invalid local scope", &N, N.getRawScope());
1611 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1612 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1613}
1614
1615void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1616 visitDILexicalBlockBase(N);
1617
1618 CheckDI(N.getLine() || !N.getColumn(),
1619 "cannot have column info without line info", &N);
1620}
1621
1622void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1623 visitDILexicalBlockBase(N);
1624}
1625
1626void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1627 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1628 if (auto *S = N.getRawScope())
1629 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1630 if (auto *S = N.getRawDecl())
1631 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1632}
1633
1634void Verifier::visitDINamespace(const DINamespace &N) {
1635 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1636 if (auto *S = N.getRawScope())
1637 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1638}
1639
1640void Verifier::visitDIMacro(const DIMacro &N) {
1641 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1642 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1643 "invalid macinfo type", &N);
1644 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1645 if (!N.getValue().empty()) {
1646 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1647 }
1648}
1649
1650void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1651 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1652 "invalid macinfo type", &N);
1653 if (auto *F = N.getRawFile())
1654 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1655
1656 if (auto *Array = N.getRawElements()) {
1657 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1658 for (Metadata *Op : N.getElements()->operands()) {
1659 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1660 }
1661 }
1662}
1663
1664void Verifier::visitDIModule(const DIModule &N) {
1665 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1666 CheckDI(!N.getName().empty(), "anonymous module", &N);
1667}
1668
1669void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1670 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1671}
1672
1673void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1674 visitDITemplateParameter(N);
1675
1676 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1677 &N);
1678}
1679
1680void Verifier::visitDITemplateValueParameter(
1681 const DITemplateValueParameter &N) {
1682 visitDITemplateParameter(N);
1683
1684 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1685 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1686 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1687 "invalid tag", &N);
1688}
1689
1690void Verifier::visitDIVariable(const DIVariable &N) {
1691 if (auto *S = N.getRawScope())
1692 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1693 if (auto *F = N.getRawFile())
1694 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1695}
1696
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  // NOTE(review): the CheckDI opener for the member check is elided in this
  // excerpt.
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1711
1712void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1713 // Checks common to all variables.
1714 visitDIVariable(N);
1715
1716 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1717 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1718 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1719 "local variable requires a valid scope", &N, N.getRawScope());
1720 if (auto Ty = N.getType())
1721 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1722}
1723
1724void Verifier::visitDIAssignID(const DIAssignID &N) {
1725 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1726 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1727}
1728
1729void Verifier::visitDILabel(const DILabel &N) {
1730 if (auto *S = N.getRawScope())
1731 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1732 if (auto *F = N.getRawFile())
1733 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1734
1735 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1736 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1737 "label requires a valid scope", &N, N.getRawScope());
1738}
1739
1740void Verifier::visitDIExpression(const DIExpression &N) {
1741 CheckDI(N.isValid(), "invalid expression", &N);
1742}
1743
1744void Verifier::visitDIGlobalVariableExpression(
1745 const DIGlobalVariableExpression &GVE) {
1746 CheckDI(GVE.getVariable(), "missing variable");
1747 if (auto *Var = GVE.getVariable())
1748 visitDIGlobalVariable(*Var);
1749 if (auto *Expr = GVE.getExpression()) {
1750 visitDIExpression(*Expr);
1751 if (auto Fragment = Expr->getFragmentInfo())
1752 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1753 }
1754}
1755
1756void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1757 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1758 if (auto *T = N.getRawType())
1759 CheckDI(isType(T), "invalid type ref", &N, T);
1760 if (auto *F = N.getRawFile())
1761 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1762}
1763
1764void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1765 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1766 N.getTag() == dwarf::DW_TAG_imported_declaration,
1767 "invalid tag", &N);
1768 if (auto *S = N.getRawScope())
1769 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1770 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1771 N.getRawEntity());
1772}
1773
1774void Verifier::visitComdat(const Comdat &C) {
1775 // In COFF the Module is invalid if the GlobalValue has private linkage.
1776 // Entities with private linkage don't have entries in the symbol table.
1777 if (TT.isOSBinFormatCOFF())
1778 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1779 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1780 GV);
1781}
1782
1783void Verifier::visitModuleIdents() {
1784 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1785 if (!Idents)
1786 return;
1787
1788 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1789 // Scan each llvm.ident entry and make sure that this requirement is met.
1790 for (const MDNode *N : Idents->operands()) {
1791 Check(N->getNumOperands() == 1,
1792 "incorrect number of operands in llvm.ident metadata", N);
1793 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1794 ("invalid value for llvm.ident metadata entry operand"
1795 "(the operand should be a string)"),
1796 N->getOperand(0));
1797 }
1798}
1799
1800void Verifier::visitModuleCommandLines() {
1801 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1802 if (!CommandLines)
1803 return;
1804
1805 // llvm.commandline takes a list of metadata entry. Each entry has only one
1806 // string. Scan each llvm.commandline entry and make sure that this
1807 // requirement is met.
1808 for (const MDNode *N : CommandLines->operands()) {
1809 Check(N->getNumOperands() == 1,
1810 "incorrect number of operands in llvm.commandline metadata", N);
1811 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1812 ("invalid value for llvm.commandline metadata entry operand"
1813 "(the operand should be a string)"),
1814 N->getOperand(0));
1815 }
1816}
1817
1818void Verifier::visitModuleErrnoTBAA() {
1819 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1820 if (!ErrnoTBAA)
1821 return;
1822
1823 Check(ErrnoTBAA->getNumOperands() >= 1,
1824 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1825
1826 for (const MDNode *N : ErrnoTBAA->operands())
1827 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1828}
1829
1830void Verifier::visitModuleFlags() {
1831 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1832 if (!Flags) return;
1833
1834 // Scan each flag, and track the flags and requirements.
1835 DenseMap<const MDString*, const MDNode*> SeenIDs;
1836 SmallVector<const MDNode*, 16> Requirements;
1837 uint64_t PAuthABIPlatform = -1;
1838 uint64_t PAuthABIVersion = -1;
1839 for (const MDNode *MDN : Flags->operands()) {
1840 visitModuleFlag(MDN, SeenIDs, Requirements);
1841 if (MDN->getNumOperands() != 3)
1842 continue;
1843 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1844 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1845 if (const auto *PAP =
1847 PAuthABIPlatform = PAP->getZExtValue();
1848 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1849 if (const auto *PAV =
1851 PAuthABIVersion = PAV->getZExtValue();
1852 }
1853 }
1854 }
1855
1856 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1857 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1858 "'aarch64-elf-pauthabi-version' module flags must be present");
1859
1860 // Validate that the requirements in the module are valid.
1861 for (const MDNode *Requirement : Requirements) {
1862 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1863 const Metadata *ReqValue = Requirement->getOperand(1);
1864
1865 const MDNode *Op = SeenIDs.lookup(Flag);
1866 if (!Op) {
1867 CheckFailed("invalid requirement on flag, flag is not present in module",
1868 Flag);
1869 continue;
1870 }
1871
1872 if (Op->getOperand(2) != ReqValue) {
1873 CheckFailed(("invalid requirement on flag, "
1874 "flag does not have the required value"),
1875 Flag);
1876 continue;
1877 }
1878 }
1879}
1880
1881void
1882Verifier::visitModuleFlag(const MDNode *Op,
1883 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1884 SmallVectorImpl<const MDNode *> &Requirements) {
1885 // Each module flag should have three arguments, the merge behavior (a
1886 // constant int), the flag ID (an MDString), and the value.
1887 Check(Op->getNumOperands() == 3,
1888 "incorrect number of operands in module flag", Op);
1889 Module::ModFlagBehavior MFB;
1890 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1892 "invalid behavior operand in module flag (expected constant integer)",
1893 Op->getOperand(0));
1894 Check(false,
1895 "invalid behavior operand in module flag (unexpected constant)",
1896 Op->getOperand(0));
1897 }
1898 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1899 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1900 Op->getOperand(1));
1901
1902 // Check the values for behaviors with additional requirements.
1903 switch (MFB) {
1904 case Module::Error:
1905 case Module::Warning:
1906 case Module::Override:
1907 // These behavior types accept any value.
1908 break;
1909
1910 case Module::Min: {
1911 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1912 Check(V && V->getValue().isNonNegative(),
1913 "invalid value for 'min' module flag (expected constant non-negative "
1914 "integer)",
1915 Op->getOperand(2));
1916 break;
1917 }
1918
1919 case Module::Max: {
1921 "invalid value for 'max' module flag (expected constant integer)",
1922 Op->getOperand(2));
1923 break;
1924 }
1925
1926 case Module::Require: {
1927 // The value should itself be an MDNode with two operands, a flag ID (an
1928 // MDString), and a value.
1929 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1930 Check(Value && Value->getNumOperands() == 2,
1931 "invalid value for 'require' module flag (expected metadata pair)",
1932 Op->getOperand(2));
1933 Check(isa<MDString>(Value->getOperand(0)),
1934 ("invalid value for 'require' module flag "
1935 "(first value operand should be a string)"),
1936 Value->getOperand(0));
1937
1938 // Append it to the list of requirements, to check once all module flags are
1939 // scanned.
1940 Requirements.push_back(Value);
1941 break;
1942 }
1943
1944 case Module::Append:
1945 case Module::AppendUnique: {
1946 // These behavior types require the operand be an MDNode.
1947 Check(isa<MDNode>(Op->getOperand(2)),
1948 "invalid value for 'append'-type module flag "
1949 "(expected a metadata node)",
1950 Op->getOperand(2));
1951 break;
1952 }
1953 }
1954
1955 // Unless this is a "requires" flag, check the ID is unique.
1956 if (MFB != Module::Require) {
1957 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1958 Check(Inserted,
1959 "module flag identifiers must be unique (or of 'require' type)", ID);
1960 }
1961
1962 if (ID->getString() == "wchar_size") {
1963 ConstantInt *Value
1965 Check(Value, "wchar_size metadata requires constant integer argument");
1966 }
1967
1968 if (ID->getString() == "Linker Options") {
1969 // If the llvm.linker.options named metadata exists, we assume that the
1970 // bitcode reader has upgraded the module flag. Otherwise the flag might
1971 // have been created by a client directly.
1972 Check(M.getNamedMetadata("llvm.linker.options"),
1973 "'Linker Options' named metadata no longer supported");
1974 }
1975
1976 if (ID->getString() == "SemanticInterposition") {
1977 ConstantInt *Value =
1979 Check(Value,
1980 "SemanticInterposition metadata requires constant integer argument");
1981 }
1982
1983 if (ID->getString() == "CG Profile") {
1984 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1985 visitModuleFlagCGProfileEntry(MDO);
1986 }
1987}
1988
1989void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1990 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1991 if (!FuncMDO)
1992 return;
1993 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1994 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1995 "expected a Function or null", FuncMDO);
1996 };
1997 auto Node = dyn_cast_or_null<MDNode>(MDO);
1998 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1999 CheckFunction(Node->getOperand(0));
2000 CheckFunction(Node->getOperand(1));
2001 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2002 Check(Count && Count->getType()->isIntegerTy(),
2003 "expected an integer constant", Node->getOperand(2));
2004}
2005
// Verify that each attribute in \p Attrs is structurally well-formed for its
// kind. Boolean-valued string attributes (declared via ATTRIBUTE_STRBOOL in
// Attributes.inc) may only carry "", "true" or "false"; enum/int attributes
// must carry an integer argument exactly when their kind requires one.
// \p V is printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Expand a check for every string-boolean attribute known to the
      // attribute table generator.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Enum/int attributes: the presence of an argument must match the kind.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2031
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Checks: every attribute is usable as a parameter attribute, 'immarg' is
// alone (except 'range'), mutually-exclusive ABI attributes don't co-occur,
// no attribute is applied to an incompatible type, pointee-type attributes
// (byval/byref/inalloca/preallocated) name sized and reasonably-small types,
// and 'initializes'/'nofpclass'/'range' payloads are well-formed.
// NOTE(review): several interior lines appear elided in this copy of the
// file (orphan continuation strings below); code is left byte-identical.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
                                    const Value *V) {
  if (!Attrs.hasAttributes())
    return;

  verifyAttributeTypes(Attrs, V);

  for (Attribute Attr : Attrs)
    Check(Attr.isStringAttribute() ||
              Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
          "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
          V);

  // 'immarg' must be the only attribute, except that 'range' may accompany it.
  if (Attrs.hasAttribute(Attribute::ImmArg)) {
    unsigned AttrCount =
        Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
    Check(AttrCount == 1,
          "Attribute 'immarg' is incompatible with other attributes except the "
          "'range' attribute",
          V);
  }

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
               Attrs.hasAttribute(Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Attribute::Nest);
  AttrCount += Attrs.hasAttribute(Attribute::ByRef);
  Check(AttrCount <= 1,
        "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
        "'byref', and 'sret' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'inalloca and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
          Attrs.hasAttribute(Attribute::Returned)),
        "Attributes "
        "'sret and returned' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
          Attrs.hasAttribute(Attribute::SExt)),
        "Attributes "
        "'zeroext and signext' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes "
        "'readnone and readonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readnone and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
          Attrs.hasAttribute(Attribute::WriteOnly)),
        "Attributes "
        "'readonly and writeonly' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
          Attrs.hasAttribute(Attribute::AlwaysInline)),
        "Attributes "
        "'noinline and alwaysinline' are incompatible!",
        V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadNone)),
        "Attributes writable and readnone are incompatible!", V);

  Check(!(Attrs.hasAttribute(Attribute::Writable) &&
          Attrs.hasAttribute(Attribute::ReadOnly)),
        "Attributes writable and readonly are incompatible!", V);

  // Reject any enum attribute that is incompatible with the parameter's type.
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
  for (Attribute Attr : Attrs) {
    if (!Attr.isStringAttribute() &&
        IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
      CheckFailed("Attribute '" + Attr.getAsString() +
                  "' applied to incompatible type!", V);
      return;
    }
  }

  // Pointee-type attributes: the named type must be sized and its alloc size
  // must fit in 32 bits (frame-layout code assumes this).
  if (isa<PointerType>(Ty)) {
    if (Attrs.hasAttribute(Attribute::Alignment)) {
      Align AttrAlign = Attrs.getAlignment().valueOrOne();
      Check(AttrAlign.value() <= Value::MaximumAlignment,
            "huge alignment values are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByVal)) {
      Type *ByValTy = Attrs.getByValType();
      SmallPtrSet<Type *, 4> Visited;
      Check(ByValTy->isSized(&Visited),
            "Attribute 'byval' does not support unsized types!", V);
      // Check if it is or contains a target extension type that disallows being
      // used on the stack.
            "'byval' argument has illegal target extension type", V);
      Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
            "huge 'byval' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::ByRef)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getByRefType()->isSized(&Visited),
            "Attribute 'byref' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'byref' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::InAlloca)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getInAllocaType()->isSized(&Visited),
            "Attribute 'inalloca' does not support unsized types!", V);
      Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
                (1ULL << 32),
            "huge 'inalloca' arguments are unsupported", V);
    }
    if (Attrs.hasAttribute(Attribute::Preallocated)) {
      SmallPtrSet<Type *, 4> Visited;
      Check(Attrs.getPreallocatedType()->isSized(&Visited),
            "Attribute 'preallocated' does not support unsized types!", V);
      Check(
          DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
              (1ULL << 32),
          "huge 'preallocated' arguments are unsupported", V);
    }
  }

  if (Attrs.hasAttribute(Attribute::Initializes)) {
    auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
    Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
          V);
          "Attribute 'initializes' does not support unordered ranges", V);
  }

  if (Attrs.hasAttribute(Attribute::NoFPClass)) {
    uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
    Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
          V);
    Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "Invalid value for 'nofpclass' test mask", V);
  }
  if (Attrs.hasAttribute(Attribute::Range)) {
    const ConstantRange &CR =
        Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
        "Range bit width must match type bit width!", V);
  }
}
2198
2199void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2200 const Value *V) {
2201 if (Attrs.hasFnAttr(Attr)) {
2202 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2203 unsigned N;
2204 if (S.getAsInteger(10, N))
2205 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2206 }
2207}
2208
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// Verifies: attribute/context consistency, per-parameter attributes
// (delegating to verifyParameterAttrs), uniqueness of nest/returned/sret/
// swift* parameters, function-level attribute applicability, and the value
// constraints of specific function attributes (allocsize, allockind,
// vscale_range, frame-pointer, sign-return-address, denormal modes, etc.).
// NOTE(review): several interior lines appear elided in this copy of the
// file (orphan continuation strings below); code is left byte-identical.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
                                   const Value *V, bool IsIntrinsic,
                                   bool IsInlineAsm) {
  if (Attrs.isEmpty())
    return;

  // Each distinct attribute list is checked once for context consistency.
  if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
    Check(Attrs.hasParentContext(Context),
          "Attribute list does not match Module context!", &Attrs, V);
    for (const auto &AttrSet : Attrs) {
      Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
            "Attribute set does not match Module context!", &AttrSet, V);
      for (const auto &A : AttrSet) {
        Check(A.hasParentContext(Context),
              "Attribute does not match Module context!", &A, V);
      }
    }
  }

  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftAsync = false;
  bool SawSwiftError = false;

  // Verify return value attributes.
  AttributeSet RetAttrs = Attrs.getRetAttrs();
  for (Attribute RetAttr : RetAttrs)
    Check(RetAttr.isStringAttribute() ||
              Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
          "Attribute '" + RetAttr.getAsString() +
              "' does not apply to function return values",
          V);

  // Track the widest fixed-vector parameter/return width seen.
  unsigned MaxParameterWidth = 0;
  auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
    if (Ty->isVectorTy()) {
      if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
        unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
        if (Size > MaxParameterWidth)
          MaxParameterWidth = Size;
      }
    }
  };
  GetMaxParameterWidth(FT->getReturnType());
  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);

  // Verify parameter attributes.
  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
    Type *Ty = FT->getParamType(i);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(i);

    if (!IsIntrinsic) {
      Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
            "immarg attribute only applies to intrinsics", V);
      if (!IsInlineAsm)
        Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
              "Attribute 'elementtype' can only be applied to intrinsics"
              " and inline asm.",
              V);
    }

    verifyParameterAttrs(ArgAttrs, Ty, V);
    GetMaxParameterWidth(Ty);

    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
      Check(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
      Check(!SawReturned, "More than one parameter has attribute returned!", V);
      Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
            "Incompatible argument and return types for 'returned' attribute",
            V);
      SawReturned = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
      Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Check(i == 0 || i == 1,
            "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
      Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
      Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
      SawSwiftAsync = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
      Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
      SawSwiftError = true;
    }

    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
      Check(i == FT->getNumParams() - 1,
            "inalloca isn't on the last parameter!", V);
    }
  }

  if (!Attrs.hasFnAttrs())
    return;

  // Verify function-level attributes.
  verifyAttributeTypes(Attrs.getFnAttrs(), V);
  for (Attribute FnAttr : Attrs.getFnAttrs())
    Check(FnAttr.isStringAttribute() ||
              Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
          "Attribute '" + FnAttr.getAsString() +
              "' does not apply to functions!",
          V);

  Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
          Attrs.hasFnAttr(Attribute::AlwaysInline)),
        "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
    Check(Attrs.hasFnAttr(Attribute::NoInline),
          "Attribute 'optnone' requires 'noinline'!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optnone' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
          "Attributes 'optdebug and optnone' are incompatible!", V);
  }

  Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
          Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
        "Attributes "
        "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
        V);

  if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
    Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
          "Attributes 'optsize and optdebug' are incompatible!", V);

    Check(!Attrs.hasFnAttr(Attribute::MinSize),
          "Attributes 'minsize and optdebug' are incompatible!", V);
  }

  Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
            isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
        "Attribute writable and memory without argmem: write are incompatible!",
        V);

  // AArch64 SME/ZA state attributes are mutually exclusive within each group.
  if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
    Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
          "Attributes 'aarch64_pstate_sm_enabled and "
          "aarch64_pstate_sm_compatible' are incompatible!",
          V);
  }

  Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
         Attrs.hasFnAttr("aarch64_inout_za") +
         Attrs.hasFnAttr("aarch64_out_za") +
         Attrs.hasFnAttr("aarch64_preserves_za") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
        "'aarch64_inout_za', 'aarch64_preserves_za' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  Check((Attrs.hasFnAttr("aarch64_new_zt0") +
         Attrs.hasFnAttr("aarch64_in_zt0") +
         Attrs.hasFnAttr("aarch64_inout_zt0") +
         Attrs.hasFnAttr("aarch64_out_zt0") +
         Attrs.hasFnAttr("aarch64_preserves_zt0") +
         Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
        "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
        "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
        "'aarch64_za_state_agnostic' are mutually exclusive",
        V);

  if (Attrs.hasFnAttr(Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
          "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  // allocsize indices must name integer parameters of the function.
  if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                        " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args->first))
      return;

    if (Args->second && !CheckParam("number of elements", *Args->second))
      return;
  }

  if (Attrs.hasFnAttr(Attribute::AllocKind)) {
    AllocFnKind K = Attrs.getAllocKind();
        K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
    if (!is_contained(
            {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
            Type))
      CheckFailed(
          "'allockind()' requires exactly one of alloc, realloc, and free");
    if ((Type == AllocFnKind::Free) &&
        ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
               AllocFnKind::Aligned)) != AllocFnKind::Unknown))
      CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
                  "or aligned modifiers.");
    AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
    if ((K & ZeroedUninit) == ZeroedUninit)
      CheckFailed("'allockind()' can't be both zeroed and uninitialized");
  }

  // "alloc-variant-zeroed" must name a compatible zeroing variant.
  if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
    StringRef S = A.getValueAsString();
    Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
    Function *Variant = M.getFunction(S);
    if (Variant) {
      Attribute Family = Attrs.getFnAttr("alloc-family");
      Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
      if (Family.isValid())
        Check(VariantFamily.isValid() &&
                  VariantFamily.getValueAsString() == Family.getValueAsString(),
              "'alloc-variant-zeroed' must name a function belonging to the "
              "same 'alloc-family'");

      Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
                (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
                 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
            "'alloc-variant-zeroed' must name a function with "
            "'allockind(\"zeroed\")'");

      Check(FT == Variant->getFunctionType(),
            "'alloc-variant-zeroed' must name a function with the same "
            "signature");
    }
  }

  // vscale_range bounds must be non-zero powers of two with min <= max.
  if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
    unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
    if (VScaleMin == 0)
      CheckFailed("'vscale_range' minimum must be greater than 0", V);
    else if (!isPowerOf2_32(VScaleMin))
      CheckFailed("'vscale_range' minimum must be power-of-two value", V);
    std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
    if (VScaleMax && VScaleMin > VScaleMax)
      CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
    else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
      CheckFailed("'vscale_range' maximum must be power-of-two value", V);
  }

  if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
    StringRef FP = FPAttr.getValueAsString();
    if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved")
      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
  }

  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
  checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
  if (Attrs.hasFnAttr("patchable-function-entry-section"))
    Check(!Attrs.getFnAttr("patchable-function-entry-section")
               .getValueAsString()
               .empty(),
          "\"patchable-function-entry-section\" must not be empty");
  checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);

  // Pointer-authentication return-address signing attributes.
  if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "none" && S != "all" && S != "non-leaf")
      CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "a_key" && S != "b_key")
      CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
                  V);
    if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
      CheckFailed(
          "'sign-return-address-key' present without `sign-return-address`");
    }
  }

  if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-target-enforcement' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed(
          "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
    StringRef S = A.getValueAsString();
    if (S != "" && S != "true" && S != "false")
      CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
                  V);
  }

  // Vector-function ABI variant names must demangle against this signature.
  if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
    StringRef S = A.getValueAsString();
    const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
    if (!Info)
      CheckFailed("invalid name for a VFABI variant: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
    StringRef S = A.getValueAsString();
      CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
  }

  if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
    StringRef S = A.getValueAsString();
      CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
                  V);
  }
}
2554void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2555 Check(MD->getNumOperands() == 2,
2556 "'unknown' !prof should have a single additional operand", MD);
2557 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2558 Check(PassName != nullptr,
2559 "'unknown' !prof should have an additional operand of type "
2560 "string");
2561 Check(!PassName->getString().empty(),
2562 "the 'unknown' !prof operand should not be an empty string");
2563}
2564
// Verify function-level metadata attachments: !prof entries must be
// entry-count annotations (or the "unknown" marker, delegated to
// verifyUnknownProfileMetadata), and !kcfi_type must be a single 32-bit-or-
// smaller integer constant.
// NOTE(review): several interior lines appear elided in this copy of the
// file (orphan continuation strings below); code is left byte-identical.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2614
2615void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2616 if (!ConstantExprVisited.insert(EntryC).second)
2617 return;
2618
2620 Stack.push_back(EntryC);
2621
2622 while (!Stack.empty()) {
2623 const Constant *C = Stack.pop_back_val();
2624
2625 // Check this constant expression.
2626 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2627 visitConstantExpr(CE);
2628
2629 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2630 visitConstantPtrAuth(CPA);
2631
2632 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2633 // Global Values get visited separately, but we do need to make sure
2634 // that the global value is in the correct module
2635 Check(GV->getParent() == &M, "Referencing global in another module!",
2636 EntryC, &M, GV, GV->getParent());
2637 continue;
2638 }
2639
2640 // Visit all sub-expressions.
2641 for (const Use &U : C->operands()) {
2642 const auto *OpC = dyn_cast<Constant>(U);
2643 if (!OpC)
2644 continue;
2645 if (!ConstantExprVisited.insert(OpC).second)
2646 continue;
2647 Stack.push_back(OpC);
2648 }
2649 }
2650}
2651
2652void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2653 if (CE->getOpcode() == Instruction::BitCast)
2654 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2655 CE->getType()),
2656 "Invalid bitcast", CE);
2657 else if (CE->getOpcode() == Instruction::PtrToAddr)
2658 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2659}
2660
2661void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2662 Check(CPA->getPointer()->getType()->isPointerTy(),
2663 "signed ptrauth constant base pointer must have pointer type");
2664
2665 Check(CPA->getType() == CPA->getPointer()->getType(),
2666 "signed ptrauth constant must have same type as its base pointer");
2667
2668 Check(CPA->getKey()->getBitWidth() == 32,
2669 "signed ptrauth constant key must be i32 constant integer");
2670
2672 "signed ptrauth constant address discriminator must be a pointer");
2673
2674 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2675 "signed ptrauth constant discriminator must be i64 constant integer");
2676}
2677
2678bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2679 // There shouldn't be more attribute sets than there are parameters plus the
2680 // function and return value.
2681 return Attrs.getNumAttrSets() <= Params + 2;
2682}
2683
2684void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2685 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2686 unsigned ArgNo = 0;
2687 unsigned LabelNo = 0;
2688 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2689 if (CI.Type == InlineAsm::isLabel) {
2690 ++LabelNo;
2691 continue;
2692 }
2693
2694 // Only deal with constraints that correspond to call arguments.
2695 if (!CI.hasArg())
2696 continue;
2697
2698 if (CI.isIndirect) {
2699 const Value *Arg = Call.getArgOperand(ArgNo);
2700 Check(Arg->getType()->isPointerTy(),
2701 "Operand for indirect constraint must have pointer type", &Call);
2702
2704 "Operand for indirect constraint must have elementtype attribute",
2705 &Call);
2706 } else {
2707 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2708 "Elementtype attribute can only be applied for indirect "
2709 "constraints",
2710 &Call);
2711 }
2712
2713 ArgNo++;
2714 }
2715
2716 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2717 Check(LabelNo == CallBr->getNumIndirectDests(),
2718 "Number of label constraints does not match number of callbr dests",
2719 &Call);
2720 } else {
2721 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2722 &Call);
2723 }
2724}
2725
2726/// Verify that statepoint intrinsic is well formed.
2727void Verifier::verifyStatepoint(const CallBase &Call) {
2728 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2729
2732 "gc.statepoint must read and write all memory to preserve "
2733 "reordering restrictions required by safepoint semantics",
2734 Call);
2735
2736 const int64_t NumPatchBytes =
2737 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2738 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2739 Check(NumPatchBytes >= 0,
2740 "gc.statepoint number of patchable bytes must be "
2741 "positive",
2742 Call);
2743
2744 Type *TargetElemType = Call.getParamElementType(2);
2745 Check(TargetElemType,
2746 "gc.statepoint callee argument must have elementtype attribute", Call);
2747 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2748 Check(TargetFuncType,
2749 "gc.statepoint callee elementtype must be function type", Call);
2750
2751 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2752 Check(NumCallArgs >= 0,
2753 "gc.statepoint number of arguments to underlying call "
2754 "must be positive",
2755 Call);
2756 const int NumParams = (int)TargetFuncType->getNumParams();
2757 if (TargetFuncType->isVarArg()) {
2758 Check(NumCallArgs >= NumParams,
2759 "gc.statepoint mismatch in number of vararg call args", Call);
2760
2761 // TODO: Remove this limitation
2762 Check(TargetFuncType->getReturnType()->isVoidTy(),
2763 "gc.statepoint doesn't support wrapping non-void "
2764 "vararg functions yet",
2765 Call);
2766 } else
2767 Check(NumCallArgs == NumParams,
2768 "gc.statepoint mismatch in number of call args", Call);
2769
2770 const uint64_t Flags
2771 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2772 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2773 "unknown flag used in gc.statepoint flags argument", Call);
2774
2775 // Verify that the types of the call parameter arguments match
2776 // the type of the wrapped callee.
2777 AttributeList Attrs = Call.getAttributes();
2778 for (int i = 0; i < NumParams; i++) {
2779 Type *ParamType = TargetFuncType->getParamType(i);
2780 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2781 Check(ArgType == ParamType,
2782 "gc.statepoint call argument does not match wrapped "
2783 "function type",
2784 Call);
2785
2786 if (TargetFuncType->isVarArg()) {
2787 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2788 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2789 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2790 }
2791 }
2792
2793 const int EndCallArgsInx = 4 + NumCallArgs;
2794
2795 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2796 Check(isa<ConstantInt>(NumTransitionArgsV),
2797 "gc.statepoint number of transition arguments "
2798 "must be constant integer",
2799 Call);
2800 const int NumTransitionArgs =
2801 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2802 Check(NumTransitionArgs == 0,
2803 "gc.statepoint w/inline transition bundle is deprecated", Call);
2804 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2805
2806 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2807 Check(isa<ConstantInt>(NumDeoptArgsV),
2808 "gc.statepoint number of deoptimization arguments "
2809 "must be constant integer",
2810 Call);
2811 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2812 Check(NumDeoptArgs == 0,
2813 "gc.statepoint w/inline deopt operands is deprecated", Call);
2814
2815 const int ExpectedNumArgs = 7 + NumCallArgs;
2816 Check(ExpectedNumArgs == (int)Call.arg_size(),
2817 "gc.statepoint too many arguments", Call);
2818
2819 // Check that the only uses of this gc.statepoint are gc.result or
2820 // gc.relocate calls which are tied to this statepoint and thus part
2821 // of the same statepoint sequence
2822 for (const User *U : Call.users()) {
2823 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2824 Check(UserCall, "illegal use of statepoint token", Call, U);
2825 if (!UserCall)
2826 continue;
2827 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2828 "gc.result or gc.relocate are the only value uses "
2829 "of a gc.statepoint",
2830 Call, U);
2831 if (isa<GCResultInst>(UserCall)) {
2832 Check(UserCall->getArgOperand(0) == &Call,
2833 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2834 } else if (isa<GCRelocateInst>(Call)) {
2835 Check(UserCall->getArgOperand(0) == &Call,
2836 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2837 }
2838 }
2839
2840 // Note: It is legal for a single derived pointer to be listed multiple
2841 // times. It's non-optimal, but it is legal. It can also happen after
2842 // insertion if we strip a bitcast away.
2843 // Note: It is really tempting to check that each base is relocated and
2844 // that a derived pointer is never reused as a base pointer. This turns
2845 // out to be problematic since optimizations run after safepoint insertion
2846 // can recognize equality properties that the insertion logic doesn't know
2847 // about. See example statepoint.ll in the verifier subdirectory
2848}
2849
2850void Verifier::verifyFrameRecoverIndices() {
2851 for (auto &Counts : FrameEscapeInfo) {
2852 Function *F = Counts.first;
2853 unsigned EscapedObjectCount = Counts.second.first;
2854 unsigned MaxRecoveredIndex = Counts.second.second;
2855 Check(MaxRecoveredIndex <= EscapedObjectCount,
2856 "all indices passed to llvm.localrecover must be less than the "
2857 "number of arguments passed to llvm.localescape in the parent "
2858 "function",
2859 F);
2860 }
2861}
2862
2863static Instruction *getSuccPad(Instruction *Terminator) {
2864 BasicBlock *UnwindDest;
2865 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2866 UnwindDest = II->getUnwindDest();
2867 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2868 UnwindDest = CSI->getUnwindDest();
2869 else
2870 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2871 return &*UnwindDest->getFirstNonPHIIt();
2872}
2873
// Walk the funclet unwind graph recorded in SiblingFuncletInfo and reject any
// cycle: an EH pad must not (transitively) unwind to a sibling that can in
// turn unwind back to it. Each pad has at most one successor in this graph,
// so an iterative walk with an "active path" set is sufficient.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose whole successor chain has already been validated.
  // Active: pads on the chain currently being walked (used to detect cycles).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-trace the loop starting from SuccPad, collecting each pad (and
        // its terminator, when distinct) so the diagnostic shows the whole
        // cycle.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2916
2917// visitFunction - Verify that a function is ok.
2918//
2919void Verifier::visitFunction(const Function &F) {
2920 visitGlobalValue(F);
2921
2922 // Check function arguments.
2923 FunctionType *FT = F.getFunctionType();
2924 unsigned NumArgs = F.arg_size();
2925
2926 Check(&Context == &F.getContext(),
2927 "Function context does not match Module context!", &F);
2928
2929 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2930 Check(FT->getNumParams() == NumArgs,
2931 "# formal arguments must match # of arguments for function type!", &F,
2932 FT);
2933 Check(F.getReturnType()->isFirstClassType() ||
2934 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2935 "Functions cannot return aggregate values!", &F);
2936
2937 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2938 "Invalid struct return type!", &F);
2939
2940 if (MaybeAlign A = F.getAlign()) {
2941 Check(A->value() <= Value::MaximumAlignment,
2942 "huge alignment values are unsupported", &F);
2943 }
2944
2945 AttributeList Attrs = F.getAttributes();
2946
2947 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2948 "Attribute after last parameter!", &F);
2949
2950 bool IsIntrinsic = F.isIntrinsic();
2951
2952 // Check function attributes.
2953 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2954
2955 // On function declarations/definitions, we do not support the builtin
2956 // attribute. We do not check this in VerifyFunctionAttrs since that is
2957 // checking for Attributes that can/can not ever be on functions.
2958 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2959 "Attribute 'builtin' can only be applied to a callsite.", &F);
2960
2961 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2962 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2963
2964 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
2965 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
2966
2967 if (Attrs.hasFnAttr(Attribute::Naked))
2968 for (const Argument &Arg : F.args())
2969 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
2970
2971 // Check that this function meets the restrictions on this calling convention.
2972 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2973 // restrictions can be lifted.
2974 switch (F.getCallingConv()) {
2975 default:
2976 case CallingConv::C:
2977 break;
2978 case CallingConv::X86_INTR: {
2979 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2980 "Calling convention parameter requires byval", &F);
2981 break;
2982 }
2983 case CallingConv::AMDGPU_KERNEL:
2984 case CallingConv::SPIR_KERNEL:
2985 case CallingConv::AMDGPU_CS_Chain:
2986 case CallingConv::AMDGPU_CS_ChainPreserve:
2987 Check(F.getReturnType()->isVoidTy(),
2988 "Calling convention requires void return type", &F);
2989 [[fallthrough]];
2990 case CallingConv::AMDGPU_VS:
2991 case CallingConv::AMDGPU_HS:
2992 case CallingConv::AMDGPU_GS:
2993 case CallingConv::AMDGPU_PS:
2994 case CallingConv::AMDGPU_CS:
2995 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2996 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2997 const unsigned StackAS = DL.getAllocaAddrSpace();
2998 unsigned i = 0;
2999 for (const Argument &Arg : F.args()) {
3000 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3001 "Calling convention disallows byval", &F);
3002 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3003 "Calling convention disallows preallocated", &F);
3004 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3005 "Calling convention disallows inalloca", &F);
3006
3007 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3008 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3009 // value here.
3010 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3011 "Calling convention disallows stack byref", &F);
3012 }
3013
3014 ++i;
3015 }
3016 }
3017
3018 [[fallthrough]];
3019 case CallingConv::Fast:
3020 case CallingConv::Cold:
3021 case CallingConv::Intel_OCL_BI:
3022 case CallingConv::PTX_Kernel:
3023 case CallingConv::PTX_Device:
3024 Check(!F.isVarArg(),
3025 "Calling convention does not support varargs or "
3026 "perfect forwarding!",
3027 &F);
3028 break;
3029 case CallingConv::AMDGPU_Gfx_WholeWave:
3030 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3031 "Calling convention requires first argument to be i1", &F);
3032 Check(!F.arg_begin()->hasInRegAttr(),
3033 "Calling convention requires first argument to not be inreg", &F);
3034 Check(!F.isVarArg(),
3035 "Calling convention does not support varargs or "
3036 "perfect forwarding!",
3037 &F);
3038 break;
3039 }
3040
3041 // Check that the argument values match the function type for this function...
3042 unsigned i = 0;
3043 for (const Argument &Arg : F.args()) {
3044 Check(Arg.getType() == FT->getParamType(i),
3045 "Argument value does not match function argument type!", &Arg,
3046 FT->getParamType(i));
3047 Check(Arg.getType()->isFirstClassType(),
3048 "Function arguments must have first-class types!", &Arg);
3049 if (!IsIntrinsic) {
3050 Check(!Arg.getType()->isMetadataTy(),
3051 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3052 Check(!Arg.getType()->isTokenLikeTy(),
3053 "Function takes token but isn't an intrinsic", &Arg, &F);
3054 Check(!Arg.getType()->isX86_AMXTy(),
3055 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3056 }
3057
3058 // Check that swifterror argument is only used by loads and stores.
3059 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3060 verifySwiftErrorValue(&Arg);
3061 }
3062 ++i;
3063 }
3064
3065 if (!IsIntrinsic) {
3066 Check(!F.getReturnType()->isTokenLikeTy(),
3067 "Function returns a token but isn't an intrinsic", &F);
3068 Check(!F.getReturnType()->isX86_AMXTy(),
3069 "Function returns a x86_amx but isn't an intrinsic", &F);
3070 }
3071
3072 // Get the function metadata attachments.
3074 F.getAllMetadata(MDs);
3075 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3076 verifyFunctionMetadata(MDs);
3077
3078 // Check validity of the personality function
3079 if (F.hasPersonalityFn()) {
3080 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3081 if (Per)
3082 Check(Per->getParent() == F.getParent(),
3083 "Referencing personality function in another module!", &F,
3084 F.getParent(), Per, Per->getParent());
3085 }
3086
3087 // EH funclet coloring can be expensive, recompute on-demand
3088 BlockEHFuncletColors.clear();
3089
3090 if (F.isMaterializable()) {
3091 // Function has a body somewhere we can't see.
3092 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3093 MDs.empty() ? nullptr : MDs.front().second);
3094 } else if (F.isDeclaration()) {
3095 for (const auto &I : MDs) {
3096 // This is used for call site debug information.
3097 CheckDI(I.first != LLVMContext::MD_dbg ||
3098 !cast<DISubprogram>(I.second)->isDistinct(),
3099 "function declaration may only have a unique !dbg attachment",
3100 &F);
3101 Check(I.first != LLVMContext::MD_prof,
3102 "function declaration may not have a !prof attachment", &F);
3103
3104 // Verify the metadata itself.
3105 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3106 }
3107 Check(!F.hasPersonalityFn(),
3108 "Function declaration shouldn't have a personality routine", &F);
3109 } else {
3110 // Verify that this function (which has a body) is not named "llvm.*". It
3111 // is not legal to define intrinsics.
3112 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3113
3114 // Check the entry node
3115 const BasicBlock *Entry = &F.getEntryBlock();
3116 Check(pred_empty(Entry),
3117 "Entry block to function must not have predecessors!", Entry);
3118
3119 // The address of the entry block cannot be taken, unless it is dead.
3120 if (Entry->hasAddressTaken()) {
3121 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3122 "blockaddress may not be used with the entry block!", Entry);
3123 }
3124
3125 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3126 NumKCFIAttachments = 0;
3127 // Visit metadata attachments.
3128 for (const auto &I : MDs) {
3129 // Verify that the attachment is legal.
3130 auto AllowLocs = AreDebugLocsAllowed::No;
3131 switch (I.first) {
3132 default:
3133 break;
3134 case LLVMContext::MD_dbg: {
3135 ++NumDebugAttachments;
3136 CheckDI(NumDebugAttachments == 1,
3137 "function must have a single !dbg attachment", &F, I.second);
3138 CheckDI(isa<DISubprogram>(I.second),
3139 "function !dbg attachment must be a subprogram", &F, I.second);
3140 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3141 "function definition may only have a distinct !dbg attachment",
3142 &F);
3143
3144 auto *SP = cast<DISubprogram>(I.second);
3145 const Function *&AttachedTo = DISubprogramAttachments[SP];
3146 CheckDI(!AttachedTo || AttachedTo == &F,
3147 "DISubprogram attached to more than one function", SP, &F);
3148 AttachedTo = &F;
3149 AllowLocs = AreDebugLocsAllowed::Yes;
3150 break;
3151 }
3152 case LLVMContext::MD_prof:
3153 ++NumProfAttachments;
3154 Check(NumProfAttachments == 1,
3155 "function must have a single !prof attachment", &F, I.second);
3156 break;
3157 case LLVMContext::MD_kcfi_type:
3158 ++NumKCFIAttachments;
3159 Check(NumKCFIAttachments == 1,
3160 "function must have a single !kcfi_type attachment", &F,
3161 I.second);
3162 break;
3163 }
3164
3165 // Verify the metadata itself.
3166 visitMDNode(*I.second, AllowLocs);
3167 }
3168 }
3169
3170 // If this function is actually an intrinsic, verify that it is only used in
3171 // direct call/invokes, never having its "address taken".
3172 // Only do this if the module is materialized, otherwise we don't have all the
3173 // uses.
3174 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3175 const User *U;
3176 if (F.hasAddressTaken(&U, false, true, false,
3177 /*IgnoreARCAttachedCall=*/true))
3178 Check(false, "Invalid user of intrinsic instruction!", U);
3179 }
3180
3181 // Check intrinsics' signatures.
3182 switch (F.getIntrinsicID()) {
3183 case Intrinsic::experimental_gc_get_pointer_base: {
3184 FunctionType *FT = F.getFunctionType();
3185 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3186 Check(isa<PointerType>(F.getReturnType()),
3187 "gc.get.pointer.base must return a pointer", F);
3188 Check(FT->getParamType(0) == F.getReturnType(),
3189 "gc.get.pointer.base operand and result must be of the same type", F);
3190 break;
3191 }
3192 case Intrinsic::experimental_gc_get_pointer_offset: {
3193 FunctionType *FT = F.getFunctionType();
3194 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3195 Check(isa<PointerType>(FT->getParamType(0)),
3196 "gc.get.pointer.offset operand must be a pointer", F);
3197 Check(F.getReturnType()->isIntegerTy(),
3198 "gc.get.pointer.offset must return integer", F);
3199 break;
3200 }
3201 }
3202
3203 auto *N = F.getSubprogram();
3204 HasDebugInfo = (N != nullptr);
3205 if (!HasDebugInfo)
3206 return;
3207
3208 // Check that all !dbg attachments lead to back to N.
3209 //
3210 // FIXME: Check this incrementally while visiting !dbg attachments.
3211 // FIXME: Only check when N is the canonical subprogram for F.
3212 SmallPtrSet<const MDNode *, 32> Seen;
3213 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3214 // Be careful about using DILocation here since we might be dealing with
3215 // broken code (this is the Verifier after all).
3216 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3217 if (!DL)
3218 return;
3219 if (!Seen.insert(DL).second)
3220 return;
3221
3222 Metadata *Parent = DL->getRawScope();
3223 CheckDI(Parent && isa<DILocalScope>(Parent),
3224 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3225
3226 DILocalScope *Scope = DL->getInlinedAtScope();
3227 Check(Scope, "Failed to find DILocalScope", DL);
3228
3229 if (!Seen.insert(Scope).second)
3230 return;
3231
3232 DISubprogram *SP = Scope->getSubprogram();
3233
3234 // Scope and SP could be the same MDNode and we don't want to skip
3235 // validation in that case
3236 if ((Scope != SP) && !Seen.insert(SP).second)
3237 return;
3238
3239 CheckDI(SP->describes(&F),
3240 "!dbg attachment points at wrong subprogram for function", N, &F,
3241 &I, DL, Scope, SP);
3242 };
3243 for (auto &BB : F)
3244 for (auto &I : BB) {
3245 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3246 // The llvm.loop annotations also contain two DILocations.
3247 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3248 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3249 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3250 if (BrokenDebugInfo)
3251 return;
3252 }
3253}
3254
3255// verifyBasicBlock - Verify that a basic block is well formed...
3256//
3257void Verifier::visitBasicBlock(BasicBlock &BB) {
3258 InstsInThisBlock.clear();
3259 ConvergenceVerifyHelper.visit(BB);
3260
3261 // Ensure that basic blocks have terminators!
3262 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3263
3264 // Check constraints that this basic block imposes on all of the PHI nodes in
3265 // it.
3266 if (isa<PHINode>(BB.front())) {
3267 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3269 llvm::sort(Preds);
3270 for (const PHINode &PN : BB.phis()) {
3271 Check(PN.getNumIncomingValues() == Preds.size(),
3272 "PHINode should have one entry for each predecessor of its "
3273 "parent basic block!",
3274 &PN);
3275
3276 // Get and sort all incoming values in the PHI node...
3277 Values.clear();
3278 Values.reserve(PN.getNumIncomingValues());
3279 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3280 Values.push_back(
3281 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3282 llvm::sort(Values);
3283
3284 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3285 // Check to make sure that if there is more than one entry for a
3286 // particular basic block in this PHI node, that the incoming values are
3287 // all identical.
3288 //
3289 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3290 Values[i].second == Values[i - 1].second,
3291 "PHI node has multiple entries for the same basic block with "
3292 "different incoming values!",
3293 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3294
3295 // Check to make sure that the predecessors and PHI node entries are
3296 // matched up.
3297 Check(Values[i].first == Preds[i],
3298 "PHI node entries do not match predecessors!", &PN,
3299 Values[i].first, Preds[i]);
3300 }
3301 }
3302 }
3303
3304 // Check that all instructions have their parent pointers set up correctly.
3305 for (auto &I : BB)
3306 {
3307 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3308 }
3309
3310 // Confirm that no issues arise from the debug program.
3311 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3312 &BB);
3313}
3314
3315void Verifier::visitTerminator(Instruction &I) {
3316 // Ensure that terminators only exist at the end of the basic block.
3317 Check(&I == I.getParent()->getTerminator(),
3318 "Terminator found in the middle of a basic block!", I.getParent());
3319 visitInstruction(I);
3320}
3321
3322void Verifier::visitBranchInst(BranchInst &BI) {
3323 if (BI.isConditional()) {
3325 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3326 }
3327 visitTerminator(BI);
3328}
3329
3330void Verifier::visitReturnInst(ReturnInst &RI) {
3331 Function *F = RI.getParent()->getParent();
3332 unsigned N = RI.getNumOperands();
3333 if (F->getReturnType()->isVoidTy())
3334 Check(N == 0,
3335 "Found return instr that returns non-void in Function of void "
3336 "return type!",
3337 &RI, F->getReturnType());
3338 else
3339 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3340 "Function return type does not match operand "
3341 "type of return inst!",
3342 &RI, F->getReturnType());
3343
3344 // Check to make sure that the return value has necessary properties for
3345 // terminators...
3346 visitTerminator(RI);
3347}
3348
3349void Verifier::visitSwitchInst(SwitchInst &SI) {
3350 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3351 // Check to make sure that all of the constants in the switch instruction
3352 // have the same type as the switched-on value.
3353 Type *SwitchTy = SI.getCondition()->getType();
3354 SmallPtrSet<ConstantInt*, 32> Constants;
3355 for (auto &Case : SI.cases()) {
3356 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3357 "Case value is not a constant integer.", &SI);
3358 Check(Case.getCaseValue()->getType() == SwitchTy,
3359 "Switch constants must all be same type as switch value!", &SI);
3360 Check(Constants.insert(Case.getCaseValue()).second,
3361 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3362 }
3363
3364 visitTerminator(SI);
3365}
3366
3367void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3369 "Indirectbr operand must have pointer type!", &BI);
3370 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3372 "Indirectbr destinations must all have pointer type!", &BI);
3373
3374 visitTerminator(BI);
3375}
3376
3377void Verifier::visitCallBrInst(CallBrInst &CBI) {
3378 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3379 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3380 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3381
3382 verifyInlineAsmCall(CBI);
3383 visitTerminator(CBI);
3384}
3385
3386void Verifier::visitSelectInst(SelectInst &SI) {
3387 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3388 SI.getOperand(2)),
3389 "Invalid operands for select instruction!", &SI);
3390
3391 Check(SI.getTrueValue()->getType() == SI.getType(),
3392 "Select values must have same type as select instruction!", &SI);
3393 visitInstruction(SI);
3394}
3395
3396/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3397/// a pass, if any exist, it's an error.
3398///
3399void Verifier::visitUserOp1(Instruction &I) {
3400 Check(false, "User-defined operators should not live outside of a pass!", &I);
3401}
3402
3403void Verifier::visitTruncInst(TruncInst &I) {
3404 // Get the source and destination types
3405 Type *SrcTy = I.getOperand(0)->getType();
3406 Type *DestTy = I.getType();
3407
3408 // Get the size of the types in bits, we'll need this later
3409 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3410 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3411
3412 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3413 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3414 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3415 "trunc source and destination must both be a vector or neither", &I);
3416 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3417
3418 visitInstruction(I);
3419}
3420
3421void Verifier::visitZExtInst(ZExtInst &I) {
3422 // Get the source and destination types
3423 Type *SrcTy = I.getOperand(0)->getType();
3424 Type *DestTy = I.getType();
3425
3426 // Get the size of the types in bits, we'll need this later
3427 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3428 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3429 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3430 "zext source and destination must both be a vector or neither", &I);
3431 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3432 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3433
3434 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3435
3436 visitInstruction(I);
3437}
3438
3439void Verifier::visitSExtInst(SExtInst &I) {
3440 // Get the source and destination types
3441 Type *SrcTy = I.getOperand(0)->getType();
3442 Type *DestTy = I.getType();
3443
3444 // Get the size of the types in bits, we'll need this later
3445 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3446 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3447
3448 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3449 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3450 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3451 "sext source and destination must both be a vector or neither", &I);
3452 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3453
3454 visitInstruction(I);
3455}
3456
3457void Verifier::visitFPTruncInst(FPTruncInst &I) {
3458 // Get the source and destination types
3459 Type *SrcTy = I.getOperand(0)->getType();
3460 Type *DestTy = I.getType();
3461 // Get the size of the types in bits, we'll need this later
3462 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3463 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3464
3465 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3466 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3467 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3468 "fptrunc source and destination must both be a vector or neither", &I);
3469 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3470
3471 visitInstruction(I);
3472}
3473
3474void Verifier::visitFPExtInst(FPExtInst &I) {
3475 // Get the source and destination types
3476 Type *SrcTy = I.getOperand(0)->getType();
3477 Type *DestTy = I.getType();
3478
3479 // Get the size of the types in bits, we'll need this later
3480 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3481 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3482
3483 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3484 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3485 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3486 "fpext source and destination must both be a vector or neither", &I);
3487 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3488
3489 visitInstruction(I);
3490}
3491
3492void Verifier::visitUIToFPInst(UIToFPInst &I) {
3493 // Get the source and destination types
3494 Type *SrcTy = I.getOperand(0)->getType();
3495 Type *DestTy = I.getType();
3496
3497 bool SrcVec = SrcTy->isVectorTy();
3498 bool DstVec = DestTy->isVectorTy();
3499
3500 Check(SrcVec == DstVec,
3501 "UIToFP source and dest must both be vector or scalar", &I);
3502 Check(SrcTy->isIntOrIntVectorTy(),
3503 "UIToFP source must be integer or integer vector", &I);
3504 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3505 &I);
3506
3507 if (SrcVec && DstVec)
3508 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3509 cast<VectorType>(DestTy)->getElementCount(),
3510 "UIToFP source and dest vector length mismatch", &I);
3511
3512 visitInstruction(I);
3513}
3514
3515void Verifier::visitSIToFPInst(SIToFPInst &I) {
3516 // Get the source and destination types
3517 Type *SrcTy = I.getOperand(0)->getType();
3518 Type *DestTy = I.getType();
3519
3520 bool SrcVec = SrcTy->isVectorTy();
3521 bool DstVec = DestTy->isVectorTy();
3522
3523 Check(SrcVec == DstVec,
3524 "SIToFP source and dest must both be vector or scalar", &I);
3525 Check(SrcTy->isIntOrIntVectorTy(),
3526 "SIToFP source must be integer or integer vector", &I);
3527 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3528 &I);
3529
3530 if (SrcVec && DstVec)
3531 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3532 cast<VectorType>(DestTy)->getElementCount(),
3533 "SIToFP source and dest vector length mismatch", &I);
3534
3535 visitInstruction(I);
3536}
3537
3538void Verifier::visitFPToUIInst(FPToUIInst &I) {
3539 // Get the source and destination types
3540 Type *SrcTy = I.getOperand(0)->getType();
3541 Type *DestTy = I.getType();
3542
3543 bool SrcVec = SrcTy->isVectorTy();
3544 bool DstVec = DestTy->isVectorTy();
3545
3546 Check(SrcVec == DstVec,
3547 "FPToUI source and dest must both be vector or scalar", &I);
3548 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3549 Check(DestTy->isIntOrIntVectorTy(),
3550 "FPToUI result must be integer or integer vector", &I);
3551
3552 if (SrcVec && DstVec)
3553 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3554 cast<VectorType>(DestTy)->getElementCount(),
3555 "FPToUI source and dest vector length mismatch", &I);
3556
3557 visitInstruction(I);
3558}
3559
3560void Verifier::visitFPToSIInst(FPToSIInst &I) {
3561 // Get the source and destination types
3562 Type *SrcTy = I.getOperand(0)->getType();
3563 Type *DestTy = I.getType();
3564
3565 bool SrcVec = SrcTy->isVectorTy();
3566 bool DstVec = DestTy->isVectorTy();
3567
3568 Check(SrcVec == DstVec,
3569 "FPToSI source and dest must both be vector or scalar", &I);
3570 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3571 Check(DestTy->isIntOrIntVectorTy(),
3572 "FPToSI result must be integer or integer vector", &I);
3573
3574 if (SrcVec && DstVec)
3575 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3576 cast<VectorType>(DestTy)->getElementCount(),
3577 "FPToSI source and dest vector length mismatch", &I);
3578
3579 visitInstruction(I);
3580}
3581
3582void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3583 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3584 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3585 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3586 V);
3587
3588 if (SrcTy->isVectorTy()) {
3589 auto *VSrc = cast<VectorType>(SrcTy);
3590 auto *VDest = cast<VectorType>(DestTy);
3591 Check(VSrc->getElementCount() == VDest->getElementCount(),
3592 "PtrToAddr vector length mismatch", V);
3593 }
3594
3595 Type *AddrTy = DL.getAddressType(SrcTy);
3596 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3597}
3598
3599void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3600 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3601 visitInstruction(I);
3602}
3603
3604void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3605 // Get the source and destination types
3606 Type *SrcTy = I.getOperand(0)->getType();
3607 Type *DestTy = I.getType();
3608
3609 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3610
3611 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3612 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3613 &I);
3614
3615 if (SrcTy->isVectorTy()) {
3616 auto *VSrc = cast<VectorType>(SrcTy);
3617 auto *VDest = cast<VectorType>(DestTy);
3618 Check(VSrc->getElementCount() == VDest->getElementCount(),
3619 "PtrToInt Vector length mismatch", &I);
3620 }
3621
3622 visitInstruction(I);
3623}
3624
3625void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3626 // Get the source and destination types
3627 Type *SrcTy = I.getOperand(0)->getType();
3628 Type *DestTy = I.getType();
3629
3630 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3631 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3632
3633 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3634 &I);
3635 if (SrcTy->isVectorTy()) {
3636 auto *VSrc = cast<VectorType>(SrcTy);
3637 auto *VDest = cast<VectorType>(DestTy);
3638 Check(VSrc->getElementCount() == VDest->getElementCount(),
3639 "IntToPtr Vector length mismatch", &I);
3640 }
3641 visitInstruction(I);
3642}
3643
3644void Verifier::visitBitCastInst(BitCastInst &I) {
3645 Check(
3646 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3647 "Invalid bitcast", &I);
3648 visitInstruction(I);
3649}
3650
3651void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3652 Type *SrcTy = I.getOperand(0)->getType();
3653 Type *DestTy = I.getType();
3654
3655 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3656 &I);
3657 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3658 &I);
3660 "AddrSpaceCast must be between different address spaces", &I);
3661 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3662 Check(SrcVTy->getElementCount() ==
3663 cast<VectorType>(DestTy)->getElementCount(),
3664 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3665 visitInstruction(I);
3666}
3667
3668/// visitPHINode - Ensure that a PHI node is well formed.
3669///
3670void Verifier::visitPHINode(PHINode &PN) {
3671 // Ensure that the PHI nodes are all grouped together at the top of the block.
3672 // This can be tested by checking whether the instruction before this is
3673 // either nonexistent (because this is begin()) or is a PHI node. If not,
3674 // then there is some other instruction before a PHI.
3675 Check(&PN == &PN.getParent()->front() ||
3677 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3678
3679 // Check that a PHI doesn't yield a Token.
3680 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3681
3682 // Check that all of the values of the PHI node have the same type as the
3683 // result.
3684 for (Value *IncValue : PN.incoming_values()) {
3685 Check(PN.getType() == IncValue->getType(),
3686 "PHI node operands are not the same type as the result!", &PN);
3687 }
3688
3689 // All other PHI node constraints are checked in the visitBasicBlock method.
3690
3691 visitInstruction(PN);
3692}
3693
3694void Verifier::visitCallBase(CallBase &Call) {
3696 "Called function must be a pointer!", Call);
3697 FunctionType *FTy = Call.getFunctionType();
3698
3699 // Verify that the correct number of arguments are being passed
3700 if (FTy->isVarArg())
3701 Check(Call.arg_size() >= FTy->getNumParams(),
3702 "Called function requires more parameters than were provided!", Call);
3703 else
3704 Check(Call.arg_size() == FTy->getNumParams(),
3705 "Incorrect number of arguments passed to called function!", Call);
3706
3707 // Verify that all arguments to the call match the function type.
3708 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3709 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3710 "Call parameter type does not match function signature!",
3711 Call.getArgOperand(i), FTy->getParamType(i), Call);
3712
3713 AttributeList Attrs = Call.getAttributes();
3714
3715 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3716 "Attribute after last parameter!", Call);
3717
3718 Function *Callee =
3720 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3721 if (IsIntrinsic)
3722 Check(Callee->getValueType() == FTy,
3723 "Intrinsic called with incompatible signature", Call);
3724
3725 // Verify if the calling convention of the callee is callable.
3727 "calling convention does not permit calls", Call);
3728
3729 // Disallow passing/returning values with alignment higher than we can
3730 // represent.
3731 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3732 // necessary.
3733 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3734 if (!Ty->isSized())
3735 return;
3736 Align ABIAlign = DL.getABITypeAlign(Ty);
3737 Check(ABIAlign.value() <= Value::MaximumAlignment,
3738 "Incorrect alignment of " + Message + " to called function!", Call);
3739 };
3740
3741 if (!IsIntrinsic) {
3742 VerifyTypeAlign(FTy->getReturnType(), "return type");
3743 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3744 Type *Ty = FTy->getParamType(i);
3745 VerifyTypeAlign(Ty, "argument passed");
3746 }
3747 }
3748
3749 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3750 // Don't allow speculatable on call sites, unless the underlying function
3751 // declaration is also speculatable.
3752 Check(Callee && Callee->isSpeculatable(),
3753 "speculatable attribute may not apply to call sites", Call);
3754 }
3755
3756 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3757 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3758 "preallocated as a call site attribute can only be on "
3759 "llvm.call.preallocated.arg");
3760 }
3761
3762 // Verify call attributes.
3763 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3764
3765 // Conservatively check the inalloca argument.
3766 // We have a bug if we can find that there is an underlying alloca without
3767 // inalloca.
3768 if (Call.hasInAllocaArgument()) {
3769 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3770 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3771 Check(AI->isUsedWithInAlloca(),
3772 "inalloca argument for call has mismatched alloca", AI, Call);
3773 }
3774
3775 // For each argument of the callsite, if it has the swifterror argument,
3776 // make sure the underlying alloca/parameter it comes from has a swifterror as
3777 // well.
3778 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3779 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3780 Value *SwiftErrorArg = Call.getArgOperand(i);
3781 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3782 Check(AI->isSwiftError(),
3783 "swifterror argument for call has mismatched alloca", AI, Call);
3784 continue;
3785 }
3786 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3787 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3788 SwiftErrorArg, Call);
3789 Check(ArgI->hasSwiftErrorAttr(),
3790 "swifterror argument for call has mismatched parameter", ArgI,
3791 Call);
3792 }
3793
3794 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3795 // Don't allow immarg on call sites, unless the underlying declaration
3796 // also has the matching immarg.
3797 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3798 "immarg may not apply only to call sites", Call.getArgOperand(i),
3799 Call);
3800 }
3801
3802 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3803 Value *ArgVal = Call.getArgOperand(i);
3804 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3805 "immarg operand has non-immediate parameter", ArgVal, Call);
3806
3807 // If the imm-arg is an integer and also has a range attached,
3808 // check if the given value is within the range.
3809 if (Call.paramHasAttr(i, Attribute::Range)) {
3810 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3811 const ConstantRange &CR =
3812 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3813 Check(CR.contains(CI->getValue()),
3814 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3815 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3816 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3817 Call);
3818 }
3819 }
3820 }
3821
3822 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3823 Value *ArgVal = Call.getArgOperand(i);
3824 bool hasOB =
3826 bool isMustTail = Call.isMustTailCall();
3827 Check(hasOB != isMustTail,
3828 "preallocated operand either requires a preallocated bundle or "
3829 "the call to be musttail (but not both)",
3830 ArgVal, Call);
3831 }
3832 }
3833
3834 if (FTy->isVarArg()) {
3835 // FIXME? is 'nest' even legal here?
3836 bool SawNest = false;
3837 bool SawReturned = false;
3838
3839 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3840 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3841 SawNest = true;
3842 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3843 SawReturned = true;
3844 }
3845
3846 // Check attributes on the varargs part.
3847 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3848 Type *Ty = Call.getArgOperand(Idx)->getType();
3849 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3850 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3851
3852 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3853 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3854 SawNest = true;
3855 }
3856
3857 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3858 Check(!SawReturned, "More than one parameter has attribute returned!",
3859 Call);
3860 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3861 "Incompatible argument and return types for 'returned' "
3862 "attribute",
3863 Call);
3864 SawReturned = true;
3865 }
3866
3867 // Statepoint intrinsic is vararg but the wrapped function may be not.
3868 // Allow sret here and check the wrapped function in verifyStatepoint.
3869 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3870 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3871 "Attribute 'sret' cannot be used for vararg call arguments!",
3872 Call);
3873
3874 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3875 Check(Idx == Call.arg_size() - 1,
3876 "inalloca isn't on the last argument!", Call);
3877 }
3878 }
3879
3880 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3881 if (!IsIntrinsic) {
3882 for (Type *ParamTy : FTy->params()) {
3883 Check(!ParamTy->isMetadataTy(),
3884 "Function has metadata parameter but isn't an intrinsic", Call);
3885 Check(!ParamTy->isTokenLikeTy(),
3886 "Function has token parameter but isn't an intrinsic", Call);
3887 }
3888 }
3889
3890 // Verify that indirect calls don't return tokens.
3891 if (!Call.getCalledFunction()) {
3892 Check(!FTy->getReturnType()->isTokenLikeTy(),
3893 "Return type cannot be token for indirect call!");
3894 Check(!FTy->getReturnType()->isX86_AMXTy(),
3895 "Return type cannot be x86_amx for indirect call!");
3896 }
3897
3899 visitIntrinsicCall(ID, Call);
3900
3901 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3902 // most one "gc-transition", at most one "cfguardtarget", at most one
3903 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3904 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3905 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3906 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3907 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3908 FoundAttachedCallBundle = false;
3909 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3910 OperandBundleUse BU = Call.getOperandBundleAt(i);
3911 uint32_t Tag = BU.getTagID();
3912 if (Tag == LLVMContext::OB_deopt) {
3913 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3914 FoundDeoptBundle = true;
3915 } else if (Tag == LLVMContext::OB_gc_transition) {
3916 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3917 Call);
3918 FoundGCTransitionBundle = true;
3919 } else if (Tag == LLVMContext::OB_funclet) {
3920 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3921 FoundFuncletBundle = true;
3922 Check(BU.Inputs.size() == 1,
3923 "Expected exactly one funclet bundle operand", Call);
3924 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3925 "Funclet bundle operands should correspond to a FuncletPadInst",
3926 Call);
3927 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3928 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3929 Call);
3930 FoundCFGuardTargetBundle = true;
3931 Check(BU.Inputs.size() == 1,
3932 "Expected exactly one cfguardtarget bundle operand", Call);
3933 } else if (Tag == LLVMContext::OB_ptrauth) {
3934 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3935 FoundPtrauthBundle = true;
3936 Check(BU.Inputs.size() == 2,
3937 "Expected exactly two ptrauth bundle operands", Call);
3938 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3939 BU.Inputs[0]->getType()->isIntegerTy(32),
3940 "Ptrauth bundle key operand must be an i32 constant", Call);
3941 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3942 "Ptrauth bundle discriminator operand must be an i64", Call);
3943 } else if (Tag == LLVMContext::OB_kcfi) {
3944 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3945 FoundKCFIBundle = true;
3946 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3947 Call);
3948 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3949 BU.Inputs[0]->getType()->isIntegerTy(32),
3950 "Kcfi bundle operand must be an i32 constant", Call);
3951 } else if (Tag == LLVMContext::OB_preallocated) {
3952 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3953 Call);
3954 FoundPreallocatedBundle = true;
3955 Check(BU.Inputs.size() == 1,
3956 "Expected exactly one preallocated bundle operand", Call);
3957 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3958 Check(Input &&
3959 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3960 "\"preallocated\" argument must be a token from "
3961 "llvm.call.preallocated.setup",
3962 Call);
3963 } else if (Tag == LLVMContext::OB_gc_live) {
3964 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3965 FoundGCLiveBundle = true;
3967 Check(!FoundAttachedCallBundle,
3968 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3969 FoundAttachedCallBundle = true;
3970 verifyAttachedCallBundle(Call, BU);
3971 }
3972 }
3973
3974 // Verify that callee and callsite agree on whether to use pointer auth.
3975 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3976 "Direct call cannot have a ptrauth bundle", Call);
3977
3978 // Verify that each inlinable callsite of a debug-info-bearing function in a
3979 // debug-info-bearing function has a debug location attached to it. Failure to
3980 // do so causes assertion failures when the inliner sets up inline scope info
3981 // (Interposable functions are not inlinable, neither are functions without
3982 // definitions.)
3988 "inlinable function call in a function with "
3989 "debug info must have a !dbg location",
3990 Call);
3991
3992 if (Call.isInlineAsm())
3993 verifyInlineAsmCall(Call);
3994
3995 ConvergenceVerifyHelper.visit(Call);
3996
3997 visitInstruction(Call);
3998}
3999
4000void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4001 StringRef Context) {
4002 Check(!Attrs.contains(Attribute::InAlloca),
4003 Twine("inalloca attribute not allowed in ") + Context);
4004 Check(!Attrs.contains(Attribute::InReg),
4005 Twine("inreg attribute not allowed in ") + Context);
4006 Check(!Attrs.contains(Attribute::SwiftError),
4007 Twine("swifterror attribute not allowed in ") + Context);
4008 Check(!Attrs.contains(Attribute::Preallocated),
4009 Twine("preallocated attribute not allowed in ") + Context);
4010 Check(!Attrs.contains(Attribute::ByRef),
4011 Twine("byref attribute not allowed in ") + Context);
4012}
4013
4014/// Two types are "congruent" if they are identical, or if they are both pointer
4015/// types with different pointee types and the same address space.
4016static bool isTypeCongruent(Type *L, Type *R) {
4017 if (L == R)
4018 return true;
4021 if (!PL || !PR)
4022 return false;
4023 return PL->getAddressSpace() == PR->getAddressSpace();
4024}
4025
4026static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4027 static const Attribute::AttrKind ABIAttrs[] = {
4028 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4029 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4030 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4031 Attribute::ByRef};
4032 AttrBuilder Copy(C);
4033 for (auto AK : ABIAttrs) {
4034 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4035 if (Attr.isValid())
4036 Copy.addAttribute(Attr);
4037 }
4038
4039 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4040 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4041 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4042 Attrs.hasParamAttr(I, Attribute::ByRef)))
4043 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4044 return Copy;
4045}
4046
4047void Verifier::verifyMustTailCall(CallInst &CI) {
4048 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4049
4050 Function *F = CI.getParent()->getParent();
4051 FunctionType *CallerTy = F->getFunctionType();
4052 FunctionType *CalleeTy = CI.getFunctionType();
4053 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4054 "cannot guarantee tail call due to mismatched varargs", &CI);
4055 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4056 "cannot guarantee tail call due to mismatched return types", &CI);
4057
4058 // - The calling conventions of the caller and callee must match.
4059 Check(F->getCallingConv() == CI.getCallingConv(),
4060 "cannot guarantee tail call due to mismatched calling conv", &CI);
4061
4062 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4063 // or a pointer bitcast followed by a ret instruction.
4064 // - The ret instruction must return the (possibly bitcasted) value
4065 // produced by the call or void.
4066 Value *RetVal = &CI;
4068
4069 // Handle the optional bitcast.
4070 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4071 Check(BI->getOperand(0) == RetVal,
4072 "bitcast following musttail call must use the call", BI);
4073 RetVal = BI;
4074 Next = BI->getNextNode();
4075 }
4076
4077 // Check the return.
4078 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4079 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4080 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4081 isa<UndefValue>(Ret->getReturnValue()),
4082 "musttail call result must be returned", Ret);
4083
4084 AttributeList CallerAttrs = F->getAttributes();
4085 AttributeList CalleeAttrs = CI.getAttributes();
4086 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4087 CI.getCallingConv() == CallingConv::Tail) {
4088 StringRef CCName =
4089 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4090
4091 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4092 // are allowed in swifttailcc call
4093 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4094 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4095 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4096 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4097 }
4098 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4099 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4100 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4101 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4102 }
4103 // - Varargs functions are not allowed
4104 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4105 " tail call for varargs function");
4106 return;
4107 }
4108
4109 // - The caller and callee prototypes must match. Pointer types of
4110 // parameters or return types may differ in pointee type, but not
4111 // address space.
4112 if (!CI.getIntrinsicID()) {
4113 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4114 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4115 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4116 Check(
4117 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4118 "cannot guarantee tail call due to mismatched parameter types", &CI);
4119 }
4120 }
4121
4122 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4123 // returned, preallocated, and inalloca, must match.
4124 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4125 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4126 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4127 Check(CallerABIAttrs == CalleeABIAttrs,
4128 "cannot guarantee tail call due to mismatched ABI impacting "
4129 "function attributes",
4130 &CI, CI.getOperand(I));
4131 }
4132}
4133
4134void Verifier::visitCallInst(CallInst &CI) {
4135 visitCallBase(CI);
4136
4137 if (CI.isMustTailCall())
4138 verifyMustTailCall(CI);
4139}
4140
4141void Verifier::visitInvokeInst(InvokeInst &II) {
4142 visitCallBase(II);
4143
4144 // Verify that the first non-PHI instruction of the unwind destination is an
4145 // exception handling instruction.
4146 Check(
4147 II.getUnwindDest()->isEHPad(),
4148 "The unwind destination does not have an exception handling instruction!",
4149 &II);
4150
4151 visitTerminator(II);
4152}
4153
4154/// visitUnaryOperator - Check the argument to the unary operator.
4155///
4156void Verifier::visitUnaryOperator(UnaryOperator &U) {
4157 Check(U.getType() == U.getOperand(0)->getType(),
4158 "Unary operators must have same type for"
4159 "operands and result!",
4160 &U);
4161
4162 switch (U.getOpcode()) {
4163 // Check that floating-point arithmetic operators are only used with
4164 // floating-point operands.
4165 case Instruction::FNeg:
4166 Check(U.getType()->isFPOrFPVectorTy(),
4167 "FNeg operator only works with float types!", &U);
4168 break;
4169 default:
4170 llvm_unreachable("Unknown UnaryOperator opcode!");
4171 }
4172
4173 visitInstruction(U);
4174}
4175
4176/// visitBinaryOperator - Check that both arguments to the binary operator are
4177/// of the same type!
4178///
4179void Verifier::visitBinaryOperator(BinaryOperator &B) {
4180 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4181 "Both operands to a binary operator are not of the same type!", &B);
4182
4183 switch (B.getOpcode()) {
4184 // Check that integer arithmetic operators are only used with
4185 // integral operands.
4186 case Instruction::Add:
4187 case Instruction::Sub:
4188 case Instruction::Mul:
4189 case Instruction::SDiv:
4190 case Instruction::UDiv:
4191 case Instruction::SRem:
4192 case Instruction::URem:
4193 Check(B.getType()->isIntOrIntVectorTy(),
4194 "Integer arithmetic operators only work with integral types!", &B);
4195 Check(B.getType() == B.getOperand(0)->getType(),
4196 "Integer arithmetic operators must have same type "
4197 "for operands and result!",
4198 &B);
4199 break;
4200 // Check that floating-point arithmetic operators are only used with
4201 // floating-point operands.
4202 case Instruction::FAdd:
4203 case Instruction::FSub:
4204 case Instruction::FMul:
4205 case Instruction::FDiv:
4206 case Instruction::FRem:
4207 Check(B.getType()->isFPOrFPVectorTy(),
4208 "Floating-point arithmetic operators only work with "
4209 "floating-point types!",
4210 &B);
4211 Check(B.getType() == B.getOperand(0)->getType(),
4212 "Floating-point arithmetic operators must have same type "
4213 "for operands and result!",
4214 &B);
4215 break;
4216 // Check that logical operators are only used with integral operands.
4217 case Instruction::And:
4218 case Instruction::Or:
4219 case Instruction::Xor:
4220 Check(B.getType()->isIntOrIntVectorTy(),
4221 "Logical operators only work with integral types!", &B);
4222 Check(B.getType() == B.getOperand(0)->getType(),
4223 "Logical operators must have same type for operands and result!", &B);
4224 break;
4225 case Instruction::Shl:
4226 case Instruction::LShr:
4227 case Instruction::AShr:
4228 Check(B.getType()->isIntOrIntVectorTy(),
4229 "Shifts only work with integral types!", &B);
4230 Check(B.getType() == B.getOperand(0)->getType(),
4231 "Shift return type must be same as operands!", &B);
4232 break;
4233 default:
4234 llvm_unreachable("Unknown BinaryOperator opcode!");
4235 }
4236
4237 visitInstruction(B);
4238}
4239
4240void Verifier::visitICmpInst(ICmpInst &IC) {
4241 // Check that the operands are the same type
4242 Type *Op0Ty = IC.getOperand(0)->getType();
4243 Type *Op1Ty = IC.getOperand(1)->getType();
4244 Check(Op0Ty == Op1Ty,
4245 "Both operands to ICmp instruction are not of the same type!", &IC);
4246 // Check that the operands are the right type
4247 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4248 "Invalid operand types for ICmp instruction", &IC);
4249 // Check that the predicate is valid.
4250 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4251
4252 visitInstruction(IC);
4253}
4254
4255void Verifier::visitFCmpInst(FCmpInst &FC) {
4256 // Check that the operands are the same type
4257 Type *Op0Ty = FC.getOperand(0)->getType();
4258 Type *Op1Ty = FC.getOperand(1)->getType();
4259 Check(Op0Ty == Op1Ty,
4260 "Both operands to FCmp instruction are not of the same type!", &FC);
4261 // Check that the operands are the right type
4262 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4263 &FC);
4264 // Check that the predicate is valid.
4265 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4266
4267 visitInstruction(FC);
4268}
4269
4270void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4272 "Invalid extractelement operands!", &EI);
4273 visitInstruction(EI);
4274}
4275
4276void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4277 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4278 IE.getOperand(2)),
4279 "Invalid insertelement operands!", &IE);
4280 visitInstruction(IE);
4281}
4282
4283void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4285 SV.getShuffleMask()),
4286 "Invalid shufflevector operands!", &SV);
4287 visitInstruction(SV);
4288}
4289
4290void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4291 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4292
4293 Check(isa<PointerType>(TargetTy),
4294 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4295 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4296
4297 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4298 Check(!STy->isScalableTy(),
4299 "getelementptr cannot target structure that contains scalable vector"
4300 "type",
4301 &GEP);
4302 }
4303
4304 SmallVector<Value *, 16> Idxs(GEP.indices());
4305 Check(
4306 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4307 "GEP indexes must be integers", &GEP);
4308 Type *ElTy =
4309 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4310 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4311
4312 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4313
4314 Check(PtrTy && GEP.getResultElementType() == ElTy,
4315 "GEP is not of right type for indices!", &GEP, ElTy);
4316
4317 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4318 // Additional checks for vector GEPs.
4319 ElementCount GEPWidth = GEPVTy->getElementCount();
4320 if (GEP.getPointerOperandType()->isVectorTy())
4321 Check(
4322 GEPWidth ==
4323 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4324 "Vector GEP result width doesn't match operand's", &GEP);
4325 for (Value *Idx : Idxs) {
4326 Type *IndexTy = Idx->getType();
4327 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4328 ElementCount IndexWidth = IndexVTy->getElementCount();
4329 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4330 }
4331 Check(IndexTy->isIntOrIntVectorTy(),
4332 "All GEP indices should be of integer type");
4333 }
4334 }
4335
4336 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4337 "GEP address space doesn't match type", &GEP);
4338
4339 visitInstruction(GEP);
4340}
4341
4342static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4343 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4344}
4345
4346/// Verify !range and !absolute_symbol metadata. These have the same
4347/// restrictions, except !absolute_symbol allows the full set.
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata is a flat list of (low, high) constant-int pairs. Pairs must
/// be non-empty (except absolute_symbol, which may use the full/empty
/// encoding), pairwise disjoint, sorted by signed lower bound, and
/// non-contiguous (adjacent pairs would be a single pair). For
/// noalias.addrspace the pair type is fixed at i32; otherwise it must match
/// the scalar type of the annotated value.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  unsigned NumOperands = Range->getNumOperands();
  // Operands come in (low, high) pairs.
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address spaces are always encoded as i32.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only absolute_symbol may encode the full set (low == high == min/max).
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Each pair must be disjoint from, ordered after, and not adjacent to
      // its predecessor.
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With more than two pairs the last pair may wrap around and collide with
  // the first one, which the in-loop neighbor checks above never compare.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4410
/// Verify an instruction's !range attachment. Caller must pass the node that
/// is actually attached as MD_range on I (asserted below), along with the
/// type the range constrains.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4416
/// Verify an instruction's !noalias.addrspace attachment, which reuses the
/// range-list encoding but with pairs fixed at i32 (address spaces).
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4424
4425void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4426 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4427 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4428 Check(!(Size & (Size - 1)),
4429 "atomic memory access' operand must have a power-of-two size", Ty, I);
4430}
4431
4432void Verifier::visitLoadInst(LoadInst &LI) {
4434 Check(PTy, "Load operand must be a pointer.", &LI);
4435 Type *ElTy = LI.getType();
4436 if (MaybeAlign A = LI.getAlign()) {
4437 Check(A->value() <= Value::MaximumAlignment,
4438 "huge alignment values are unsupported", &LI);
4439 }
4440 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4441 if (LI.isAtomic()) {
4442 Check(LI.getOrdering() != AtomicOrdering::Release &&
4443 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4444 "Load cannot have Release ordering", &LI);
4445 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4447 "atomic load operand must have integer, pointer, floating point, "
4448 "or vector type!",
4449 ElTy, &LI);
4450
4451 checkAtomicMemAccessSize(ElTy, &LI);
4452 } else {
4454 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4455 }
4456
4457 visitInstruction(LI);
4458}
4459
4460void Verifier::visitStoreInst(StoreInst &SI) {
4461 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4462 Check(PTy, "Store operand must be a pointer.", &SI);
4463 Type *ElTy = SI.getOperand(0)->getType();
4464 if (MaybeAlign A = SI.getAlign()) {
4465 Check(A->value() <= Value::MaximumAlignment,
4466 "huge alignment values are unsupported", &SI);
4467 }
4468 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4469 if (SI.isAtomic()) {
4470 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4471 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4472 "Store cannot have Acquire ordering", &SI);
4473 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4475 "atomic store operand must have integer, pointer, floating point, "
4476 "or vector type!",
4477 ElTy, &SI);
4478 checkAtomicMemAccessSize(ElTy, &SI);
4479 } else {
4480 Check(SI.getSyncScopeID() == SyncScope::System,
4481 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4482 }
4483 visitInstruction(SI);
4484}
4485
4486/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4487void Verifier::verifySwiftErrorCall(CallBase &Call,
4488 const Value *SwiftErrorVal) {
4489 for (const auto &I : llvm::enumerate(Call.args())) {
4490 if (I.value() == SwiftErrorVal) {
4491 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4492 "swifterror value when used in a callsite should be marked "
4493 "with swifterror attribute",
4494 SwiftErrorVal, Call);
4495 }
4496 }
4497}
4498
4499void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4500 // Check that swifterror value is only used by loads, stores, or as
4501 // a swifterror argument.
4502 for (const User *U : SwiftErrorVal->users()) {
4504 isa<InvokeInst>(U),
4505 "swifterror value can only be loaded and stored from, or "
4506 "as a swifterror argument!",
4507 SwiftErrorVal, U);
4508 // If it is used by a store, check it is the second operand.
4509 if (auto StoreI = dyn_cast<StoreInst>(U))
4510 Check(StoreI->getOperand(1) == SwiftErrorVal,
4511 "swifterror value should be the second operand when used "
4512 "by stores",
4513 SwiftErrorVal, U);
4514 if (auto *Call = dyn_cast<CallBase>(U))
4515 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4516 }
4517}
4518
4519void Verifier::visitAllocaInst(AllocaInst &AI) {
4520 Type *Ty = AI.getAllocatedType();
4521 SmallPtrSet<Type*, 4> Visited;
4522 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4523 // Check if it's a target extension type that disallows being used on the
4524 // stack.
4526 "Alloca has illegal target extension type", &AI);
4528 "Alloca array size must have integer type", &AI);
4529 if (MaybeAlign A = AI.getAlign()) {
4530 Check(A->value() <= Value::MaximumAlignment,
4531 "huge alignment values are unsupported", &AI);
4532 }
4533
4534 if (AI.isSwiftError()) {
4535 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4537 "swifterror alloca must not be array allocation", &AI);
4538 verifySwiftErrorValue(&AI);
4539 }
4540
4541 if (TT.isAMDGPU()) {
4543 "alloca on amdgpu must be in addrspace(5)", &AI);
4544 }
4545
4546 visitInstruction(AI);
4547}
4548
4549void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4550 Type *ElTy = CXI.getOperand(1)->getType();
4551 Check(ElTy->isIntOrPtrTy(),
4552 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4553 checkAtomicMemAccessSize(ElTy, &CXI);
4554 visitInstruction(CXI);
4555}
4556
4557void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4558 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4559 "atomicrmw instructions cannot be unordered.", &RMWI);
4560 auto Op = RMWI.getOperation();
4561 Type *ElTy = RMWI.getOperand(1)->getType();
4562 if (Op == AtomicRMWInst::Xchg) {
4563 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4564 ElTy->isPointerTy(),
4565 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4566 " operand must have integer or floating point type!",
4567 &RMWI, ElTy);
4568 } else if (AtomicRMWInst::isFPOperation(Op)) {
4570 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4571 " operand must have floating-point or fixed vector of floating-point "
4572 "type!",
4573 &RMWI, ElTy);
4574 } else {
4575 Check(ElTy->isIntegerTy(),
4576 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4577 " operand must have integer type!",
4578 &RMWI, ElTy);
4579 }
4580 checkAtomicMemAccessSize(ElTy, &RMWI);
4582 "Invalid binary operation!", &RMWI);
4583 visitInstruction(RMWI);
4584}
4585
4586void Verifier::visitFenceInst(FenceInst &FI) {
4587 const AtomicOrdering Ordering = FI.getOrdering();
4588 Check(Ordering == AtomicOrdering::Acquire ||
4589 Ordering == AtomicOrdering::Release ||
4590 Ordering == AtomicOrdering::AcquireRelease ||
4591 Ordering == AtomicOrdering::SequentiallyConsistent,
4592 "fence instructions may only have acquire, release, acq_rel, or "
4593 "seq_cst ordering.",
4594 &FI);
4595 visitInstruction(FI);
4596}
4597
4598void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4600 EVI.getIndices()) == EVI.getType(),
4601 "Invalid ExtractValueInst operands!", &EVI);
4602
4603 visitInstruction(EVI);
4604}
4605
4606void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4608 IVI.getIndices()) ==
4609 IVI.getOperand(1)->getType(),
4610 "Invalid InsertValueInst operands!", &IVI);
4611
4612 visitInstruction(IVI);
4613}
4614
4615static Value *getParentPad(Value *EHPad) {
4616 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4617 return FPI->getParentPad();
4618
4619 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4620}
4621
4622void Verifier::visitEHPadPredecessors(Instruction &I) {
4623 assert(I.isEHPad());
4624
4625 BasicBlock *BB = I.getParent();
4626 Function *F = BB->getParent();
4627
4628 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4629
4630 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4631 // The landingpad instruction defines its parent as a landing pad block. The
4632 // landing pad block may be branched to only by the unwind edge of an
4633 // invoke.
4634 for (BasicBlock *PredBB : predecessors(BB)) {
4635 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4636 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4637 "Block containing LandingPadInst must be jumped to "
4638 "only by the unwind edge of an invoke.",
4639 LPI);
4640 }
4641 return;
4642 }
4643 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4644 if (!pred_empty(BB))
4645 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4646 "Block containg CatchPadInst must be jumped to "
4647 "only by its catchswitch.",
4648 CPI);
4649 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4650 "Catchswitch cannot unwind to one of its catchpads",
4651 CPI->getCatchSwitch(), CPI);
4652 return;
4653 }
4654
4655 // Verify that each pred has a legal terminator with a legal to/from EH
4656 // pad relationship.
4657 Instruction *ToPad = &I;
4658 Value *ToPadParent = getParentPad(ToPad);
4659 for (BasicBlock *PredBB : predecessors(BB)) {
4660 Instruction *TI = PredBB->getTerminator();
4661 Value *FromPad;
4662 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4663 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4664 "EH pad must be jumped to via an unwind edge", ToPad, II);
4665 auto *CalledFn =
4666 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4667 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4668 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4669 continue;
4670 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4671 FromPad = Bundle->Inputs[0];
4672 else
4673 FromPad = ConstantTokenNone::get(II->getContext());
4674 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4675 FromPad = CRI->getOperand(0);
4676 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4677 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4678 FromPad = CSI;
4679 } else {
4680 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4681 }
4682
4683 // The edge may exit from zero or more nested pads.
4684 SmallPtrSet<Value *, 8> Seen;
4685 for (;; FromPad = getParentPad(FromPad)) {
4686 Check(FromPad != ToPad,
4687 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4688 if (FromPad == ToPadParent) {
4689 // This is a legal unwind edge.
4690 break;
4691 }
4692 Check(!isa<ConstantTokenNone>(FromPad),
4693 "A single unwind edge may only enter one EH pad", TI);
4694 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4695 FromPad);
4696
4697 // This will be diagnosed on the corresponding instruction already. We
4698 // need the extra check here to make sure getParentPad() works.
4699 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4700 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4701 }
4702 }
4703}
4704
4705void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4706 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4707 // isn't a cleanup.
4708 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4709 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4710
4711 visitEHPadPredecessors(LPI);
4712
4713 if (!LandingPadResultTy)
4714 LandingPadResultTy = LPI.getType();
4715 else
4716 Check(LandingPadResultTy == LPI.getType(),
4717 "The landingpad instruction should have a consistent result type "
4718 "inside a function.",
4719 &LPI);
4720
4721 Function *F = LPI.getParent()->getParent();
4722 Check(F->hasPersonalityFn(),
4723 "LandingPadInst needs to be in a function with a personality.", &LPI);
4724
4725 // The landingpad instruction must be the first non-PHI instruction in the
4726 // block.
4727 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4728 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4729
4730 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4731 Constant *Clause = LPI.getClause(i);
4732 if (LPI.isCatch(i)) {
4733 Check(isa<PointerType>(Clause->getType()),
4734 "Catch operand does not have pointer type!", &LPI);
4735 } else {
4736 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4738 "Filter operand is not an array of constants!", &LPI);
4739 }
4740 }
4741
4742 visitInstruction(LPI);
4743}
4744
4745void Verifier::visitResumeInst(ResumeInst &RI) {
4747 "ResumeInst needs to be in a function with a personality.", &RI);
4748
4749 if (!LandingPadResultTy)
4750 LandingPadResultTy = RI.getValue()->getType();
4751 else
4752 Check(LandingPadResultTy == RI.getValue()->getType(),
4753 "The resume instruction should have a consistent result type "
4754 "inside a function.",
4755 &RI);
4756
4757 visitTerminator(RI);
4758}
4759
4760void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4761 BasicBlock *BB = CPI.getParent();
4762
4763 Function *F = BB->getParent();
4764 Check(F->hasPersonalityFn(),
4765 "CatchPadInst needs to be in a function with a personality.", &CPI);
4766
4768 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4769 CPI.getParentPad());
4770
4771 // The catchpad instruction must be the first non-PHI instruction in the
4772 // block.
4773 Check(&*BB->getFirstNonPHIIt() == &CPI,
4774 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4775
4776 visitEHPadPredecessors(CPI);
4777 visitFuncletPadInst(CPI);
4778}
4779
4780void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4781 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4782 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4783 CatchReturn.getOperand(0));
4784
4785 visitTerminator(CatchReturn);
4786}
4787
4788void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4789 BasicBlock *BB = CPI.getParent();
4790
4791 Function *F = BB->getParent();
4792 Check(F->hasPersonalityFn(),
4793 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4794
4795 // The cleanuppad instruction must be the first non-PHI instruction in the
4796 // block.
4797 Check(&*BB->getFirstNonPHIIt() == &CPI,
4798 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4799
4800 auto *ParentPad = CPI.getParentPad();
4801 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4802 "CleanupPadInst has an invalid parent.", &CPI);
4803
4804 visitEHPadPredecessors(CPI);
4805 visitFuncletPadInst(CPI);
4806}
4807
// Verify a catchpad/cleanuppad funclet. Walks every use of FPI -- and,
// transitively, of any cleanuppads nested inside it -- to discover all unwind
// edges that exit FPI, checking that they all agree on one unwind destination
// (and, for a catchpad, that this destination matches the parent
// catchswitch's unwind destination).
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  User *FirstUser = nullptr;       // First use found that exits FPI.
  Value *FirstUnwindPad = nullptr; // Unwind dest established by FirstUser.
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Classify the use and extract its unwind destination, if any.
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // Translate the destination block into the pad (or token-none) it
      // enters, and decide whether the edge actually exits FPI.
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's unwind edges must additionally agree with the unwind dest of
  // its parent catchswitch.
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
4967
4968void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4969 BasicBlock *BB = CatchSwitch.getParent();
4970
4971 Function *F = BB->getParent();
4972 Check(F->hasPersonalityFn(),
4973 "CatchSwitchInst needs to be in a function with a personality.",
4974 &CatchSwitch);
4975
4976 // The catchswitch instruction must be the first non-PHI instruction in the
4977 // block.
4978 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
4979 "CatchSwitchInst not the first non-PHI instruction in the block.",
4980 &CatchSwitch);
4981
4982 auto *ParentPad = CatchSwitch.getParentPad();
4983 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4984 "CatchSwitchInst has an invalid parent.", ParentPad);
4985
4986 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4987 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
4988 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4989 "CatchSwitchInst must unwind to an EH block which is not a "
4990 "landingpad.",
4991 &CatchSwitch);
4992
4993 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4994 if (getParentPad(&*I) == ParentPad)
4995 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4996 }
4997
4998 Check(CatchSwitch.getNumHandlers() != 0,
4999 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5000
5001 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5002 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5003 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5004 }
5005
5006 visitEHPadPredecessors(CatchSwitch);
5007 visitTerminator(CatchSwitch);
5008}
5009
5010void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5012 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5013 CRI.getOperand(0));
5014
5015 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5016 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5017 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5018 "CleanupReturnInst must unwind to an EH block which is not a "
5019 "landingpad.",
5020 &CRI);
5021 }
5022
5023 visitTerminator(CRI);
5024}
5025
// Verify that the definition of operand i of I dominates the use, i.e. the
// SSA dominance property holds for this (def, use) pair.
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
  Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke specific checks and the dominance
  // computation doesn't handle multiple edges.
  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
    if (II->getNormalDest() == II->getUnwindDest())
      return;
  }

  // Quick check whether the def has already been encountered in the same block.
  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
  // uses are defined to happen on the incoming edge, not at the instruction.
  //
  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
  // wrapping an SSA value, assert that we've already encountered it. See
  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
    return;

  // Fall back to the (more expensive) dominator-tree query.
  const Use &U = I.getOperandUse(i);
  Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
}
5049
5050void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5051 Check(I.getType()->isPointerTy(),
5052 "dereferenceable, dereferenceable_or_null "
5053 "apply only to pointer types",
5054 &I);
5056 "dereferenceable, dereferenceable_or_null apply only to load"
5057 " and inttoptr instructions, use attributes for calls or invokes",
5058 &I);
5059 Check(MD->getNumOperands() == 1,
5060 "dereferenceable, dereferenceable_or_null "
5061 "take one operand!",
5062 &I);
5063 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5064 Check(CI && CI->getType()->isIntegerTy(64),
5065 "dereferenceable, "
5066 "dereferenceable_or_null metadata value must be an i64!",
5067 &I);
5068}
5069
5070void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5071 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5072 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5073 &I);
5074 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5075}
5076
5077void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5078 auto GetBranchingTerminatorNumOperands = [&]() {
5079 unsigned ExpectedNumOperands = 0;
5080 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5081 ExpectedNumOperands = BI->getNumSuccessors();
5082 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5083 ExpectedNumOperands = SI->getNumSuccessors();
5084 else if (isa<CallInst>(&I))
5085 ExpectedNumOperands = 1;
5086 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5087 ExpectedNumOperands = IBI->getNumDestinations();
5088 else if (isa<SelectInst>(&I))
5089 ExpectedNumOperands = 2;
5090 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5091 ExpectedNumOperands = CI->getNumSuccessors();
5092 return ExpectedNumOperands;
5093 };
5094 Check(MD->getNumOperands() >= 1,
5095 "!prof annotations should have at least 1 operand", MD);
5096 // Check first operand.
5097 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5099 "expected string with name of the !prof annotation", MD);
5100 MDString *MDS = cast<MDString>(MD->getOperand(0));
5101 StringRef ProfName = MDS->getString();
5102
5104 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5105 "'unknown' !prof should only appear on instructions on which "
5106 "'branch_weights' would",
5107 MD);
5108 verifyUnknownProfileMetadata(MD);
5109 return;
5110 }
5111
5112 Check(MD->getNumOperands() >= 2,
5113 "!prof annotations should have no less than 2 operands", MD);
5114
5115 // Check consistency of !prof branch_weights metadata.
5116 if (ProfName == MDProfLabels::BranchWeights) {
5117 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5118 if (isa<InvokeInst>(&I)) {
5119 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5120 "Wrong number of InvokeInst branch_weights operands", MD);
5121 } else {
5122 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5123 if (ExpectedNumOperands == 0)
5124 CheckFailed("!prof branch_weights are not allowed for this instruction",
5125 MD);
5126
5127 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5128 MD);
5129 }
5130 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5131 ++i) {
5132 auto &MDO = MD->getOperand(i);
5133 Check(MDO, "second operand should not be null", MD);
5135 "!prof brunch_weights operand is not a const int");
5136 }
5137 } else if (ProfName == MDProfLabels::ValueProfile) {
5138 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5139 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5140 Check(KindInt, "VP !prof missing kind argument", MD);
5141
5142 auto Kind = KindInt->getZExtValue();
5143 Check(Kind >= InstrProfValueKind::IPVK_First &&
5144 Kind <= InstrProfValueKind::IPVK_Last,
5145 "Invalid VP !prof kind", MD);
5146 Check(MD->getNumOperands() % 2 == 1,
5147 "VP !prof should have an even number "
5148 "of arguments after 'VP'",
5149 MD);
5150 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5151 Kind == InstrProfValueKind::IPVK_MemOPSize)
5153 "VP !prof indirect call or memop size expected to be applied to "
5154 "CallBase instructions only",
5155 MD);
5156 } else {
5157 CheckFailed("expected either branch_weights or VP profile name", MD);
5158 }
5159}
5160
5161void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5162 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5163 // DIAssignID metadata must be attached to either an alloca or some form of
5164 // store/memory-writing instruction.
5165 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5166 // possible store intrinsics.
5167 bool ExpectedInstTy =
5169 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5170 I, MD);
5171 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5172 // only be found as DbgAssignIntrinsic operands.
5173 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5174 for (auto *User : AsValue->users()) {
5176 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5177 MD, User);
5178 // All of the dbg.assign intrinsics should be in the same function as I.
5179 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5180 CheckDI(DAI->getFunction() == I.getFunction(),
5181 "dbg.assign not in same function as inst", DAI, &I);
5182 }
5183 }
5184 for (DbgVariableRecord *DVR :
5185 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5186 CheckDI(DVR->isDbgAssign(),
5187 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5188 CheckDI(DVR->getFunction() == I.getFunction(),
5189 "DVRAssign not in same function as inst", DVR, &I);
5190 }
5191}
5192
5193void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5195 "!mmra metadata attached to unexpected instruction kind", I, MD);
5196
5197 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5198 // list of tags such as !2 in the following example:
5199 // !0 = !{!"a", !"b"}
5200 // !1 = !{!"c", !"d"}
5201 // !2 = !{!0, !1}
5202 if (MMRAMetadata::isTagMD(MD))
5203 return;
5204
5205 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5206 for (const MDOperand &MDOp : MD->operands())
5207 Check(MMRAMetadata::isTagMD(MDOp.get()),
5208 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5209}
5210
5211void Verifier::visitCallStackMetadata(MDNode *MD) {
5212 // Call stack metadata should consist of a list of at least 1 constant int
5213 // (representing a hash of the location).
5214 Check(MD->getNumOperands() >= 1,
5215 "call stack metadata should have at least 1 operand", MD);
5216
5217 for (const auto &Op : MD->operands())
5219 "call stack metadata operand should be constant integer", Op);
5220}
5221
5222void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5223 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5224 Check(MD->getNumOperands() >= 1,
5225 "!memprof annotations should have at least 1 metadata operand "
5226 "(MemInfoBlock)",
5227 MD);
5228
5229 // Check each MIB
5230 for (auto &MIBOp : MD->operands()) {
5231 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5232 // The first operand of an MIB should be the call stack metadata.
5233 // There rest of the operands should be MDString tags, and there should be
5234 // at least one.
5235 Check(MIB->getNumOperands() >= 2,
5236 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5237
5238 // Check call stack metadata (first operand).
5239 Check(MIB->getOperand(0) != nullptr,
5240 "!memprof MemInfoBlock first operand should not be null", MIB);
5241 Check(isa<MDNode>(MIB->getOperand(0)),
5242 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5243 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5244 visitCallStackMetadata(StackMD);
5245
5246 // The next set of 1 or more operands should be MDString.
5247 unsigned I = 1;
5248 for (; I < MIB->getNumOperands(); ++I) {
5249 if (!isa<MDString>(MIB->getOperand(I))) {
5250 Check(I > 1,
5251 "!memprof MemInfoBlock second operand should be an MDString",
5252 MIB);
5253 break;
5254 }
5255 }
5256
5257 // Any remaining should be MDNode that are pairs of integers
5258 for (; I < MIB->getNumOperands(); ++I) {
5259 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5260 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5261 MIB);
5262 Check(OpNode->getNumOperands() == 2,
5263 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5264 "operands",
5265 MIB);
5266 // Check that all of Op's operands are ConstantInt.
5267 Check(llvm::all_of(OpNode->operands(),
5268 [](const MDOperand &Op) {
5269 return mdconst::hasa<ConstantInt>(Op);
5270 }),
5271 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5272 "ConstantInt operands",
5273 MIB);
5274 }
5275 }
5276}
5277
5278void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5279 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5280 // Verify the partial callstack annotated from memprof profiles. This callsite
5281 // is a part of a profiled allocation callstack.
5282 visitCallStackMetadata(MD);
5283}
5284
5285static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5286 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5287 return isa<ConstantInt>(VAL->getValue());
5288 return false;
5289}
5290
5291void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5292 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5293 &I);
5294 for (Metadata *Op : MD->operands()) {
5296 "The callee_type metadata must be a list of type metadata nodes", Op);
5297 auto *TypeMD = cast<MDNode>(Op);
5298 Check(TypeMD->getNumOperands() == 2,
5299 "Well-formed generalized type metadata must contain exactly two "
5300 "operands",
5301 Op);
5302 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5303 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5304 "The first operand of type metadata for functions must be zero", Op);
5305 Check(TypeMD->hasGeneralizedMDString(),
5306 "Only generalized type metadata can be part of the callee_type "
5307 "metadata list",
5308 Op);
5309 }
5310}
5311
5312void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5313 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5314 Check(Annotation->getNumOperands() >= 1,
5315 "annotation must have at least one operand");
5316 for (const MDOperand &Op : Annotation->operands()) {
5317 bool TupleOfStrings =
5318 isa<MDTuple>(Op.get()) &&
5319 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5320 return isa<MDString>(Annotation.get());
5321 });
5322 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5323 "operands must be a string or a tuple of strings");
5324 }
5325}
5326
5327void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5328 unsigned NumOps = MD->getNumOperands();
5329 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5330 MD);
5331 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5332 "first scope operand must be self-referential or string", MD);
5333 if (NumOps == 3)
5335 "third scope operand must be string (if used)", MD);
5336
5337 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5338 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5339
5340 unsigned NumDomainOps = Domain->getNumOperands();
5341 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5342 "domain must have one or two operands", Domain);
5343 Check(Domain->getOperand(0).get() == Domain ||
5344 isa<MDString>(Domain->getOperand(0)),
5345 "first domain operand must be self-referential or string", Domain);
5346 if (NumDomainOps == 2)
5347 Check(isa<MDString>(Domain->getOperand(1)),
5348 "second domain operand must be string (if used)", Domain);
5349}
5350
5351void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5352 for (const MDOperand &Op : MD->operands()) {
5353 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5354 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5355 visitAliasScopeMetadata(OpMD);
5356 }
5357}
5358
5359void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5360 auto IsValidAccessScope = [](const MDNode *MD) {
5361 return MD->getNumOperands() == 0 && MD->isDistinct();
5362 };
5363
5364 // It must be either an access scope itself...
5365 if (IsValidAccessScope(MD))
5366 return;
5367
5368 // ...or a list of access scopes.
5369 for (const MDOperand &Op : MD->operands()) {
5370 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5371 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5372 Check(IsValidAccessScope(OpMD),
5373 "Access scope list contains invalid access scope", MD);
5374 }
5375}
5376
5377void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5378 static const char *ValidArgs[] = {"address_is_null", "address",
5379 "read_provenance", "provenance"};
5380
5381 auto *SI = dyn_cast<StoreInst>(&I);
5382 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5383 Check(SI->getValueOperand()->getType()->isPointerTy(),
5384 "!captures metadata can only be applied to store with value operand of "
5385 "pointer type",
5386 &I);
5387 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5388 &I);
5389
5390 for (Metadata *Op : Captures->operands()) {
5391 auto *Str = dyn_cast<MDString>(Op);
5392 Check(Str, "!captures metadata must be a list of strings", &I);
5393 Check(is_contained(ValidArgs, Str->getString()),
5394 "invalid entry in !captures metadata", &I, Str);
5395 }
5396}
5397
5398void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5399 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5400 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5401 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5403 "expected integer constant", MD);
5404}
5405
5406/// verifyInstruction - Verify that an instruction is well formed.
5407///
5408void Verifier::visitInstruction(Instruction &I) {
5409 BasicBlock *BB = I.getParent();
5410 Check(BB, "Instruction not embedded in basic block!", &I);
5411
5412 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5413 for (User *U : I.users()) {
5414 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5415 "Only PHI nodes may reference their own value!", &I);
5416 }
5417 }
5418
5419 // Check that void typed values don't have names
5420 Check(!I.getType()->isVoidTy() || !I.hasName(),
5421 "Instruction has a name, but provides a void value!", &I);
5422
5423 // Check that the return value of the instruction is either void or a legal
5424 // value type.
5425 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5426 "Instruction returns a non-scalar type!", &I);
5427
5428 // Check that the instruction doesn't produce metadata. Calls are already
5429 // checked against the callee type.
5430 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5431 "Invalid use of metadata!", &I);
5432
5433 // Check that all uses of the instruction, if they are instructions
5434 // themselves, actually have parent basic blocks. If the use is not an
5435 // instruction, it is an error!
5436 for (Use &U : I.uses()) {
5437 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5438 Check(Used->getParent() != nullptr,
5439 "Instruction referencing"
5440 " instruction not embedded in a basic block!",
5441 &I, Used);
5442 else {
5443 CheckFailed("Use of instruction is not an instruction!", U);
5444 return;
5445 }
5446 }
5447
5448 // Get a pointer to the call base of the instruction if it is some form of
5449 // call.
5450 const CallBase *CBI = dyn_cast<CallBase>(&I);
5451
5452 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5453 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5454
5455 // Check to make sure that only first-class-values are operands to
5456 // instructions.
5457 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5458 Check(false, "Instruction operands must be first-class values!", &I);
5459 }
5460
5461 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5462 // This code checks whether the function is used as the operand of a
5463 // clang_arc_attachedcall operand bundle.
5464 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5465 int Idx) {
5466 return CBI && CBI->isOperandBundleOfType(
5468 };
5469
5470 // Check to make sure that the "address of" an intrinsic function is never
5471 // taken. Ignore cases where the address of the intrinsic function is used
5472 // as the argument of operand bundle "clang.arc.attachedcall" as those
5473 // cases are handled in verifyAttachedCallBundle.
5474 Check((!F->isIntrinsic() ||
5475 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5476 IsAttachedCallOperand(F, CBI, i)),
5477 "Cannot take the address of an intrinsic!", &I);
5478 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5479 F->getIntrinsicID() == Intrinsic::donothing ||
5480 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5481 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5482 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5483 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5484 F->getIntrinsicID() == Intrinsic::coro_resume ||
5485 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5486 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5487 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5488 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5489 F->getIntrinsicID() ==
5490 Intrinsic::experimental_patchpoint_void ||
5491 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5492 F->getIntrinsicID() == Intrinsic::fake_use ||
5493 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5494 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5495 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5496 IsAttachedCallOperand(F, CBI, i),
5497 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5498 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5499 "wasm.(re)throw",
5500 &I);
5501 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5502 &M, F, F->getParent());
5503 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5504 Check(OpBB->getParent() == BB->getParent(),
5505 "Referring to a basic block in another function!", &I);
5506 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5507 Check(OpArg->getParent() == BB->getParent(),
5508 "Referring to an argument in another function!", &I);
5509 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5510 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5511 &M, GV, GV->getParent());
5512 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5513 Check(OpInst->getFunction() == BB->getParent(),
5514 "Referring to an instruction in another function!", &I);
5515 verifyDominatesUse(I, i);
5516 } else if (isa<InlineAsm>(I.getOperand(i))) {
5517 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5518 "Cannot take the address of an inline asm!", &I);
5519 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5520 visitConstantExprsRecursively(CPA);
5521 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5522 if (CE->getType()->isPtrOrPtrVectorTy()) {
5523 // If we have a ConstantExpr pointer, we need to see if it came from an
5524 // illegal bitcast.
5525 visitConstantExprsRecursively(CE);
5526 }
5527 }
5528 }
5529
5530 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5531 Check(I.getType()->isFPOrFPVectorTy(),
5532 "fpmath requires a floating point result!", &I);
5533 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5534 if (ConstantFP *CFP0 =
5536 const APFloat &Accuracy = CFP0->getValueAPF();
5537 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5538 "fpmath accuracy must have float type", &I);
5539 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5540 "fpmath accuracy not a positive number!", &I);
5541 } else {
5542 Check(false, "invalid fpmath accuracy!", &I);
5543 }
5544 }
5545
5546 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5548 "Ranges are only for loads, calls and invokes!", &I);
5549 visitRangeMetadata(I, Range, I.getType());
5550 }
5551
5552 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5555 "noalias.addrspace are only for memory operations!", &I);
5556 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5557 }
5558
5559 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5561 "invariant.group metadata is only for loads and stores", &I);
5562 }
5563
5564 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5565 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5566 &I);
5568 "nonnull applies only to load instructions, use attributes"
5569 " for calls or invokes",
5570 &I);
5571 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5572 }
5573
5574 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5575 visitDereferenceableMetadata(I, MD);
5576
5577 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5578 visitDereferenceableMetadata(I, MD);
5579
5580 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5581 visitNofreeMetadata(I, MD);
5582
5583 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5584 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5585
5586 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5587 visitAliasScopeListMetadata(MD);
5588 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5589 visitAliasScopeListMetadata(MD);
5590
5591 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5592 visitAccessGroupMetadata(MD);
5593
5594 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5595 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5596 &I);
5598 "align applies only to load instructions, "
5599 "use attributes for calls or invokes",
5600 &I);
5601 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5602 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5603 Check(CI && CI->getType()->isIntegerTy(64),
5604 "align metadata value must be an i64!", &I);
5605 uint64_t Align = CI->getZExtValue();
5606 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5607 &I);
5608 Check(Align <= Value::MaximumAlignment,
5609 "alignment is larger that implementation defined limit", &I);
5610 }
5611
5612 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5613 visitProfMetadata(I, MD);
5614
5615 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5616 visitMemProfMetadata(I, MD);
5617
5618 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5619 visitCallsiteMetadata(I, MD);
5620
5621 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5622 visitCalleeTypeMetadata(I, MD);
5623
5624 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5625 visitDIAssignIDMetadata(I, MD);
5626
5627 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5628 visitMMRAMetadata(I, MMRA);
5629
5630 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5631 visitAnnotationMetadata(Annotation);
5632
5633 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5634 visitCapturesMetadata(I, Captures);
5635
5636 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5637 visitAllocTokenMetadata(I, MD);
5638
5639 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5640 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5641 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5642
5643 if (auto *DL = dyn_cast<DILocation>(N)) {
5644 if (DL->getAtomGroup()) {
5645 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5646 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5647 "Instructions enabled",
5648 DL, DL->getScope()->getSubprogram());
5649 }
5650 }
5651 }
5652
5654 I.getAllMetadata(MDs);
5655 for (auto Attachment : MDs) {
5656 unsigned Kind = Attachment.first;
5657 auto AllowLocs =
5658 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5659 ? AreDebugLocsAllowed::Yes
5660 : AreDebugLocsAllowed::No;
5661 visitMDNode(*Attachment.second, AllowLocs);
5662 }
5663
5664 InstsInThisBlock.insert(&I);
5665}
5666
5667/// Allow intrinsics to be verified in different ways.
5668void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5670 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5671 IF);
5672
5673 // Verify that the intrinsic prototype lines up with what the .td files
5674 // describe.
5675 FunctionType *IFTy = IF->getFunctionType();
5676 bool IsVarArg = IFTy->isVarArg();
5677
5681
5682 // Walk the descriptors to extract overloaded types.
5687 "Intrinsic has incorrect return type!", IF);
5689 "Intrinsic has incorrect argument type!", IF);
5690
5691 // Verify if the intrinsic call matches the vararg property.
5692 if (IsVarArg)
5694 "Intrinsic was not defined with variable arguments!", IF);
5695 else
5697 "Callsite was not defined with variable arguments!", IF);
5698
5699 // All descriptors should be absorbed by now.
5700 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5701
5702 // Now that we have the intrinsic ID and the actual argument types (and we
5703 // know they are legal for the intrinsic!) get the intrinsic name through the
5704 // usual means. This allows us to verify the mangling of argument types into
5705 // the name.
5706 const std::string ExpectedName =
5707 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5708 Check(ExpectedName == IF->getName(),
5709 "Intrinsic name not mangled correctly for type arguments! "
5710 "Should be: " +
5711 ExpectedName,
5712 IF);
5713
5714 // If the intrinsic takes MDNode arguments, verify that they are either global
5715 // or are local to *this* function.
5716 for (Value *V : Call.args()) {
5717 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5718 visitMetadataAsValue(*MD, Call.getCaller());
5719 if (auto *Const = dyn_cast<Constant>(V))
5720 Check(!Const->getType()->isX86_AMXTy(),
5721 "const x86_amx is not allowed in argument!");
5722 }
5723
5724 switch (ID) {
5725 default:
5726 break;
5727 case Intrinsic::assume: {
5728 if (Call.hasOperandBundles()) {
5730 Check(Cond && Cond->isOne(),
5731 "assume with operand bundles must have i1 true condition", Call);
5732 }
5733 for (auto &Elem : Call.bundle_op_infos()) {
5734 unsigned ArgCount = Elem.End - Elem.Begin;
5735 // Separate storage assumptions are special insofar as they're the only
5736 // operand bundles allowed on assumes that aren't parameter attributes.
5737 if (Elem.Tag->getKey() == "separate_storage") {
5738 Check(ArgCount == 2,
5739 "separate_storage assumptions should have 2 arguments", Call);
5740 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5741 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5742 "arguments to separate_storage assumptions should be pointers",
5743 Call);
5744 continue;
5745 }
5746 Check(Elem.Tag->getKey() == "ignore" ||
5747 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5748 "tags must be valid attribute names", Call);
5749 Attribute::AttrKind Kind =
5750 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5751 if (Kind == Attribute::Alignment) {
5752 Check(ArgCount <= 3 && ArgCount >= 2,
5753 "alignment assumptions should have 2 or 3 arguments", Call);
5754 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5755 "first argument should be a pointer", Call);
5756 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5757 "second argument should be an integer", Call);
5758 if (ArgCount == 3)
5759 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5760 "third argument should be an integer if present", Call);
5761 continue;
5762 }
5763 if (Kind == Attribute::Dereferenceable) {
5764 Check(ArgCount == 2,
5765 "dereferenceable assumptions should have 2 arguments", Call);
5766 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5767 "first argument should be a pointer", Call);
5768 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5769 "second argument should be an integer", Call);
5770 continue;
5771 }
5772 Check(ArgCount <= 2, "too many arguments", Call);
5773 if (Kind == Attribute::None)
5774 break;
5775 if (Attribute::isIntAttrKind(Kind)) {
5776 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5777 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5778 "the second argument should be a constant integral value", Call);
5779 } else if (Attribute::canUseAsParamAttr(Kind)) {
5780 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5781 } else if (Attribute::canUseAsFnAttr(Kind)) {
5782 Check((ArgCount) == 0, "this attribute has no argument", Call);
5783 }
5784 }
5785 break;
5786 }
5787 case Intrinsic::ucmp:
5788 case Intrinsic::scmp: {
5789 Type *SrcTy = Call.getOperand(0)->getType();
5790 Type *DestTy = Call.getType();
5791
5792 Check(DestTy->getScalarSizeInBits() >= 2,
5793 "result type must be at least 2 bits wide", Call);
5794
5795 bool IsDestTypeVector = DestTy->isVectorTy();
5796 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5797 "ucmp/scmp argument and result types must both be either vector or "
5798 "scalar types",
5799 Call);
5800 if (IsDestTypeVector) {
5801 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5802 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5803 Check(SrcVecLen == DestVecLen,
5804 "return type and arguments must have the same number of "
5805 "elements",
5806 Call);
5807 }
5808 break;
5809 }
5810 case Intrinsic::coro_id: {
5811 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5812 if (isa<ConstantPointerNull>(InfoArg))
5813 break;
5814 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5815 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5816 "info argument of llvm.coro.id must refer to an initialized "
5817 "constant");
5818 Constant *Init = GV->getInitializer();
5820 "info argument of llvm.coro.id must refer to either a struct or "
5821 "an array");
5822 break;
5823 }
5824 case Intrinsic::is_fpclass: {
5825 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5826 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5827 "unsupported bits for llvm.is.fpclass test mask");
5828 break;
5829 }
5830 case Intrinsic::fptrunc_round: {
5831 // Check the rounding mode
5832 Metadata *MD = nullptr;
5834 if (MAV)
5835 MD = MAV->getMetadata();
5836
5837 Check(MD != nullptr, "missing rounding mode argument", Call);
5838
5839 Check(isa<MDString>(MD),
5840 ("invalid value for llvm.fptrunc.round metadata operand"
5841 " (the operand should be a string)"),
5842 MD);
5843
5844 std::optional<RoundingMode> RoundMode =
5845 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5846 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5847 "unsupported rounding mode argument", Call);
5848 break;
5849 }
5850#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5851#include "llvm/IR/VPIntrinsics.def"
5852#undef BEGIN_REGISTER_VP_INTRINSIC
5853 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5854 break;
5855#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5856 case Intrinsic::INTRINSIC:
5857#include "llvm/IR/ConstrainedOps.def"
5858#undef INSTRUCTION
5859 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5860 break;
5861 case Intrinsic::dbg_declare: // llvm.dbg.declare
5862 case Intrinsic::dbg_value: // llvm.dbg.value
5863 case Intrinsic::dbg_assign: // llvm.dbg.assign
5864 case Intrinsic::dbg_label: // llvm.dbg.label
5865 // We no longer interpret debug intrinsics (the old variable-location
5866 // design). They're meaningless as far as LLVM is concerned we could make
5867 // it an error for them to appear, but it's possible we'll have users
5868 // converting back to intrinsics for the forseeable future (such as DXIL),
5869 // so tolerate their existance.
5870 break;
5871 case Intrinsic::memcpy:
5872 case Intrinsic::memcpy_inline:
5873 case Intrinsic::memmove:
5874 case Intrinsic::memset:
5875 case Intrinsic::memset_inline:
5876 break;
5877 case Intrinsic::experimental_memset_pattern: {
5878 const auto Memset = cast<MemSetPatternInst>(&Call);
5879 Check(Memset->getValue()->getType()->isSized(),
5880 "unsized types cannot be used as memset patterns", Call);
5881 break;
5882 }
5883 case Intrinsic::memcpy_element_unordered_atomic:
5884 case Intrinsic::memmove_element_unordered_atomic:
5885 case Intrinsic::memset_element_unordered_atomic: {
5886 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5887
5888 ConstantInt *ElementSizeCI =
5889 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5890 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5891 Check(ElementSizeVal.isPowerOf2(),
5892 "element size of the element-wise atomic memory intrinsic "
5893 "must be a power of 2",
5894 Call);
5895
5896 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5897 return Alignment && ElementSizeVal.ule(Alignment->value());
5898 };
5899 Check(IsValidAlignment(AMI->getDestAlign()),
5900 "incorrect alignment of the destination argument", Call);
5901 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5902 Check(IsValidAlignment(AMT->getSourceAlign()),
5903 "incorrect alignment of the source argument", Call);
5904 }
5905 break;
5906 }
5907 case Intrinsic::call_preallocated_setup: {
5908 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
5909 bool FoundCall = false;
5910 for (User *U : Call.users()) {
5911 auto *UseCall = dyn_cast<CallBase>(U);
5912 Check(UseCall != nullptr,
5913 "Uses of llvm.call.preallocated.setup must be calls");
5914 Intrinsic::ID IID = UseCall->getIntrinsicID();
5915 if (IID == Intrinsic::call_preallocated_arg) {
5916 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5917 Check(AllocArgIndex != nullptr,
5918 "llvm.call.preallocated.alloc arg index must be a constant");
5919 auto AllocArgIndexInt = AllocArgIndex->getValue();
5920 Check(AllocArgIndexInt.sge(0) &&
5921 AllocArgIndexInt.slt(NumArgs->getValue()),
5922 "llvm.call.preallocated.alloc arg index must be between 0 and "
5923 "corresponding "
5924 "llvm.call.preallocated.setup's argument count");
5925 } else if (IID == Intrinsic::call_preallocated_teardown) {
5926 // nothing to do
5927 } else {
5928 Check(!FoundCall, "Can have at most one call corresponding to a "
5929 "llvm.call.preallocated.setup");
5930 FoundCall = true;
5931 size_t NumPreallocatedArgs = 0;
5932 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5933 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5934 ++NumPreallocatedArgs;
5935 }
5936 }
5937 Check(NumPreallocatedArgs != 0,
5938 "cannot use preallocated intrinsics on a call without "
5939 "preallocated arguments");
5940 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5941 "llvm.call.preallocated.setup arg size must be equal to number "
5942 "of preallocated arguments "
5943 "at call site",
5944 Call, *UseCall);
5945 // getOperandBundle() cannot be called if more than one of the operand
5946 // bundle exists. There is already a check elsewhere for this, so skip
5947 // here if we see more than one.
5948 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5949 1) {
5950 return;
5951 }
5952 auto PreallocatedBundle =
5953 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5954 Check(PreallocatedBundle,
5955 "Use of llvm.call.preallocated.setup outside intrinsics "
5956 "must be in \"preallocated\" operand bundle");
5957 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5958 "preallocated bundle must have token from corresponding "
5959 "llvm.call.preallocated.setup");
5960 }
5961 }
5962 break;
5963 }
5964 case Intrinsic::call_preallocated_arg: {
5965 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5966 Check(Token &&
5967 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5968 "llvm.call.preallocated.arg token argument must be a "
5969 "llvm.call.preallocated.setup");
5970 Check(Call.hasFnAttr(Attribute::Preallocated),
5971 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5972 "call site attribute");
5973 break;
5974 }
5975 case Intrinsic::call_preallocated_teardown: {
5976 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5977 Check(Token &&
5978 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
5979 "llvm.call.preallocated.teardown token argument must be a "
5980 "llvm.call.preallocated.setup");
5981 break;
5982 }
5983 case Intrinsic::gcroot:
5984 case Intrinsic::gcwrite:
5985 case Intrinsic::gcread:
5986 if (ID == Intrinsic::gcroot) {
5987 AllocaInst *AI =
5989 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5991 "llvm.gcroot parameter #2 must be a constant.", Call);
5992 if (!AI->getAllocatedType()->isPointerTy()) {
5994 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5995 "or argument #2 must be a non-null constant.",
5996 Call);
5997 }
5998 }
5999
6000 Check(Call.getParent()->getParent()->hasGC(),
6001 "Enclosing function does not use GC.", Call);
6002 break;
6003 case Intrinsic::init_trampoline:
6005 "llvm.init_trampoline parameter #2 must resolve to a function.",
6006 Call);
6007 break;
6008 case Intrinsic::prefetch:
6009 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6010 "rw argument to llvm.prefetch must be 0-1", Call);
6011 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6012 "locality argument to llvm.prefetch must be 0-3", Call);
6013 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6014 "cache type argument to llvm.prefetch must be 0-1", Call);
6015 break;
6016 case Intrinsic::reloc_none: {
6018 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6019 "llvm.reloc.none argument must be a metadata string", &Call);
6020 break;
6021 }
6022 case Intrinsic::stackprotector:
6024 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6025 break;
6026 case Intrinsic::localescape: {
6027 BasicBlock *BB = Call.getParent();
6028 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6029 Call);
6030 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6031 Call);
6032 for (Value *Arg : Call.args()) {
6033 if (isa<ConstantPointerNull>(Arg))
6034 continue; // Null values are allowed as placeholders.
6035 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6036 Check(AI && AI->isStaticAlloca(),
6037 "llvm.localescape only accepts static allocas", Call);
6038 }
6039 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6040 SawFrameEscape = true;
6041 break;
6042 }
6043 case Intrinsic::localrecover: {
6045 Function *Fn = dyn_cast<Function>(FnArg);
6046 Check(Fn && !Fn->isDeclaration(),
6047 "llvm.localrecover first "
6048 "argument must be function defined in this module",
6049 Call);
6050 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6051 auto &Entry = FrameEscapeInfo[Fn];
6052 Entry.second = unsigned(
6053 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6054 break;
6055 }
6056
6057 case Intrinsic::experimental_gc_statepoint:
6058 if (auto *CI = dyn_cast<CallInst>(&Call))
6059 Check(!CI->isInlineAsm(),
6060 "gc.statepoint support for inline assembly unimplemented", CI);
6061 Check(Call.getParent()->getParent()->hasGC(),
6062 "Enclosing function does not use GC.", Call);
6063
6064 verifyStatepoint(Call);
6065 break;
6066 case Intrinsic::experimental_gc_result: {
6067 Check(Call.getParent()->getParent()->hasGC(),
6068 "Enclosing function does not use GC.", Call);
6069
6070 auto *Statepoint = Call.getArgOperand(0);
6071 if (isa<UndefValue>(Statepoint))
6072 break;
6073
6074 // Are we tied to a statepoint properly?
6075 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6076 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6077 Intrinsic::experimental_gc_statepoint,
6078 "gc.result operand #1 must be from a statepoint", Call,
6079 Call.getArgOperand(0));
6080
6081 // Check that result type matches wrapped callee.
6082 auto *TargetFuncType =
6083 cast<FunctionType>(StatepointCall->getParamElementType(2));
6084 Check(Call.getType() == TargetFuncType->getReturnType(),
6085 "gc.result result type does not match wrapped callee", Call);
6086 break;
6087 }
6088 case Intrinsic::experimental_gc_relocate: {
6089 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6090
6092 "gc.relocate must return a pointer or a vector of pointers", Call);
6093
6094 // Check that this relocate is correctly tied to the statepoint
6095
6096 // This is case for relocate on the unwinding path of an invoke statepoint
6097 if (LandingPadInst *LandingPad =
6099
6100 const BasicBlock *InvokeBB =
6101 LandingPad->getParent()->getUniquePredecessor();
6102
6103 // Landingpad relocates should have only one predecessor with invoke
6104 // statepoint terminator
6105 Check(InvokeBB, "safepoints should have unique landingpads",
6106 LandingPad->getParent());
6107 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6108 InvokeBB);
6110 "gc relocate should be linked to a statepoint", InvokeBB);
6111 } else {
6112 // In all other cases relocate should be tied to the statepoint directly.
6113 // This covers relocates on a normal return path of invoke statepoint and
6114 // relocates of a call statepoint.
6115 auto *Token = Call.getArgOperand(0);
6117 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6118 }
6119
6120 // Verify rest of the relocate arguments.
6121 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6122
6123 // Both the base and derived must be piped through the safepoint.
6126 "gc.relocate operand #2 must be integer offset", Call);
6127
6128 Value *Derived = Call.getArgOperand(2);
6129 Check(isa<ConstantInt>(Derived),
6130 "gc.relocate operand #3 must be integer offset", Call);
6131
6132 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6133 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6134
6135 // Check the bounds
6136 if (isa<UndefValue>(StatepointCall))
6137 break;
6138 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6139 .getOperandBundle(LLVMContext::OB_gc_live)) {
6140 Check(BaseIndex < Opt->Inputs.size(),
6141 "gc.relocate: statepoint base index out of bounds", Call);
6142 Check(DerivedIndex < Opt->Inputs.size(),
6143 "gc.relocate: statepoint derived index out of bounds", Call);
6144 }
6145
6146 // Relocated value must be either a pointer type or vector-of-pointer type,
6147 // but gc_relocate does not need to return the same pointer type as the
6148 // relocated pointer. It can be casted to the correct type later if it's
6149 // desired. However, they must have the same address space and 'vectorness'
6150 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6151 auto *ResultType = Call.getType();
6152 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6153 auto *BaseType = Relocate.getBasePtr()->getType();
6154
6155 Check(BaseType->isPtrOrPtrVectorTy(),
6156 "gc.relocate: relocated value must be a pointer", Call);
6157 Check(DerivedType->isPtrOrPtrVectorTy(),
6158 "gc.relocate: relocated value must be a pointer", Call);
6159
6160 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6161 "gc.relocate: vector relocates to vector and pointer to pointer",
6162 Call);
6163 Check(
6164 ResultType->getPointerAddressSpace() ==
6165 DerivedType->getPointerAddressSpace(),
6166 "gc.relocate: relocating a pointer shouldn't change its address space",
6167 Call);
6168
6169 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6170 Check(GC, "gc.relocate: calling function must have GCStrategy",
6171 Call.getFunction());
6172 if (GC) {
6173 auto isGCPtr = [&GC](Type *PTy) {
6174 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6175 };
6176 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6177 Check(isGCPtr(BaseType),
6178 "gc.relocate: relocated value must be a gc pointer", Call);
6179 Check(isGCPtr(DerivedType),
6180 "gc.relocate: relocated value must be a gc pointer", Call);
6181 }
6182 break;
6183 }
6184 case Intrinsic::experimental_patchpoint: {
6185 if (Call.getCallingConv() == CallingConv::AnyReg) {
6187 "patchpoint: invalid return type used with anyregcc", Call);
6188 }
6189 break;
6190 }
6191 case Intrinsic::eh_exceptioncode:
6192 case Intrinsic::eh_exceptionpointer: {
6194 "eh.exceptionpointer argument must be a catchpad", Call);
6195 break;
6196 }
6197 case Intrinsic::get_active_lane_mask: {
6199 "get_active_lane_mask: must return a "
6200 "vector",
6201 Call);
6202 auto *ElemTy = Call.getType()->getScalarType();
6203 Check(ElemTy->isIntegerTy(1),
6204 "get_active_lane_mask: element type is not "
6205 "i1",
6206 Call);
6207 break;
6208 }
6209 case Intrinsic::experimental_get_vector_length: {
6210 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6211 Check(!VF->isNegative() && !VF->isZero(),
6212 "get_vector_length: VF must be positive", Call);
6213 break;
6214 }
6215 case Intrinsic::masked_load: {
6216 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6217 Call);
6218
6220 Value *PassThru = Call.getArgOperand(2);
6221 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6222 Call);
6223 Check(PassThru->getType() == Call.getType(),
6224 "masked_load: pass through and return type must match", Call);
6225 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6226 cast<VectorType>(Call.getType())->getElementCount(),
6227 "masked_load: vector mask must be same length as return", Call);
6228 break;
6229 }
6230 case Intrinsic::masked_store: {
6231 Value *Val = Call.getArgOperand(0);
6233 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6234 Call);
6235 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6236 cast<VectorType>(Val->getType())->getElementCount(),
6237 "masked_store: vector mask must be same length as value", Call);
6238 break;
6239 }
6240
6241 case Intrinsic::experimental_guard: {
6242 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6244 "experimental_guard must have exactly one "
6245 "\"deopt\" operand bundle");
6246 break;
6247 }
6248
6249 case Intrinsic::experimental_deoptimize: {
6250 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6251 Call);
6253 "experimental_deoptimize must have exactly one "
6254 "\"deopt\" operand bundle");
6256 "experimental_deoptimize return type must match caller return type");
6257
6258 if (isa<CallInst>(Call)) {
6260 Check(RI,
6261 "calls to experimental_deoptimize must be followed by a return");
6262
6263 if (!Call.getType()->isVoidTy() && RI)
6264 Check(RI->getReturnValue() == &Call,
6265 "calls to experimental_deoptimize must be followed by a return "
6266 "of the value computed by experimental_deoptimize");
6267 }
6268
6269 break;
6270 }
6271 case Intrinsic::vastart: {
6273 "va_start called in a non-varargs function");
6274 break;
6275 }
6276 case Intrinsic::get_dynamic_area_offset: {
6277 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6278 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6279 IntTy->getBitWidth(),
6280 "get_dynamic_area_offset result type must be scalar integer matching "
6281 "alloca address space width",
6282 Call);
6283 break;
6284 }
6285 case Intrinsic::vector_reduce_and:
6286 case Intrinsic::vector_reduce_or:
6287 case Intrinsic::vector_reduce_xor:
6288 case Intrinsic::vector_reduce_add:
6289 case Intrinsic::vector_reduce_mul:
6290 case Intrinsic::vector_reduce_smax:
6291 case Intrinsic::vector_reduce_smin:
6292 case Intrinsic::vector_reduce_umax:
6293 case Intrinsic::vector_reduce_umin: {
6294 Type *ArgTy = Call.getArgOperand(0)->getType();
6295 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6296 "Intrinsic has incorrect argument type!");
6297 break;
6298 }
6299 case Intrinsic::vector_reduce_fmax:
6300 case Intrinsic::vector_reduce_fmin: {
6301 Type *ArgTy = Call.getArgOperand(0)->getType();
6302 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6303 "Intrinsic has incorrect argument type!");
6304 break;
6305 }
6306 case Intrinsic::vector_reduce_fadd:
6307 case Intrinsic::vector_reduce_fmul: {
6308 // Unlike the other reductions, the first argument is a start value. The
6309 // second argument is the vector to be reduced.
6310 Type *ArgTy = Call.getArgOperand(1)->getType();
6311 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6312 "Intrinsic has incorrect argument type!");
6313 break;
6314 }
6315 case Intrinsic::smul_fix:
6316 case Intrinsic::smul_fix_sat:
6317 case Intrinsic::umul_fix:
6318 case Intrinsic::umul_fix_sat:
6319 case Intrinsic::sdiv_fix:
6320 case Intrinsic::sdiv_fix_sat:
6321 case Intrinsic::udiv_fix:
6322 case Intrinsic::udiv_fix_sat: {
6323 Value *Op1 = Call.getArgOperand(0);
6324 Value *Op2 = Call.getArgOperand(1);
6326 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6327 "vector of ints");
6329 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6330 "vector of ints");
6331
6332 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6333 Check(Op3->getType()->isIntegerTy(),
6334 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6335 Check(Op3->getBitWidth() <= 32,
6336 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6337
6338 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6339 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6340 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6341 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6342 "the operands");
6343 } else {
6344 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6345 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6346 "to the width of the operands");
6347 }
6348 break;
6349 }
6350 case Intrinsic::lrint:
6351 case Intrinsic::llrint:
6352 case Intrinsic::lround:
6353 case Intrinsic::llround: {
6354 Type *ValTy = Call.getArgOperand(0)->getType();
6355 Type *ResultTy = Call.getType();
6356 auto *VTy = dyn_cast<VectorType>(ValTy);
6357 auto *RTy = dyn_cast<VectorType>(ResultTy);
6358 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6359 ExpectedName + ": argument must be floating-point or vector "
6360 "of floating-points, and result must be integer or "
6361 "vector of integers",
6362 &Call);
6363 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6364 ExpectedName + ": argument and result disagree on vector use", &Call);
6365 if (VTy) {
6366 Check(VTy->getElementCount() == RTy->getElementCount(),
6367 ExpectedName + ": argument must be same length as result", &Call);
6368 }
6369 break;
6370 }
6371 case Intrinsic::bswap: {
6372 Type *Ty = Call.getType();
6373 unsigned Size = Ty->getScalarSizeInBits();
6374 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6375 break;
6376 }
6377 case Intrinsic::invariant_start: {
6378 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6379 Check(InvariantSize &&
6380 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6381 "invariant_start parameter must be -1, 0 or a positive number",
6382 &Call);
6383 break;
6384 }
6385 case Intrinsic::matrix_multiply:
6386 case Intrinsic::matrix_transpose:
6387 case Intrinsic::matrix_column_major_load:
6388 case Intrinsic::matrix_column_major_store: {
6390 ConstantInt *Stride = nullptr;
6391 ConstantInt *NumRows;
6392 ConstantInt *NumColumns;
6393 VectorType *ResultTy;
6394 Type *Op0ElemTy = nullptr;
6395 Type *Op1ElemTy = nullptr;
6396 switch (ID) {
6397 case Intrinsic::matrix_multiply: {
6398 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6399 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6400 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6402 ->getNumElements() ==
6403 NumRows->getZExtValue() * N->getZExtValue(),
6404 "First argument of a matrix operation does not match specified "
6405 "shape!");
6407 ->getNumElements() ==
6408 N->getZExtValue() * NumColumns->getZExtValue(),
6409 "Second argument of a matrix operation does not match specified "
6410 "shape!");
6411
6412 ResultTy = cast<VectorType>(Call.getType());
6413 Op0ElemTy =
6414 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6415 Op1ElemTy =
6416 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6417 break;
6418 }
6419 case Intrinsic::matrix_transpose:
6420 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6421 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6422 ResultTy = cast<VectorType>(Call.getType());
6423 Op0ElemTy =
6424 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6425 break;
6426 case Intrinsic::matrix_column_major_load: {
6428 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6429 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6430 ResultTy = cast<VectorType>(Call.getType());
6431 break;
6432 }
6433 case Intrinsic::matrix_column_major_store: {
6435 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6436 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6437 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6438 Op0ElemTy =
6439 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6440 break;
6441 }
6442 default:
6443 llvm_unreachable("unexpected intrinsic");
6444 }
6445
6446 Check(ResultTy->getElementType()->isIntegerTy() ||
6447 ResultTy->getElementType()->isFloatingPointTy(),
6448 "Result type must be an integer or floating-point type!", IF);
6449
6450 if (Op0ElemTy)
6451 Check(ResultTy->getElementType() == Op0ElemTy,
6452 "Vector element type mismatch of the result and first operand "
6453 "vector!",
6454 IF);
6455
6456 if (Op1ElemTy)
6457 Check(ResultTy->getElementType() == Op1ElemTy,
6458 "Vector element type mismatch of the result and second operand "
6459 "vector!",
6460 IF);
6461
6463 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6464 "Result of a matrix operation does not fit in the returned vector!");
6465
6466 if (Stride) {
6467 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6468 IF);
6469 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6470 "Stride must be greater or equal than the number of rows!", IF);
6471 }
6472
6473 break;
6474 }
6475 case Intrinsic::vector_splice: {
6477 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6478 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6479 if (Call.getParent() && Call.getParent()->getParent()) {
6480 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6481 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6482 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6483 }
6484 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6485 (Idx >= 0 && Idx < KnownMinNumElements),
6486 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6487 "known minimum number of elements in the vector. For scalable "
6488 "vectors the minimum number of elements is determined from "
6489 "vscale_range.",
6490 &Call);
6491 break;
6492 }
6493 case Intrinsic::stepvector: {
6495 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6496 VecTy->getScalarSizeInBits() >= 8,
6497 "stepvector only supported for vectors of integers "
6498 "with a bitwidth of at least 8.",
6499 &Call);
6500 break;
6501 }
6502 case Intrinsic::experimental_vector_match: {
6503 Value *Op1 = Call.getArgOperand(0);
6504 Value *Op2 = Call.getArgOperand(1);
6506
6507 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6508 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6509 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6510
6511 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6513 "Second operand must be a fixed length vector.", &Call);
6514 Check(Op1Ty->getElementType()->isIntegerTy(),
6515 "First operand must be a vector of integers.", &Call);
6516 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6517 "First two operands must have the same element type.", &Call);
6518 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6519 "First operand and mask must have the same number of elements.",
6520 &Call);
6521 Check(MaskTy->getElementType()->isIntegerTy(1),
6522 "Mask must be a vector of i1's.", &Call);
6523 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6524 &Call);
6525 break;
6526 }
6527 case Intrinsic::vector_insert: {
6528 Value *Vec = Call.getArgOperand(0);
6529 Value *SubVec = Call.getArgOperand(1);
6530 Value *Idx = Call.getArgOperand(2);
6531 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6532
6533 VectorType *VecTy = cast<VectorType>(Vec->getType());
6534 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6535
6536 ElementCount VecEC = VecTy->getElementCount();
6537 ElementCount SubVecEC = SubVecTy->getElementCount();
6538 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6539 "vector_insert parameters must have the same element "
6540 "type.",
6541 &Call);
6542 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6543 "vector_insert index must be a constant multiple of "
6544 "the subvector's known minimum vector length.");
6545
6546 // If this insertion is not the 'mixed' case where a fixed vector is
6547 // inserted into a scalable vector, ensure that the insertion of the
6548 // subvector does not overrun the parent vector.
6549 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6550 Check(IdxN < VecEC.getKnownMinValue() &&
6551 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6552 "subvector operand of vector_insert would overrun the "
6553 "vector being inserted into.");
6554 }
6555 break;
6556 }
6557 case Intrinsic::vector_extract: {
6558 Value *Vec = Call.getArgOperand(0);
6559 Value *Idx = Call.getArgOperand(1);
6560 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6561
6562 VectorType *ResultTy = cast<VectorType>(Call.getType());
6563 VectorType *VecTy = cast<VectorType>(Vec->getType());
6564
6565 ElementCount VecEC = VecTy->getElementCount();
6566 ElementCount ResultEC = ResultTy->getElementCount();
6567
6568 Check(ResultTy->getElementType() == VecTy->getElementType(),
6569 "vector_extract result must have the same element "
6570 "type as the input vector.",
6571 &Call);
6572 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6573 "vector_extract index must be a constant multiple of "
6574 "the result type's known minimum vector length.");
6575
6576 // If this extraction is not the 'mixed' case where a fixed vector is
6577 // extracted from a scalable vector, ensure that the extraction does not
6578 // overrun the parent vector.
6579 if (VecEC.isScalable() == ResultEC.isScalable()) {
6580 Check(IdxN < VecEC.getKnownMinValue() &&
6581 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6582 "vector_extract would overrun.");
6583 }
6584 break;
6585 }
6586 case Intrinsic::vector_partial_reduce_fadd:
6587 case Intrinsic::vector_partial_reduce_add: {
6590
6591 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6592 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6593
6594 Check((VecWidth % AccWidth) == 0,
6595 "Invalid vector widths for partial "
6596 "reduction. The width of the input vector "
6597 "must be a positive integer multiple of "
6598 "the width of the accumulator vector.");
6599 break;
6600 }
6601 case Intrinsic::experimental_noalias_scope_decl: {
6602 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6603 break;
6604 }
6605 case Intrinsic::preserve_array_access_index:
6606 case Intrinsic::preserve_struct_access_index:
6607 case Intrinsic::aarch64_ldaxr:
6608 case Intrinsic::aarch64_ldxr:
6609 case Intrinsic::arm_ldaex:
6610 case Intrinsic::arm_ldrex: {
6611 Type *ElemTy = Call.getParamElementType(0);
6612 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6613 &Call);
6614 break;
6615 }
6616 case Intrinsic::aarch64_stlxr:
6617 case Intrinsic::aarch64_stxr:
6618 case Intrinsic::arm_stlex:
6619 case Intrinsic::arm_strex: {
6620 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6621 Check(ElemTy,
6622 "Intrinsic requires elementtype attribute on second argument.",
6623 &Call);
6624 break;
6625 }
6626 case Intrinsic::aarch64_prefetch: {
6627 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6628 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6629 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6630 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6631 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6632 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6633 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6634 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6635 break;
6636 }
6637 case Intrinsic::callbr_landingpad: {
6638 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6639 Check(CBR, "intrinstic requires callbr operand", &Call);
6640 if (!CBR)
6641 break;
6642
6643 const BasicBlock *LandingPadBB = Call.getParent();
6644 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6645 if (!PredBB) {
6646 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6647 break;
6648 }
6649 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6650 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6651 &Call);
6652 break;
6653 }
6654 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6655 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6656 "block in indirect destination list",
6657 &Call);
6658 const Instruction &First = *LandingPadBB->begin();
6659 Check(&First == &Call, "No other instructions may proceed intrinsic",
6660 &Call);
6661 break;
6662 }
6663 case Intrinsic::amdgcn_cs_chain: {
6664 auto CallerCC = Call.getCaller()->getCallingConv();
6665 switch (CallerCC) {
6666 case CallingConv::AMDGPU_CS:
6667 case CallingConv::AMDGPU_CS_Chain:
6668 case CallingConv::AMDGPU_CS_ChainPreserve:
6669 break;
6670 default:
6671 CheckFailed("Intrinsic can only be used from functions with the "
6672 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6673 "calling conventions",
6674 &Call);
6675 break;
6676 }
6677
6678 Check(Call.paramHasAttr(2, Attribute::InReg),
6679 "SGPR arguments must have the `inreg` attribute", &Call);
6680 Check(!Call.paramHasAttr(3, Attribute::InReg),
6681 "VGPR arguments must not have the `inreg` attribute", &Call);
6682
6683 auto *Next = Call.getNextNode();
6684 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6685 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6686 Intrinsic::amdgcn_unreachable;
6687 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6688 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6689 break;
6690 }
6691 case Intrinsic::amdgcn_init_exec_from_input: {
6692 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6693 Check(Arg && Arg->hasInRegAttr(),
6694 "only inreg arguments to the parent function are valid as inputs to "
6695 "this intrinsic",
6696 &Call);
6697 break;
6698 }
6699 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6700 auto CallerCC = Call.getCaller()->getCallingConv();
6701 switch (CallerCC) {
6702 case CallingConv::AMDGPU_CS_Chain:
6703 case CallingConv::AMDGPU_CS_ChainPreserve:
6704 break;
6705 default:
6706 CheckFailed("Intrinsic can only be used from functions with the "
6707 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6708 "calling conventions",
6709 &Call);
6710 break;
6711 }
6712
6713 unsigned InactiveIdx = 1;
6714 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6715 "Value for inactive lanes must not have the `inreg` attribute",
6716 &Call);
6717 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6718 "Value for inactive lanes must be a function argument", &Call);
6719 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6720 "Value for inactive lanes must be a VGPR function argument", &Call);
6721 break;
6722 }
6723 case Intrinsic::amdgcn_call_whole_wave: {
6725 Check(F, "Indirect whole wave calls are not allowed", &Call);
6726
6727 CallingConv::ID CC = F->getCallingConv();
6728 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6729 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6730 &Call);
6731
6732 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6733
6734 Check(Call.arg_size() == F->arg_size(),
6735 "Call argument count must match callee argument count", &Call);
6736
6737 // The first argument of the call is the callee, and the first argument of
6738 // the callee is the active mask. The rest of the arguments must match.
6739 Check(F->arg_begin()->getType()->isIntegerTy(1),
6740 "Callee must have i1 as its first argument", &Call);
6741 for (auto [CallArg, FuncArg] :
6742 drop_begin(zip_equal(Call.args(), F->args()))) {
6743 Check(CallArg->getType() == FuncArg.getType(),
6744 "Argument types must match", &Call);
6745
6746 // Check that inreg attributes match between call site and function
6747 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6748 FuncArg.hasInRegAttr(),
6749 "Argument inreg attributes must match", &Call);
6750 }
6751 break;
6752 }
6753 case Intrinsic::amdgcn_s_prefetch_data: {
6754 Check(
6757 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6758 break;
6759 }
6760 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6761 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6762 Value *Src0 = Call.getArgOperand(0);
6763 Value *Src1 = Call.getArgOperand(1);
6764
6765 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6766 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6767 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6768 Call.getArgOperand(3));
6769 Check(BLGP <= 4, "invalid value for blgp format", Call,
6770 Call.getArgOperand(4));
6771
6772 // AMDGPU::MFMAScaleFormats values
6773 auto getFormatNumRegs = [](unsigned FormatVal) {
6774 switch (FormatVal) {
6775 case 0:
6776 case 1:
6777 return 8u;
6778 case 2:
6779 case 3:
6780 return 6u;
6781 case 4:
6782 return 4u;
6783 default:
6784 llvm_unreachable("invalid format value");
6785 }
6786 };
6787
6788 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6789 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6790 return false;
6791 unsigned NumElts = Ty->getNumElements();
6792 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6793 };
6794
6795 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6796 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6797 Check(isValidSrcASrcBVector(Src0Ty),
6798 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6799 Check(isValidSrcASrcBVector(Src1Ty),
6800 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6801
6802 // Permit excess registers for the format.
6803 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6804 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6805 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6806 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6807 break;
6808 }
6809 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6810 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6811 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6812 Value *Src0 = Call.getArgOperand(1);
6813 Value *Src1 = Call.getArgOperand(3);
6814
6815 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6816 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6817 Check(FmtA <= 4, "invalid value for matrix format", Call,
6818 Call.getArgOperand(0));
6819 Check(FmtB <= 4, "invalid value for matrix format", Call,
6820 Call.getArgOperand(2));
6821
6822 // AMDGPU::MatrixFMT values
6823 auto getFormatNumRegs = [](unsigned FormatVal) {
6824 switch (FormatVal) {
6825 case 0:
6826 case 1:
6827 return 16u;
6828 case 2:
6829 case 3:
6830 return 12u;
6831 case 4:
6832 return 8u;
6833 default:
6834 llvm_unreachable("invalid format value");
6835 }
6836 };
6837
6838 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6839 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6840 return false;
6841 unsigned NumElts = Ty->getNumElements();
6842 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6843 };
6844
6845 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6846 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6847 Check(isValidSrcASrcBVector(Src0Ty),
6848 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6849 Check(isValidSrcASrcBVector(Src1Ty),
6850 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6851
6852 // Permit excess registers for the format.
6853 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6854 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6855 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6856 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6857 break;
6858 }
6859 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6860 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6861 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6862 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6863 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6864 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6865 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6866 Value *PtrArg = Call.getArgOperand(0);
6867 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6869 "cooperative atomic intrinsics require a generic or global pointer",
6870 &Call, PtrArg);
6871
6872 // Last argument must be a MD string
6874 MDNode *MD = cast<MDNode>(Op->getMetadata());
6875 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6876 "cooperative atomic intrinsics require that the last argument is a "
6877 "metadata string",
6878 &Call, Op);
6879 break;
6880 }
6881 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6882 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6883 Value *V = Call.getArgOperand(0);
6884 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6885 Check(RegCount % 8 == 0,
6886 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6887 break;
6888 }
6889 case Intrinsic::experimental_convergence_entry:
6890 case Intrinsic::experimental_convergence_anchor:
6891 break;
6892 case Intrinsic::experimental_convergence_loop:
6893 break;
6894 case Intrinsic::ptrmask: {
6895 Type *Ty0 = Call.getArgOperand(0)->getType();
6896 Type *Ty1 = Call.getArgOperand(1)->getType();
6898 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6899 "of pointers",
6900 &Call);
6901 Check(
6902 Ty0->isVectorTy() == Ty1->isVectorTy(),
6903 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6904 &Call);
6905 if (Ty0->isVectorTy())
6906 Check(cast<VectorType>(Ty0)->getElementCount() ==
6907 cast<VectorType>(Ty1)->getElementCount(),
6908 "llvm.ptrmask intrinsic arguments must have the same number of "
6909 "elements",
6910 &Call);
6911 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6912 "llvm.ptrmask intrinsic second argument bitwidth must match "
6913 "pointer index type size of first argument",
6914 &Call);
6915 break;
6916 }
6917 case Intrinsic::thread_pointer: {
6919 DL.getDefaultGlobalsAddressSpace(),
6920 "llvm.thread.pointer intrinsic return type must be for the globals "
6921 "address space",
6922 &Call);
6923 break;
6924 }
6925 case Intrinsic::threadlocal_address: {
6926 const Value &Arg0 = *Call.getArgOperand(0);
6927 Check(isa<GlobalValue>(Arg0),
6928 "llvm.threadlocal.address first argument must be a GlobalValue");
6929 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6930 "llvm.threadlocal.address operand isThreadLocal() must be true");
6931 break;
6932 }
6933 case Intrinsic::lifetime_start:
6934 case Intrinsic::lifetime_end: {
6937 "llvm.lifetime.start/end can only be used on alloca or poison",
6938 &Call);
6939 break;
6940 }
6941 };
6942
6943 // Verify that there aren't any unmediated control transfers between funclets.
6945 Function *F = Call.getParent()->getParent();
6946 if (F->hasPersonalityFn() &&
6947 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6948 // Run EH funclet coloring on-demand and cache results for other intrinsic
6949 // calls in this function
6950 if (BlockEHFuncletColors.empty())
6951 BlockEHFuncletColors = colorEHFunclets(*F);
6952
6953 // Check for catch-/cleanup-pad in first funclet block
6954 bool InEHFunclet = false;
6955 BasicBlock *CallBB = Call.getParent();
6956 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6957 assert(CV.size() > 0 && "Uncolored block");
6958 for (BasicBlock *ColorFirstBB : CV)
6959 if (auto It = ColorFirstBB->getFirstNonPHIIt();
6960 It != ColorFirstBB->end())
6962 InEHFunclet = true;
6963
6964 // Check for funclet operand bundle
6965 bool HasToken = false;
6966 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6968 HasToken = true;
6969
6970 // This would cause silent code truncation in WinEHPrepare
6971 if (InEHFunclet)
6972 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6973 }
6974 }
6975}
6976
6977/// Carefully grab the subprogram from a local scope.
6978///
6979/// This carefully grabs the subprogram from a local scope, avoiding the
6980/// built-in assertions that would typically fire.
6982 if (!LocalScope)
6983 return nullptr;
6984
6985 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6986 return SP;
6987
6988 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6989 return getSubprogram(LB->getRawScope());
6990
6991 // Just return null; broken scope chains are checked elsewhere.
6992 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6993 return nullptr;
6994}
6995
6996void Verifier::visit(DbgLabelRecord &DLR) {
6998 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6999
7000 // Ignore broken !dbg attachments; they're checked elsewhere.
7001 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7002 if (!isa<DILocation>(N))
7003 return;
7004
7005 BasicBlock *BB = DLR.getParent();
7006 Function *F = BB ? BB->getParent() : nullptr;
7007
7008 // The scopes for variables and !dbg attachments must agree.
7009 DILabel *Label = DLR.getLabel();
7010 DILocation *Loc = DLR.getDebugLoc();
7011 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7012
7013 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7014 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7015 if (!LabelSP || !LocSP)
7016 return;
7017
7018 CheckDI(LabelSP == LocSP,
7019 "mismatched subprogram between #dbg_label label and !dbg attachment",
7020 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7021 Loc->getScope()->getSubprogram());
7022}
7023
7024void Verifier::visit(DbgVariableRecord &DVR) {
7025 BasicBlock *BB = DVR.getParent();
7026 Function *F = BB->getParent();
7027
7028 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7029 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7030 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7031 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7032
7033 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7034 // DIArgList, or an empty MDNode (which is a legacy representation for an
7035 // "undef" location).
7036 auto *MD = DVR.getRawLocation();
7037 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7038 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7039 "invalid #dbg record address/value", &DVR, MD, BB, F);
7040 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7041 visitValueAsMetadata(*VAM, F);
7042 if (DVR.isDbgDeclare()) {
7043 // Allow integers here to support inttoptr salvage.
7044 Type *Ty = VAM->getValue()->getType();
7045 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7046 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7047 F);
7048 }
7049 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7050 visitDIArgList(*AL, F);
7051 }
7052
7054 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7055 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7056
7058 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7059 F);
7060 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7061
7062 if (DVR.isDbgAssign()) {
7064 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7065 F);
7066 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7067 AreDebugLocsAllowed::No);
7068
7069 const auto *RawAddr = DVR.getRawAddress();
7070 // Similarly to the location above, the address for an assign
7071 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7072 // represents an undef address.
7073 CheckDI(
7074 isa<ValueAsMetadata>(RawAddr) ||
7075 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7076 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7077 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7078 visitValueAsMetadata(*VAM, F);
7079
7081 "invalid #dbg_assign address expression", &DVR,
7082 DVR.getRawAddressExpression(), BB, F);
7083 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7084
7085 // All of the linked instructions should be in the same function as DVR.
7086 for (Instruction *I : at::getAssignmentInsts(&DVR))
7087 CheckDI(DVR.getFunction() == I->getFunction(),
7088 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7089 }
7090
7091 // This check is redundant with one in visitLocalVariable().
7092 DILocalVariable *Var = DVR.getVariable();
7093 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7094 BB, F);
7095
7096 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7097 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7098 &DVR, DLNode, BB, F);
7099 DILocation *Loc = DVR.getDebugLoc();
7100
7101 // The scopes for variables and !dbg attachments must agree.
7102 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7103 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7104 if (!VarSP || !LocSP)
7105 return; // Broken scope chains are checked elsewhere.
7106
7107 CheckDI(VarSP == LocSP,
7108 "mismatched subprogram between #dbg record variable and DILocation",
7109 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7110 Loc->getScope()->getSubprogram(), BB, F);
7111
7112 verifyFnArgs(DVR);
7113}
7114
7115void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7116 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7117 auto *RetTy = cast<VectorType>(VPCast->getType());
7118 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7119 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7120 "VP cast intrinsic first argument and result vector lengths must be "
7121 "equal",
7122 *VPCast);
7123
7124 switch (VPCast->getIntrinsicID()) {
7125 default:
7126 llvm_unreachable("Unknown VP cast intrinsic");
7127 case Intrinsic::vp_trunc:
7128 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7129 "llvm.vp.trunc intrinsic first argument and result element type "
7130 "must be integer",
7131 *VPCast);
7132 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7133 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7134 "larger than the bit size of the return type",
7135 *VPCast);
7136 break;
7137 case Intrinsic::vp_zext:
7138 case Intrinsic::vp_sext:
7139 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7140 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7141 "element type must be integer",
7142 *VPCast);
7143 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7144 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7145 "argument must be smaller than the bit size of the return type",
7146 *VPCast);
7147 break;
7148 case Intrinsic::vp_fptoui:
7149 case Intrinsic::vp_fptosi:
7150 case Intrinsic::vp_lrint:
7151 case Intrinsic::vp_llrint:
7152 Check(
7153 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7154 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7155 "type must be floating-point and result element type must be integer",
7156 *VPCast);
7157 break;
7158 case Intrinsic::vp_uitofp:
7159 case Intrinsic::vp_sitofp:
7160 Check(
7161 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7162 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7163 "type must be integer and result element type must be floating-point",
7164 *VPCast);
7165 break;
7166 case Intrinsic::vp_fptrunc:
7167 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7168 "llvm.vp.fptrunc intrinsic first argument and result element type "
7169 "must be floating-point",
7170 *VPCast);
7171 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7172 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7173 "larger than the bit size of the return type",
7174 *VPCast);
7175 break;
7176 case Intrinsic::vp_fpext:
7177 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7178 "llvm.vp.fpext intrinsic first argument and result element type "
7179 "must be floating-point",
7180 *VPCast);
7181 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7182 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7183 "smaller than the bit size of the return type",
7184 *VPCast);
7185 break;
7186 case Intrinsic::vp_ptrtoint:
7187 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7188 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7189 "pointer and result element type must be integer",
7190 *VPCast);
7191 break;
7192 case Intrinsic::vp_inttoptr:
7193 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7194 "llvm.vp.inttoptr intrinsic first argument element type must be "
7195 "integer and result element type must be pointer",
7196 *VPCast);
7197 break;
7198 }
7199 }
7200
7201 switch (VPI.getIntrinsicID()) {
7202 case Intrinsic::vp_fcmp: {
7203 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7205 "invalid predicate for VP FP comparison intrinsic", &VPI);
7206 break;
7207 }
7208 case Intrinsic::vp_icmp: {
7209 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7211 "invalid predicate for VP integer comparison intrinsic", &VPI);
7212 break;
7213 }
7214 case Intrinsic::vp_is_fpclass: {
7215 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7216 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7217 "unsupported bits for llvm.vp.is.fpclass test mask");
7218 break;
7219 }
7220 case Intrinsic::experimental_vp_splice: {
7221 VectorType *VecTy = cast<VectorType>(VPI.getType());
7222 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7223 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7224 if (VPI.getParent() && VPI.getParent()->getParent()) {
7225 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7226 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7227 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7228 }
7229 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7230 (Idx >= 0 && Idx < KnownMinNumElements),
7231 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7232 "known minimum number of elements in the vector. For scalable "
7233 "vectors the minimum number of elements is determined from "
7234 "vscale_range.",
7235 &VPI);
7236 break;
7237 }
7238 }
7239}
7240
7241void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7242 unsigned NumOperands = FPI.getNonMetadataArgCount();
7243 bool HasRoundingMD =
7245
7246 // Add the expected number of metadata operands.
7247 NumOperands += (1 + HasRoundingMD);
7248
7249 // Compare intrinsics carry an extra predicate metadata operand.
7251 NumOperands += 1;
7252 Check((FPI.arg_size() == NumOperands),
7253 "invalid arguments for constrained FP intrinsic", &FPI);
7254
7255 switch (FPI.getIntrinsicID()) {
7256 case Intrinsic::experimental_constrained_lrint:
7257 case Intrinsic::experimental_constrained_llrint: {
7258 Type *ValTy = FPI.getArgOperand(0)->getType();
7259 Type *ResultTy = FPI.getType();
7260 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7261 "Intrinsic does not support vectors", &FPI);
7262 break;
7263 }
7264
7265 case Intrinsic::experimental_constrained_lround:
7266 case Intrinsic::experimental_constrained_llround: {
7267 Type *ValTy = FPI.getArgOperand(0)->getType();
7268 Type *ResultTy = FPI.getType();
7269 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7270 "Intrinsic does not support vectors", &FPI);
7271 break;
7272 }
7273
7274 case Intrinsic::experimental_constrained_fcmp:
7275 case Intrinsic::experimental_constrained_fcmps: {
7276 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7278 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7279 break;
7280 }
7281
7282 case Intrinsic::experimental_constrained_fptosi:
7283 case Intrinsic::experimental_constrained_fptoui: {
7284 Value *Operand = FPI.getArgOperand(0);
7285 ElementCount SrcEC;
7286 Check(Operand->getType()->isFPOrFPVectorTy(),
7287 "Intrinsic first argument must be floating point", &FPI);
7288 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7289 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7290 }
7291
7292 Operand = &FPI;
7293 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7294 "Intrinsic first argument and result disagree on vector use", &FPI);
7295 Check(Operand->getType()->isIntOrIntVectorTy(),
7296 "Intrinsic result must be an integer", &FPI);
7297 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7298 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7299 "Intrinsic first argument and result vector lengths must be equal",
7300 &FPI);
7301 }
7302 break;
7303 }
7304
7305 case Intrinsic::experimental_constrained_sitofp:
7306 case Intrinsic::experimental_constrained_uitofp: {
7307 Value *Operand = FPI.getArgOperand(0);
7308 ElementCount SrcEC;
7309 Check(Operand->getType()->isIntOrIntVectorTy(),
7310 "Intrinsic first argument must be integer", &FPI);
7311 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7312 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7313 }
7314
7315 Operand = &FPI;
7316 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7317 "Intrinsic first argument and result disagree on vector use", &FPI);
7318 Check(Operand->getType()->isFPOrFPVectorTy(),
7319 "Intrinsic result must be a floating point", &FPI);
7320 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7321 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7322 "Intrinsic first argument and result vector lengths must be equal",
7323 &FPI);
7324 }
7325 break;
7326 }
7327
7328 case Intrinsic::experimental_constrained_fptrunc:
7329 case Intrinsic::experimental_constrained_fpext: {
7330 Value *Operand = FPI.getArgOperand(0);
7331 Type *OperandTy = Operand->getType();
7332 Value *Result = &FPI;
7333 Type *ResultTy = Result->getType();
7334 Check(OperandTy->isFPOrFPVectorTy(),
7335 "Intrinsic first argument must be FP or FP vector", &FPI);
7336 Check(ResultTy->isFPOrFPVectorTy(),
7337 "Intrinsic result must be FP or FP vector", &FPI);
7338 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7339 "Intrinsic first argument and result disagree on vector use", &FPI);
7340 if (OperandTy->isVectorTy()) {
7341 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7342 cast<VectorType>(ResultTy)->getElementCount(),
7343 "Intrinsic first argument and result vector lengths must be equal",
7344 &FPI);
7345 }
7346 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7347 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7348 "Intrinsic first argument's type must be larger than result type",
7349 &FPI);
7350 } else {
7351 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7352 "Intrinsic first argument's type must be smaller than result type",
7353 &FPI);
7354 }
7355 break;
7356 }
7357
7358 default:
7359 break;
7360 }
7361
7362 // If a non-metadata argument is passed in a metadata slot then the
7363 // error will be caught earlier when the incorrect argument doesn't
7364 // match the specification in the intrinsic call table. Thus, no
7365 // argument type check is needed here.
7366
7367 Check(FPI.getExceptionBehavior().has_value(),
7368 "invalid exception behavior argument", &FPI);
7369 if (HasRoundingMD) {
7370 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7371 &FPI);
7372 }
7373}
7374
7375void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7376 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7377 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7378
7379 // We don't know whether this intrinsic verified correctly.
7380 if (!V || !E || !E->isValid())
7381 return;
7382
7383 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7384 auto Fragment = E->getFragmentInfo();
7385 if (!Fragment)
7386 return;
7387
7388 // The frontend helps out GDB by emitting the members of local anonymous
7389 // unions as artificial local variables with shared storage. When SROA splits
7390 // the storage for artificial local variables that are smaller than the entire
7391 // union, the overhang piece will be outside of the allotted space for the
7392 // variable and this check fails.
7393 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7394 if (V->isArtificial())
7395 return;
7396
7397 verifyFragmentExpression(*V, *Fragment, &DVR);
7398}
7399
7400template <typename ValueOrMetadata>
7401void Verifier::verifyFragmentExpression(const DIVariable &V,
7403 ValueOrMetadata *Desc) {
7404 // If there's no size, the type is broken, but that should be checked
7405 // elsewhere.
7406 auto VarSize = V.getSizeInBits();
7407 if (!VarSize)
7408 return;
7409
7410 unsigned FragSize = Fragment.SizeInBits;
7411 unsigned FragOffset = Fragment.OffsetInBits;
7412 CheckDI(FragSize + FragOffset <= *VarSize,
7413 "fragment is larger than or outside of variable", Desc, &V);
7414 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7415}
7416
7417void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7418 // This function does not take the scope of noninlined function arguments into
7419 // account. Don't run it if current function is nodebug, because it may
7420 // contain inlined debug intrinsics.
7421 if (!HasDebugInfo)
7422 return;
7423
7424 // For performance reasons only check non-inlined ones.
7425 if (DVR.getDebugLoc()->getInlinedAt())
7426 return;
7427
7428 DILocalVariable *Var = DVR.getVariable();
7429 CheckDI(Var, "#dbg record without variable");
7430
7431 unsigned ArgNo = Var->getArg();
7432 if (!ArgNo)
7433 return;
7434
7435 // Verify there are no duplicate function argument debug info entries.
7436 // These will cause hard-to-debug assertions in the DWARF backend.
7437 if (DebugFnArgs.size() < ArgNo)
7438 DebugFnArgs.resize(ArgNo, nullptr);
7439
7440 auto *Prev = DebugFnArgs[ArgNo - 1];
7441 DebugFnArgs[ArgNo - 1] = Var;
7442 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7443 Prev, Var);
7444}
7445
7446void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7447 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7448
7449 // We don't know whether this intrinsic verified correctly.
7450 if (!E || !E->isValid())
7451 return;
7452
7454 Value *VarValue = DVR.getVariableLocationOp(0);
7455 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7456 return;
7457 // We allow EntryValues for swift async arguments, as they have an
7458 // ABI-guarantee to be turned into a specific register.
7459 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7460 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7461 return;
7462 }
7463
7464 CheckDI(!E->isEntryValue(),
7465 "Entry values are only allowed in MIR unless they target a "
7466 "swiftasync Argument",
7467 &DVR);
7468}
7469
7470void Verifier::verifyCompileUnits() {
7471 // When more than one Module is imported into the same context, such as during
7472 // an LTO build before linking the modules, ODR type uniquing may cause types
7473 // to point to a different CU. This check does not make sense in this case.
7474 if (M.getContext().isODRUniquingDebugTypes())
7475 return;
7476 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7477 SmallPtrSet<const Metadata *, 2> Listed;
7478 if (CUs)
7479 Listed.insert_range(CUs->operands());
7480 for (const auto *CU : CUVisited)
7481 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7482 CUVisited.clear();
7483}
7484
7485void Verifier::verifyDeoptimizeCallingConvs() {
7486 if (DeoptimizeDeclarations.empty())
7487 return;
7488
7489 const Function *First = DeoptimizeDeclarations[0];
7490 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7491 Check(First->getCallingConv() == F->getCallingConv(),
7492 "All llvm.experimental.deoptimize declarations must have the same "
7493 "calling convention",
7494 First, F);
7495 }
7496}
7497
7498void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7499 const OperandBundleUse &BU) {
7500 FunctionType *FTy = Call.getFunctionType();
7501
7502 Check((FTy->getReturnType()->isPointerTy() ||
7503 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7504 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7505 "function returning a pointer or a non-returning function that has a "
7506 "void return type",
7507 Call);
7508
7509 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7510 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7511 "an argument",
7512 Call);
7513
7514 auto *Fn = cast<Function>(BU.Inputs.front());
7515 Intrinsic::ID IID = Fn->getIntrinsicID();
7516
7517 if (IID) {
7518 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7519 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7520 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7521 "invalid function argument", Call);
7522 } else {
7523 StringRef FnName = Fn->getName();
7524 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7525 FnName == "objc_claimAutoreleasedReturnValue" ||
7526 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7527 "invalid function argument", Call);
7528 }
7529}
7530
7531void Verifier::verifyNoAliasScopeDecl() {
7532 if (NoAliasScopeDecls.empty())
7533 return;
7534
7535 // only a single scope must be declared at a time.
7536 for (auto *II : NoAliasScopeDecls) {
7537 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7538 "Not a llvm.experimental.noalias.scope.decl ?");
7539 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7541 Check(ScopeListMV != nullptr,
7542 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7543 "argument",
7544 II);
7545
7546 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7547 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7548 Check(ScopeListMD->getNumOperands() == 1,
7549 "!id.scope.list must point to a list with a single scope", II);
7550 visitAliasScopeListMetadata(ScopeListMD);
7551 }
7552
7553 // Only check the domination rule when requested. Once all passes have been
7554 // adapted this option can go away.
7556 return;
7557
7558 // Now sort the intrinsics based on the scope MDNode so that declarations of
7559 // the same scopes are next to each other.
7560 auto GetScope = [](IntrinsicInst *II) {
7561 const auto *ScopeListMV = cast<MetadataAsValue>(
7563 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7564 };
7565
7566 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7567 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7568 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7569 return GetScope(Lhs) < GetScope(Rhs);
7570 };
7571
7572 llvm::sort(NoAliasScopeDecls, Compare);
7573
7574 // Go over the intrinsics and check that for the same scope, they are not
7575 // dominating each other.
7576 auto ItCurrent = NoAliasScopeDecls.begin();
7577 while (ItCurrent != NoAliasScopeDecls.end()) {
7578 auto CurScope = GetScope(*ItCurrent);
7579 auto ItNext = ItCurrent;
7580 do {
7581 ++ItNext;
7582 } while (ItNext != NoAliasScopeDecls.end() &&
7583 GetScope(*ItNext) == CurScope);
7584
7585 // [ItCurrent, ItNext) represents the declarations for the same scope.
7586 // Ensure they are not dominating each other.. but only if it is not too
7587 // expensive.
7588 if (ItNext - ItCurrent < 32)
7589 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7590 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7591 if (I != J)
7592 Check(!DT.dominates(I, J),
7593 "llvm.experimental.noalias.scope.decl dominates another one "
7594 "with the same scope",
7595 I);
7596 ItCurrent = ItNext;
7597 }
7598}
7599
7600//===----------------------------------------------------------------------===//
7601// Implement the public interfaces to this file...
7602//===----------------------------------------------------------------------===//
7603
7605 Function &F = const_cast<Function &>(f);
7606
7607 // Don't use a raw_null_ostream. Printing IR is expensive.
7608 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7609
7610 // Note that this function's return value is inverted from what you would
7611 // expect of a function called "verify".
7612 return !V.verify(F);
7613}
7614
7616 bool *BrokenDebugInfo) {
7617 // Don't use a raw_null_ostream. Printing IR is expensive.
7618 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7619
7620 bool Broken = false;
7621 for (const Function &F : M)
7622 Broken |= !V.verify(F);
7623
7624 Broken |= !V.verify();
7625 if (BrokenDebugInfo)
7626 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7627 // Note that this function's return value is inverted from what you would
7628 // expect of a function called "verify".
7629 return Broken;
7630}
7631
7632namespace {
7633
// Legacy pass-manager wrapper around the Verifier: checks every function as
// it runs, then declarations and module-level constructs in doFinalization.
7634 struct VerifierLegacyPass : public FunctionPass {
7635 static char ID;
7636
// One Verifier instance per module, created in doInitialization.
7637 std::unique_ptr<Verifier> V;
// When true, broken IR aborts compilation via report_fatal_error.
7638 bool FatalErrors = true;
7639
7640 VerifierLegacyPass() : FunctionPass(ID) {
// NOTE(review): doxygen line 7641 was dropped by the extraction here --
// presumably the pass-registry initialization call; verify against upstream.
7642 }
7643 explicit VerifierLegacyPass(bool FatalErrors)
7644 : FunctionPass(ID),
7645 FatalErrors(FatalErrors) {
// NOTE(review): doxygen line 7646 was dropped by the extraction here --
// presumably the pass-registry initialization call; verify against upstream.
7647 }
7648
// Build the shared Verifier; broken debug info is tolerated here (reported,
// not fatal) rather than treated as a hard error.
7649 bool doInitialization(Module &M) override {
7650 V = std::make_unique<Verifier>(
7651 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7652 return false;
7653 }
7654
// Verify one function definition. Always returns false: the pass never
// mutates IR.
7655 bool runOnFunction(Function &F) override {
7656 if (!V->verify(F) && FatalErrors) {
7657 errs() << "in function " << F.getName() << '\n';
7658 report_fatal_error("Broken function found, compilation aborted!");
7659 }
7660 return false;
7661 }
7662
// Declarations are never passed to runOnFunction, so verify them here along
// with the module-level checks.
7663 bool doFinalization(Module &M) override {
7664 bool HasErrors = false;
7665 for (Function &F : M)
7666 if (F.isDeclaration())
7667 HasErrors |= !V->verify(F);
7668
7669 HasErrors |= !V->verify();
7670 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7671 report_fatal_error("Broken module found, compilation aborted!");
7672 return false;
7673 }
7674
// The verifier is analysis-only; it invalidates nothing.
7675 void getAnalysisUsage(AnalysisUsage &AU) const override {
7676 AU.setPreservesAll();
7677 }
7678};
7679
7680} // end anonymous namespace
7681
7682/// Helper to issue failure from the TBAA verification
7683template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7684 if (Diagnostic)
7685 return Diagnostic->CheckFailed(Args...);
7686}
7687
// Report a TBAA verification failure via CheckFailed and make the enclosing
// bool-returning verifier routine fail by returning false when !(C).
7688 #define CheckTBAA(C, ...) \
7689 do { \
7690 if (!(C)) { \
7691 CheckFailed(__VA_ARGS__); \
7692 return false; \
7693 } \
7694 } while (false)
7695
7696/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7697/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7698/// struct-type node describing an aggregate data structure (like a struct).
7699TBAAVerifier::TBAABaseNodeSummary
7700TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7701 bool IsNewFormat) {
7702 if (BaseNode->getNumOperands() < 2) {
7703 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7704 return {true, ~0u};
7705 }
7706
7707 auto Itr = TBAABaseNodes.find(BaseNode);
7708 if (Itr != TBAABaseNodes.end())
7709 return Itr->second;
7710
7711 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7712 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7713 (void)InsertResult;
7714 assert(InsertResult.second && "We just checked!");
7715 return Result;
7716}
7717
7718TBAAVerifier::TBAABaseNodeSummary
7719TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7720 const MDNode *BaseNode, bool IsNewFormat) {
7721 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7722
7723 if (BaseNode->getNumOperands() == 2) {
7724 // Scalar nodes can only be accessed at offset 0.
7725 return isValidScalarTBAANode(BaseNode)
7726 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7727 : InvalidNode;
7728 }
7729
7730 if (IsNewFormat) {
7731 if (BaseNode->getNumOperands() % 3 != 0) {
7732 CheckFailed("Access tag nodes must have the number of operands that is a "
7733 "multiple of 3!", BaseNode);
7734 return InvalidNode;
7735 }
7736 } else {
7737 if (BaseNode->getNumOperands() % 2 != 1) {
7738 CheckFailed("Struct tag nodes must have an odd number of operands!",
7739 BaseNode);
7740 return InvalidNode;
7741 }
7742 }
7743
7744 // Check the type size field.
7745 if (IsNewFormat) {
7746 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7747 BaseNode->getOperand(1));
7748 if (!TypeSizeNode) {
7749 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7750 return InvalidNode;
7751 }
7752 }
7753
7754 // Check the type name field. In the new format it can be anything.
7755 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7756 CheckFailed("Struct tag nodes have a string as their first operand",
7757 BaseNode);
7758 return InvalidNode;
7759 }
7760
7761 bool Failed = false;
7762
7763 std::optional<APInt> PrevOffset;
7764 unsigned BitWidth = ~0u;
7765
7766 // We've already checked that BaseNode is not a degenerate root node with one
7767 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7768 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7769 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7770 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7771 Idx += NumOpsPerField) {
7772 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7773 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7774 if (!isa<MDNode>(FieldTy)) {
7775 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7776 Failed = true;
7777 continue;
7778 }
7779
7780 auto *OffsetEntryCI =
7782 if (!OffsetEntryCI) {
7783 CheckFailed("Offset entries must be constants!", I, BaseNode);
7784 Failed = true;
7785 continue;
7786 }
7787
7788 if (BitWidth == ~0u)
7789 BitWidth = OffsetEntryCI->getBitWidth();
7790
7791 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7792 CheckFailed(
7793 "Bitwidth between the offsets and struct type entries must match", I,
7794 BaseNode);
7795 Failed = true;
7796 continue;
7797 }
7798
7799 // NB! As far as I can tell, we generate a non-strictly increasing offset
7800 // sequence only from structs that have zero size bit fields. When
7801 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7802 // pick the field lexically the latest in struct type metadata node. This
7803 // mirrors the actual behavior of the alias analysis implementation.
7804 bool IsAscending =
7805 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7806
7807 if (!IsAscending) {
7808 CheckFailed("Offsets must be increasing!", I, BaseNode);
7809 Failed = true;
7810 }
7811
7812 PrevOffset = OffsetEntryCI->getValue();
7813
7814 if (IsNewFormat) {
7815 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7816 BaseNode->getOperand(Idx + 2));
7817 if (!MemberSizeNode) {
7818 CheckFailed("Member size entries must be constants!", I, BaseNode);
7819 Failed = true;
7820 continue;
7821 }
7822 }
7823 }
7824
7825 return Failed ? InvalidNode
7826 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7827}
7828
7829static bool IsRootTBAANode(const MDNode *MD) {
7830 return MD->getNumOperands() < 2;
7831}
7832
7833static bool IsScalarTBAANodeImpl(const MDNode *MD,
7835 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7836 return false;
7837
7838 if (!isa<MDString>(MD->getOperand(0)))
7839 return false;
7840
7841 if (MD->getNumOperands() == 3) {
7843 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7844 return false;
7845 }
7846
7847 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7848 return Parent && Visited.insert(Parent).second &&
7849 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7850}
7851
7852bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7853 auto ResultIt = TBAAScalarNodes.find(MD);
7854 if (ResultIt != TBAAScalarNodes.end())
7855 return ResultIt->second;
7856
7857 SmallPtrSet<const MDNode *, 4> Visited;
7858 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7859 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7860 (void)InsertResult;
7861 assert(InsertResult.second && "Just checked!");
7862
7863 return Result;
7864}
7865
7866/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7867/// Offset in place to be the offset within the field node returned.
7868///
7869/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7870MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
7871 const MDNode *BaseNode,
7872 APInt &Offset,
7873 bool IsNewFormat) {
7874 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7875
7876 // Scalar nodes have only one possible "field" -- their parent in the access
7877 // hierarchy. Offset must be zero at this point, but our caller is supposed
7878 // to check that.
7879 if (BaseNode->getNumOperands() == 2)
7880 return cast<MDNode>(BaseNode->getOperand(1));
7881
7882 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7883 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7884 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7885 Idx += NumOpsPerField) {
7886 auto *OffsetEntryCI =
7887 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7888 if (OffsetEntryCI->getValue().ugt(Offset)) {
7889 if (Idx == FirstFieldOpNo) {
7890 CheckFailed("Could not find TBAA parent in struct type node", I,
7891 BaseNode, &Offset);
7892 return nullptr;
7893 }
7894
7895 unsigned PrevIdx = Idx - NumOpsPerField;
7896 auto *PrevOffsetEntryCI =
7897 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7898 Offset -= PrevOffsetEntryCI->getValue();
7899 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7900 }
7901 }
7902
7903 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7904 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7905 BaseNode->getOperand(LastIdx + 1));
7906 Offset -= LastOffsetEntryCI->getValue();
7907 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7908}
7909
7911 if (!Type || Type->getNumOperands() < 3)
7912 return false;
7913
7914 // In the new format type nodes shall have a reference to the parent type as
7915 // its first operand.
7916 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7917}
7918
// TBAAVerifier::visitTBAAMetadata (body). NOTE(review): the doxygen
// extraction dropped line 7919, the signature -- from the usage below it
// takes a nullable `const Instruction *I` and the access-tag MDNode `MD`,
// and returns bool (true = tag is well formed); verify against upstream.
7920 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7921 MD);
7922
7923 if (I)
// NOTE(review): doxygen lines 7924-7926 were dropped by the extraction --
// the CheckTBAA condition restricting which instruction kinds may carry a
// !tbaa tag is missing; the string on the next line is its message argument.
// Verify against upstream Verifier.cpp.
7927 "This instruction shall not have a TBAA access tag!", I);
7928
// A struct-path access tag has an MDNode (not an MDString name) as its first
// operand and at least (base, access-type, offset) = 3 operands.
7929 bool IsStructPathTBAA =
7930 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7931
7932 CheckTBAA(IsStructPathTBAA,
7933 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7934 I);
7935
7936 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7937 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7938
7939 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7940
7941 if (IsNewFormat) {
7942 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7943 "Access tag metadata must have either 4 or 5 operands", I, MD);
7944 } else {
7945 CheckTBAA(MD->getNumOperands() < 5,
7946 "Struct tag metadata must have either 3 or 4 operands", I, MD);
7947 }
7948
7949 // Check the access size field.
7950 if (IsNewFormat) {
7951 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7952 MD->getOperand(3));
7953 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
7954 }
7955
7956 // Check the immutability flag.
7957 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7958 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7959 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7960 MD->getOperand(ImmutabilityFlagOpNo));
7961 CheckTBAA(IsImmutableCI,
7962 "Immutability tag on struct tag metadata must be a constant", I,
7963 MD);
7964 CheckTBAA(
7965 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7966 "Immutability part of the struct tag metadata must be either 0 or 1", I,
7967 MD);
7968 }
7969
7970 CheckTBAA(BaseNode && AccessType,
7971 "Malformed struct tag metadata: base and access-type "
7972 "should be non-null and point to Metadata nodes",
7973 I, MD, BaseNode, AccessType);
7974
7975 if (!IsNewFormat) {
7976 CheckTBAA(isValidScalarTBAANode(AccessType),
7977 "Access type node must be a valid scalar type", I, MD,
7978 AccessType);
7979 }
7980
// NOTE(review): doxygen line 7981 was dropped by the extraction -- the
// declaration/initialization of OffsetCI (a ConstantInt extracted from MD's
// offset operand, cf. the mdconst::dyn_extract_or_null<ConstantInt> pattern
// above) is missing. Verify against upstream Verifier.cpp.
7982 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
7983
7984 APInt Offset = OffsetCI->getValue();
7985 bool SeenAccessTypeInPath = false;
7986
7987 SmallPtrSet<MDNode *, 4> StructPath;
7988
// Walk from the base type toward the TBAA root, peeling Offset down at each
// field step; the walk must pass through AccessType for the tag to be valid.
7989 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7990 BaseNode =
7991 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
7992 if (!StructPath.insert(BaseNode).second) {
7993 CheckFailed("Cycle detected in struct path", I, MD);
7994 return false;
7995 }
7996
7997 bool Invalid;
7998 unsigned BaseNodeBitWidth;
7999 std::tie(Invalid, BaseNodeBitWidth) =
8000 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8001
8002 // If the base node is invalid in itself, then we've already printed all the
8003 // errors we wanted to print.
8004 if (Invalid)
8005 return false;
8006
8007 SeenAccessTypeInPath |= BaseNode == AccessType;
8008
8009 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8010 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8011 MD, &Offset);
8012
8013 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8014 (BaseNodeBitWidth == 0 && Offset == 0) ||
8015 (IsNewFormat && BaseNodeBitWidth == ~0u),
8016 "Access bit-width not the same as description bit-width", I, MD,
8017 BaseNodeBitWidth, Offset.getBitWidth());
8018
8019 if (IsNewFormat && SeenAccessTypeInPath)
8020 break;
8021 }
8022
8023 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8024 MD);
8025 return true;
8026}
8027
// Unique pass ID for the legacy verifier pass (address is the identity).
8028 char VerifierLegacyPass::ID = 0;
8029 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8030
// NOTE(review): doxygen line 8031 was dropped by the extraction -- the
// signature of the factory function whose body follows (it returns a new
// VerifierLegacyPass). Verify against upstream Verifier.cpp.
8032 return new VerifierLegacyPass(FatalErrors);
8033 }
8034
// Analysis key identifying VerifierAnalysis results in the new pass manager.
8035 AnalysisKey VerifierAnalysis::Key;
// NOTE(review): doxygen lines 8036-8046 were collapsed by the extraction
// (only the bare markers below remain) -- the VerifierAnalysis::run
// definitions are missing here. Verify against upstream Verifier.cpp.
8042
8047
// NOTE(review): doxygen line 8048 was dropped by the extraction -- the
// signature of the module-level new-PM verifier run method whose body
// follows. Verify against upstream Verifier.cpp.
8049 auto Res = AM.getResult<VerifierAnalysis>(M);
8050 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8051 report_fatal_error("Broken module found, compilation aborted!");
8052
// The verifier transforms nothing, so every analysis stays valid.
8053 return PreservedAnalyses::all();
8054 }
8055
// NOTE(review): doxygen line 8056 was dropped by the extraction -- the
// signature of the function-level new-PM verifier run method whose body
// follows. Verify against upstream Verifier.cpp.
8057 auto res = AM.getResult<VerifierAnalysis>(F);
8058 if (res.IRBroken && FatalErrors)
8059 report_fatal_error("Broken function found, compilation aborted!");
8060
8061 return PreservedAnalyses::all();
8062 }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:678
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:719
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:138
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:233
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:831
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:719
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:441
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:75
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:91
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1062
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:154
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:240
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:241
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:39
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:302
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:295
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:284
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:311
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142