#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"

              "Controls which checks to insert");
              "Controls which instruction to instrument");

    "msan-track-origins",
    cl::desc("keep going after reporting a UMR"),
    "msan-poison-stack-with-call",
    "msan-poison-stack-pattern",
    cl::desc("poison uninitialized stack variables with the given pattern"),
    cl::desc("Print name of local stack variable"),
    cl::desc("Poison fully undef temporary values. "
             "Partially undefined constant vectors "
             "are unaffected by this flag (see "
             "-msan-poison-undef-vectors)."),
    "msan-poison-undef-vectors",
    cl::desc("Precisely poison partially undefined constant vectors. "
             "If false (legacy behavior), the entire vector is "
             "considered fully initialized, which may lead to false "
             "negatives. Fully undefined constant vectors are "
             "unaffected by this flag (see -msan-poison-undef)."),
287 "msan-precise-disjoint-or",
288 cl::desc(
"Precisely poison disjoint OR. If false (legacy behavior), "
289 "disjointedness is ignored (i.e., 1|1 is initialized)."),
    cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
    cl::desc("exact handling of relational integer ICmp"),
    "msan-handle-lifetime-intrinsics",
    "when possible, poison scoped variables at the beginning of the scope "
    "(slower, but more precise)"),
    "msan-handle-asm-conservative",
    "msan-check-access-address",
    cl::desc("report accesses through a pointer which has poisoned shadow"),
    cl::desc("check arguments and return values at function call boundaries"),
338 "msan-dump-strict-instructions",
339 cl::desc(
"print out instructions with default strict semantics i.e.,"
340 "check that all the inputs are fully initialized, and mark "
341 "the output as fully initialized. These semantics are applied "
342 "to instructions that could not be handled explicitly nor "
351 "msan-dump-heuristic-instructions",
352 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
353 "Use -msan-dump-strict-instructions to print instructions that "
354 "could not be handled explicitly nor heuristically."),
358 "msan-instrumentation-with-call-threshold",
360 "If the function being instrumented requires more than "
361 "this number of checks and origin stores, use callbacks instead of "
362 "inline checks (-1 means never use callbacks)."),
367 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
377 cl::desc(
"Insert checks for constant shadow values"),
384 cl::desc(
"Place MSan constructors in comdat sections"),
390 cl::desc(
"Define custom MSan AndMask"),
394 cl::desc(
"Define custom MSan XorMask"),
398 cl::desc(
"Define custom MSan ShadowBase"),
402 cl::desc(
"Define custom MSan OriginBase"),
407 cl::desc(
"Define threshold for number of checks per "
408 "debug location to force origin update."),
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
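// For orientation, the x86_64 Linux userspace parameters (other platforms
// differ): AndMask = 0, XorMask = 0x500000000000, ShadowBase = 0,
// OriginBase = 0x100000000000. That is, Shadow(addr) = addr ^ 0x500000000000,
// and the origin of a 4-byte granule lives at Shadow(addr) + OriginBase,
// aligned down to 4 bytes.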
class MemorySanitizer {
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,

  Value *ParamOriginTLS;
  Value *RetvalOriginTLS;
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;
  FunctionCallee MaybeWarningVarSizeFn;
  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
  FunctionCallee MsanPoisonStackFn;
  FunctionCallee MsanChainOriginFn;
  FunctionCallee MsanSetOriginFn;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;
  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;
  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;
  MemoryMapParams CustomMapParams;
  MDNode *ColdCallWeights;
  MDNode *OriginStoreWeights;
void insertModuleCtor(Module &M) {

  if (!Options.Kernel) {
  MemorySanitizer Msan(*F.getParent(), Options);
                                          OS, MapClassName2PassName);
  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;

template <typename... ArgsTy>
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                 std::forward<ArgsTy>(Args)...);

  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
  RetvalOriginTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);

  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);

  return M.getOrInsertGlobal(Name, Ty, [&] {
                              nullptr, Name, nullptr,
void MemorySanitizer::createUserspaceApi(Module &M,

  StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                    : "__msan_warning_with_origin_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName,
                                    IRB.getVoidTy(), IRB.getInt32Ty());
        Recover ? "__msan_warning" : "__msan_warning_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

                        IRB.getIntPtrTy(M.getDataLayout()));

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
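  // The __msan_maybe_warning_<n> / __msan_maybe_store_origin_<n> callbacks
  // (n = 1, 2, 4, 8) receive the shadow value itself plus a 32-bit origin;
  // the runtime reports only if the shadow is non-zero. They exist so that,
  // above -msan-instrumentation-with-call-threshold, a check compiles to a
  // single call instead of an inline compare-and-branch.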
void MemorySanitizer::initializeCallbacks(Module &M,
  if (CallbacksInitialized)

  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin",
  MsanSetOriginFn = M.getOrInsertFunction(
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset",
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

    createKernelApi(M, TLI);
    createUserspaceApi(M, TLI);
  CallbacksInitialized = true;

      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  if (ShadowPassed || OriginPassed) {
    MapParams = &CustomMapParams;

    switch (TargetTriple.getOS()) {
      switch (TargetTriple.getArch()) {
      switch (TargetTriple.getArch()) {
      switch (TargetTriple.getArch()) {

  C = &(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();

  if (!CompileKernel) {
    M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(
          M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
          IRB.getInt32(TrackOrigins), "__msan_track_origins");

    M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                GlobalValue::WeakODRLinkage,
                                IRB.getInt32(Recover), "__msan_keep_going");
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;

  virtual void visitVAStartInst(VAStartInst &I) = 0;

  virtual void visitVACopyInst(VACopyInst &I) = 0;

  virtual void finalizeInstrumentation() = 0;

struct MemorySanitizerVisitor;

                                   MemorySanitizerVisitor &Visitor);

  if (TypeSizeFixed <= 8)
class NextNodeIRBuilder : public IRBuilder<> {

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  MemorySanitizer &MS;
  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;
  bool PropagateShadow;
  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallSetVector<AllocaInst *, 16> AllocaSet;
  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)
    bool SanitizeFunction =
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);
    IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
        .CreateIntrinsic(Intrinsic::donothing, {});

    if (MS.CompileKernel) {
      insertKmsanPrologue(IRB);

               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");
  bool instrumentWithCalls(Value *V) {
    ++SplittableBlocksCount;

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&

    if (MS.TrackOrigins <= 1)
    return IRB.CreateCall(MS.MsanChainOriginFn, V);

    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);

    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        CurrentAlignment = IntptrAlignment;

                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,

    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    if (instrumentWithCalls(ConvertedShadow) &&
      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =
      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
  void materializeStores() {
    for (StoreInst *SI : StoreList) {
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      const Align Alignment = SI->getAlign();
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      [[maybe_unused]] StoreInst *NewSI =

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,

    if (MS.TrackOrigins < 2)

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];

      auto NewDebugLoc = OI->getDebugLoc();
      IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
      Origin = updateOrigin(Origin, IRBOrigin);

    if (MS.CompileKernel || MS.TrackOrigins)
    const DataLayout &DL = F.getDataLayout();
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
        ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
        Value *ConvertedShadow2 =
        FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
                     MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
        FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
        unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());
                     {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
                      MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
          !MS.Recover, MS.ColdCallWeights);
      insertWarningFn(IRB, Origin);

  void materializeInstructionChecks(
    const DataLayout &DL = F.getDataLayout();
    bool Combine = !MS.TrackOrigins;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);
      Value *ConvertedShadow = ShadowData.Shadow;
          insertWarningFn(IRB, ShadowData.Origin);
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        Shadow = ConvertedShadow;
      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
      materializeOneCheck(IRB, Shadow, nullptr);
  static bool isAArch64SVCount(Type *Ty) {
      return TTy->getName() == "aarch64.svcount";

  static bool isScalableNonVectorType(Type *Ty) {
    if (!isAArch64SVCount(Ty))
      LLVM_DEBUG(dbgs() << "isScalableNonVectorType: Unexpected type " << *Ty

  void materializeChecks() {
    SmallPtrSet<Instruction *, 16> Done;

    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;

    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");

    MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
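    // Field order of the KMSAN context state assumed by the GEPs above:
    //   { param_shadow, retval_shadow, va_arg_shadow, va_arg_origin,
    //     va_arg_overflow_size, param_origin, retval_origin }
    // KMSAN has no TLS globals, so each function fetches this struct from the
    // runtime via __msan_get_context_state() and indexes into it instead.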
    for (Instruction *I : Instructions)

    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));

    VAHelper->finalizeInstrumentation();

    for (auto Item : LifetimeStartList) {
      instrumentAlloca(*Item.second, Item.first);
      AllocaSet.remove(Item.second);

    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    materializeChecks();

    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

    const DataLayout &DL = F.getDataLayout();
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
                             VT->getElementCount());
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
    if (isScalableNonVectorType(OrigTy)) {
      LLVM_DEBUG(dbgs() << "getShadowTy: Scalable non-vector type: " << *OrigTy
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
      Value *ShadowBool = convertToBool(ShadowItem, IRB);
      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
        Aggregator = ShadowBool;

  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
    if (!Array->getNumElements())
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);

    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);

      return collapseStructShadow(Struct, V, IRB);
      return collapseArrayShadow(Array, V, IRB);
        V->getType()->getPrimitiveSizeInBits().getFixedValue();

    Type *VTy = V->getType();
    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    assert(IntPtrTy == MS.IntptrTy);

                                     VectTy->getElementCount(),
                                     constToIntPtr(VectTy->getElementType(), C));
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C);

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
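    // Worked example, assuming the default x86_64 Linux mapping (AndMask = 0,
    // XorMask = 0x500000000000): for an application pointer 0x7fff00001234,
    //   OffsetLong = 0x7fff00001234 ^ 0x500000000000 = 0x2fff00001234,
    // which is already the shadow address since ShadowBase is 0 there.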
  std::pair<Value *, Value *>
                              MaybeAlign Alignment) {
      assert(VectTy->getElementType()->isPointerTy());
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    return std::make_pair(ShadowPtr, OriginPtr);

  template <typename... ArgsTy>
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);

    FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,

    return std::make_pair(ShadowPtr, OriginPtr);

  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    Value *ShadowPtrs = ConstantInt::getNullValue(
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
    for (unsigned i = 0; i < NumElements; ++i) {
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    return {ShadowPtrs, OriginPtrs};

  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 MaybeAlign Alignment,
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
                         ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg");

    if (!MS.TrackOrigins)
                         ConstantInt::get(MS.IntptrTy, ArgOffset),

  Value *getOriginPtrForRetval() {
    return MS.RetvalOriginTLS;

    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);

    if (!MS.TrackOrigins)
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;

    Type *ShadowTy = getShadowTy(OrigTy);

  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

      getPoisonedShadow(AT->getElementType()));
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));

    Type *ShadowTy = getShadowTy(V);
    return getPoisonedShadow(ShadowTy);

    if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
      return getCleanShadow(V);
      Value *Shadow = ShadowMap[V];
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
      assert(Shadow && "No shadow for a value");

      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);

      Value *&ShadowPtr = ShadowMap[V];

        unsigned ArgOffset = 0;
        const DataLayout &DL = F->getDataLayout();
        for (auto &FArg : F->args()) {
          if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
                           ? "vscale not fully supported\n"
                           : "Arg is not sized\n"));
              ShadowPtr = getCleanShadow(V);
              setOrigin(A, getCleanOrigin());

          unsigned Size = FArg.hasByValAttr()
                              ? DL.getTypeAllocSize(FArg.getParamByValType())
                              : DL.getTypeAllocSize(FArg.getType());

          if (FArg.hasByValAttr()) {
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
            if (!PropagateShadow || Overflow) {
              EntryIRB.CreateMemSet(
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                  CpShadowPtr, CopyAlign, Base, CopyAlign, Size);

            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              EntryIRB.CreateMemCpy(

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
                     << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");

      assert(ShadowPtr && "Could not find shadow for an argument");

        cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
        PoisonUndefVectors) {
      for (unsigned i = 0; i != NumElems; ++i) {
                                      : getCleanShadow(Elem);
      LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
                        << *ShadowConstant << "\n");
      return ShadowConstant;

    return getCleanShadow(V);
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));

    if (!MS.TrackOrigins)
      return getCleanOrigin();
           "Unexpected value type in getOrigin()");
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
      Value *Origin = OriginMap[V];
      assert(Origin && "Missing origin");

  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));

  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");

    if (isScalableNonVectorType(ShadowTy)) {
      LLVM_DEBUG(dbgs() << "Skipping check of scalable non-vector " << *Shadow
                        << " before " << *OrigIns << "\n");

           "Can only insert checks for integer, vector, and aggregate shadow "

    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));

  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
    Value *Shadow, *Origin;
      Shadow = getShadow(Val);
      Origin = getOrigin(Val);
    insertCheckShadow(Shadow, Origin, OrigIns);
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
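    // Rationale for the strengthening above: the (clean) shadow of an atomic
    // store is written *before* the store itself, and the shadow of an atomic
    // load is read *after* the load, so stores are promoted to at least
    // release and loads to at least acquire to keep the shadow access ordered
    // with the data access. The CABI tables apply the same promotion to
    // __atomic_* libcalls.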
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))

    if (isInPrologue(I))

      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertCheckShadowOf(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        setOrigin(&I, getCleanOrigin());
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
      insertCheckShadowOf(I.getPointerOperand(), &I);

  void handleCASOrRMW(Instruction &I) {
    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),

      insertCheckShadowOf(Addr, &I);

      insertCheckShadowOf(Val, &I);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());

  void visitAtomicRMWInst(AtomicRMWInst &I) {

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {

  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    setOrigin(&I, getOrigin(&I, 0));
  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void visitSExtInst(SExtInst &I) {
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitZExtInst(ZExtInst &I) {
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitTruncInst(TruncInst &I) {
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitBitCastInst(BitCastInst &I) {
      if (CI->isMustTailCall())
    setOrigin(&I, getOrigin(&I, 0));

  void visitPtrToIntInst(PtrToIntInst &I) {
                             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitIntToPtrInst(IntToPtrInst &I) {
                             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
  void visitAnd(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);
  void visitOr(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");
    setOriginForNaryOp(I);
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    MemorySanitizerVisitor *MSV;

    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

      if (CombineShadow) {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");

      if (MSV->MS.TrackOrigins) {
        if (!ConstOrigin || !ConstOrigin->isNullValue()) {
          Value *Cond = MSV->convertToBool(OpShadow, IRB);

      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);

    void Done(Instruction *I) {
      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      if (MSV->MS.TrackOrigins) {
        MSV->setOrigin(I, Origin);

    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
           "Vector of pointers is not a valid shadow type");

    Type *srcTy = V->getType();
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)

    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
    if (V->getType()->isPtrOrPtrVectorTy())

  void handleShadowOr(Instruction &I) {
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())
  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
    unsigned TotalNumElems =
      TotalNumElems = TotalNumElems * 2;
    assert(TotalNumElems % ReductionFactor == 0);

    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;
      for (unsigned X = 0; X < TotalNumElems; X += ReductionFactor)
        Mask.push_back(X + i);
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);
    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =
    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, 2, FirstArgShadow,
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
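  // Example: for a pairwise horizontal add on <4 x i16> (e.g.
  // llvm.aarch64.neon.addp), ReductionFactor == 2, so each result lane's
  // shadow is the OR of the shadows of the two adjacent source lanes that
  // were summed; horizontalReduce builds this with shufflevector masks that
  // select every ReductionFactor-th element.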
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);
    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =
    [[maybe_unused]] FixedVectorType *ReturnType =

    FixedVectorType *ReinterpretShadowTy = nullptr;

    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);

    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);

    Value *OrShadow = horizontalReduce(I, 2, FirstArgShadow,
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);

  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
      Type *EltTy = VTy->getElementType();
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
          Elements.push_back(ConstantInt::get(EltTy, 1));
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
        ShadowMul = ConstantInt::get(Ty, 1);

              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
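  // Example for the rule above: a constant multiplier with N trailing zeros
  // shifts the other operand left by at least N, so the shadow is multiplied
  // by 2^countr_zero(C). For C = 12 = 0b1100 the shadow is multiplied by 4:
  // the two low result bits are always 0, hence defined, regardless of the
  // other operand's shadow.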
  void visitMul(BinaryOperator &I) {
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void handleIntegerDiv(Instruction &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
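  // Division is strict only in the divisor: an uninitialized divisor can be
  // UB (e.g. divide-by-zero), so it is checked eagerly, while the dividend's
  // shadow simply flows through into the result.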
  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }

  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
  void handleEqualityComparison(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    setOriginForNaryOp(I);
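  // The equality trick, in short: with C = A ^ B and Sc = Sa | Sb, any bit of
  // C set outside Sc proves A != B no matter what the undefined bits hold, so
  // the outcome is fully defined; otherwise the result is undefined iff any
  // bit is undefined, i.e. Si = !(C & ~Sc) && Sc, reduced over all bits.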
  void handleRelationalComparisonExact(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();
    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      return std::make_pair(Min, Max);

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);

    setOriginForNaryOp(I);
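  // GetMinMaxUnsigned yields the tightest interval an operand can lie in:
  // Min = V & ~S (undefined bits as 0) and Max = V | S (undefined bits as 1),
  // after flipping the sign bit for signed predicates so unsigned math works.
  // The comparison is fully defined exactly when the boundary comparisons
  // (Amin vs Bmax) and (Amax vs Bmin) agree, so the result shadow is their
  // XOR.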
  void handleSignedRelationalComparison(ICmpInst &I) {
      op = I.getOperand(0);
      pre = I.getPredicate();
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(op));

  void visitICmpInst(ICmpInst &I) {
    if (I.isEquality()) {
      handleEqualityComparison(I);
      handleRelationalComparisonExact(I);
      handleSignedRelationalComparison(I);
      handleRelationalComparisonExact(I);

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
  void handleShift(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V2 = I.getOperand(1);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
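  // Shift shadow rule: the first operand's shadow is shifted by the concrete
  // shift amount, so undefined bits travel with their values; if the shift
  // amount itself has any undefined bit, S2Conv (sext of S2 != 0) smears the
  // entire result as undefined.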
  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  void handleFunnelShift(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 2);
    Value *V2 = I.getOperand(2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1));
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1));
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitMemSetInst(MemSetInst &I) {
        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins)

  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
        setOrigin(&I, getCleanOrigin());

  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();
    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();

    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      return handleVectorStoreIntrinsic(I);

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      return handleVectorLoadIntrinsic(I);

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))
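  // Heuristic summary: an unknown intrinsic shaped like a vector store
  // (pointer + vector arguments, void result, writes memory) or a vector
  // load (single pointer argument, vector result, readonly) is treated as
  // one; an intrinsic that does not access memory gets "OR all argument
  // shadows into the result". Everything else falls back to the strict
  // default semantics handled by the caller.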
  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I

  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

  void handleLifetimeStart(IntrinsicInst &I) {
    LifetimeStartList.push_back(std::make_pair(&I, AI));

  void handleBswap(IntrinsicInst &I) {
    Type *OpType = Op->getType();
    setOrigin(&I, getOrigin(Op));

  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);
        I.getType(), I.getIntrinsicID(), {Src, False});
        I.getType(), I.getIntrinsicID(), {SrcShadow, False});
        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    Value *NotAllZeroShadow =
    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");

    OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");

    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);

  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {

  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =
    unsigned FullShadowNumElems =
    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);

  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());

    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);

    Value *S0 = getShadow(&I, 0);

    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);

    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);

  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    Value *CopyOp, *ConvertOp;

    assert((!HasRoundingMode ||
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      ConvertOp = I.getArgOperand(0);

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      AggShadow = ConvertShadow;

    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

      Value *ResultShadow = getShadow(CopyOp);
      for (int i = 0; i < NumUsedElements; ++i) {
            ResultShadow, ConstantInt::getNullValue(EltTy),
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), true);
    return CreateShadowCast(IRB, S2, T, true);
    return CreateShadowCast(IRB, S2, T, true);
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    Value *S2 = getShadow(&I, 1);
                  : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
                  {IRB.CreateBitCast(S1, V1->getType()), V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
                                X86_MMXSizeInBits / EltSizeInBits);

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
    if (MMXEltSizeInBits) {
                                  {S1_ext, S2_ext}, nullptr,
                                  "_msprop_vector_pack");
    if (MMXEltSizeInBits)
    setOriginForNaryOp(I);

  Constant *createDppMask(unsigned Width, unsigned Mask) {

    const unsigned Width =
    Value *DstMaskV = createDppMask(Width, DstMask);

  void handleDppIntrinsic(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);

    const unsigned Width =
    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
        SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    setOriginForNaryOp(I);

    C = CreateAppToShadowCast(IRB, C);
  void handleBlendvIntrinsic(IntrinsicInst &I) {
    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    C = convertBlendvToSelectMask(IRB, C);
    Sc = convertBlendvToSelectMask(IRB, Sc);
    handleSelectLikeInst(I, C, T, F);

  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    unsigned ZeroBitsPerResultElement =
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    setOriginForNaryOp(I);

  void handleVectorPmaddIntrinsic(IntrinsicInst &I, unsigned ReductionFactor,
                                  unsigned EltSizeInBits = 0) {
    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      Va = I.getOperand(0);
      Vb = I.getOperand(1);
      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {
      Va = I.getOperand(1);
      Vb = I.getOperand(2);
      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);

    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =
      assert(AccumulatorType == ReturnType);

    FixedVectorType *ImplicitReturnType =
    if (EltSizeInBits) {
          getMMXVectorTy(EltSizeInBits * ReductionFactor,
          ReturnType->getNumElements() * ReductionFactor);

    VaInt = CreateAppToShadowCast(IRB, Va);
    VbInt = CreateAppToShadowCast(IRB, Vb);

    And = IRB.CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});
                                 ImplicitReturnType);

    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setOriginForNaryOp(I);

  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));
    setOriginForNaryOp(I);

  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
    Value *Shadow0 = getShadow(&I, 0);
    setOriginForNaryOp(I);

  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));

  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));

  void handleStmxcsr(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;

      insertCheckShadowOf(Addr, &I);

  void handleLdmxcsr(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);

    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);

      insertCheckShadowOf(Addr, &I);

    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
    insertCheckShadow(Shadow, Origin, &I);
  void handleMaskedExpandLoad(IntrinsicInst &I) {
    MaybeAlign Align = I.getParamAlign(0);
    Value *PassThru = I.getArgOperand(2);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, false);

                                 getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());

  void handleMaskedCompressStore(IntrinsicInst &I) {
    Value *Values = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(1);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, true);

  void handleMaskedGather(IntrinsicInst &I) {
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment = I.getParamAlign(0).valueOrOne();
    Value *PassThru = I.getArgOperand(2);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertCheckShadowOf(Mask, &I);
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);

                                 getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());
4273 void handleMaskedScatter(IntrinsicInst &
I) {
4275 Value *Values =
I.getArgOperand(0);
4276 Value *Ptrs =
I.getArgOperand(1);
4277 const Align Alignment =
I.getParamAlign(1).valueOrOne();
4280 Type *PtrsShadowTy = getShadowTy(Ptrs);
4282 insertCheckShadowOf(Mask, &
I);
4286 insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &
I);
4289 Value *Shadow = getShadow(Values);
4290 Type *ElementShadowTy =
4292 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
4293 Ptrs, IRB, ElementShadowTy, Alignment,
true);
4304 void handleMaskedStore(IntrinsicInst &
I) {
4306 Value *
V =
I.getArgOperand(0);
4308 const Align Alignment =
I.getParamAlign(1).valueOrOne();
4310 Value *Shadow = getShadow(V);
4313 insertCheckShadowOf(
Ptr, &
I);
4314 insertCheckShadowOf(Mask, &
I);
4319 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
4320 Ptr, IRB, Shadow->
getType(), Alignment,
true);
4324 if (!MS.TrackOrigins)
4327 auto &
DL =
F.getDataLayout();
4328 paintOrigin(IRB, getOrigin(V), OriginPtr,
4337 void handleMaskedLoad(IntrinsicInst &I) {
4340 const Align Alignment = I.getParamAlign(0).valueOrOne();
4342 Value *PassThru = I.getArgOperand(2);
4345 insertCheckShadowOf(Ptr, &I);
4346 insertCheckShadowOf(Mask, &I);
4349 if (!PropagateShadow) {
4350 setShadow(&I, getCleanShadow(&I));
4351 setOrigin(&I, getCleanOrigin());
4355 Type *ShadowTy = getShadowTy(&I);
4356 Value *ShadowPtr, *OriginPtr;
4357 std::tie(ShadowPtr, OriginPtr) =
4358 getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, false);
4360 getShadow(PassThru), "_msmaskedld"));
4362 if (!MS.TrackOrigins)
4369 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
4374 setOrigin(&I, Origin);
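// Note (added commentary, not from the original listing): unlike the
// generic llvm.masked.* intrinsics above, the AVX/AVX2 maskload/maskstore
// intrinsics take an integer vector mask whose per-element sign bit
// selects the lane. The handlers below therefore pass the application
// mask through verbatim and apply the original intrinsic to the shadow
// pointer to move shadow in and out of shadow memory.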
4390 void handleAVXMaskedStore(IntrinsicInst &I) {
4395 Value *Dst = I.getArgOperand(0);
4396 assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");
4401 Value *Src = I.getArgOperand(2);
4406 Value *SrcShadow = getShadow(Src);
4409 insertCheckShadowOf(Dst, &I);
4410 insertCheckShadowOf(Mask, &I);
4413 Value *DstShadowPtr;
4414 Value *DstOriginPtr;
4415 std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
4416 Dst, IRB, SrcShadow->getType(), Alignment, true);
4418 SmallVector<Value *, 2> ShadowArgs;
4419 ShadowArgs.append(1, DstShadowPtr);
4420 ShadowArgs.append(1, Mask);
4431 if (!MS.TrackOrigins)
4435 auto &DL = F.getDataLayout();
4436 paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
4437 DL.getTypeStoreSize(SrcShadow->getType()),
4456 void handleAVXMaskedLoad(IntrinsicInst &I) {
4461 Value *Src = I.getArgOperand(0);
4462 assert(Src->getType()->isPointerTy() && "Source is not a pointer!");
4470 insertCheckShadowOf(Mask, &I);
4473 Type *SrcShadowTy = getShadowTy(Src);
4474 Value *SrcShadowPtr, *SrcOriginPtr;
4475 std::tie(SrcShadowPtr, SrcOriginPtr) =
4476 getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, false);
4478 SmallVector<Value *, 2> ShadowArgs;
4479 ShadowArgs.append(1, SrcShadowPtr);
4480 ShadowArgs.append(1, Mask);
4489 if (!MS.TrackOrigins)
4496 setOrigin(&I, PtrSrcOrigin);
4505 assert(isFixedIntVector(Idx));
4506 auto IdxVectorSize =
4514 auto *IdxShadow = getShadow(Idx);
4519 insertCheckShadow(Truncated, getOrigin(Idx), I);
4524 void handleAVXVpermilvar(IntrinsicInst &I) {
4526 Value *Shadow = getShadow(&I, 0);
4527 maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);
4531 Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
4533 {Shadow, I.getArgOperand(1)});
4536 setOriginForNaryOp(I);
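// Shadow propagation sketch for vpermi2var (added commentary): the result
// shadow is obtained by running the very same permutation on the operand
// shadows, roughly
//   ResultShadow = vpermi2var(AShadow, Idx, BShadow)
// after checking, via maskedCheckAVXIndexShadow, only those bits of Idx
// that actually select elements.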
4541 void handleAVXVpermi2var(IntrinsicInst &I) {
4546 [[maybe_unused]] auto ArgVectorSize =
4549 ->getNumElements() == ArgVectorSize);
4551 ->getNumElements() == ArgVectorSize);
4552 assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
4553 assert(I.getType() == I.getArgOperand(0)->getType());
4554 assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());
4556 Value *AShadow = getShadow(&I, 0);
4557 Value *Idx = I.getArgOperand(1);
4558 Value *BShadow = getShadow(&I, 2);
4560 maskedCheckAVXIndexShadow(IRB, Idx, &I);
4564 AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
4565 BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
4567 {AShadow, Idx, BShadow});
4569 setOriginForNaryOp(I);
4572 [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
4576 [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
4580 [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
4581 return isFixedIntVectorTy(V->getType());
4584 [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
4585 return isFixedFPVectorTy(V->getType());
4607 void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
4612 Value *WriteThrough;
4616 WriteThrough = I.getOperand(2);
4617 Mask = I.getOperand(3);
4620 WriteThrough = I.getOperand(1);
4621 Mask = I.getOperand(2);
4626 assert(isFixedIntVector(WriteThrough));
4628 unsigned ANumElements =
4630 [[maybe_unused]] unsigned WriteThruNumElements =
4632 assert(ANumElements == WriteThruNumElements ||
4633 ANumElements * 2 == WriteThruNumElements);
4636 unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
4637 assert(ANumElements == MaskNumElements ||
4638 ANumElements * 2 == MaskNumElements);
4640 assert(WriteThruNumElements == MaskNumElements);
4644 insertCheckShadowOf(Mask, &I);
4654 Value *AShadow = getShadow(A);
4655 AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);
4657 if (ANumElements * 2 == MaskNumElements) {
4669 "_ms_mask_bitcast");
4679 getShadowTy(&I), "_ms_a_shadow");
4681 Value *WriteThroughShadow = getShadow(WriteThrough);
4683 "_ms_writethru_select");
4685 setShadow(&I, Shadow);
4686 setOriginForNaryOp(I);
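// BMI/BMI2 sketch (added commentary): for Z = bextr/bzhi/pdep/pext(X, Y),
// the handler below re-applies the intrinsic to {getShadow(X), Y} and
// folds in the shadow of the control operand (SMask), so that any
// uninitialized control bit poisons the whole result.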
4694 void handleBmiIntrinsic(IntrinsicInst &I) {
4696 Type *ShadowTy = getShadowTy(&I);
4699 Value *SMask = getShadow(&I, 1);
4704 {getShadow(&I, 0), I.getOperand(1)});
4707 setOriginForNaryOp(I);
4710 static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
4711 SmallVector<int, 8> Mask;
4712 for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
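// pclmulqdq sketch (added commentary): the immediate's bit 0 selects the
// low/high 64-bit half of operand 0 and bit 4 (0x10) the half of operand
// 1, so getPclmulMask() builds shuffle masks that splat the selected
// halves; the two shuffled shadows are then OR-combined through
// ShadowAndOriginCombiner below.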
4726 void handlePclmulIntrinsic(IntrinsicInst &I) {
4731 "pclmul 3rd operand must be a constant");
4734 getPclmulMask(Width, Imm & 0x01));
4736 getPclmulMask(Width, Imm & 0x10));
4737 ShadowAndOriginCombiner SOC(this, IRB);
4738 SOC.Add(Shuf0, getOrigin(&I, 0));
4739 SOC.Add(Shuf1, getOrigin(&I, 1));
4744 void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
4749 Value *Second = getShadow(&I, 1);
4751 SmallVector<int, 16> Mask;
4752 Mask.push_back(Width);
4753 for (unsigned i = 1; i < Width; i++)
4757 setShadow(&I, Shadow);
4758 setOriginForNaryOp(I);
4761 void handleVtestIntrinsic(IntrinsicInst &I) {
4763 Value *Shadow0 = getShadow(&I, 0);
4764 Value *Shadow1 = getShadow(&I, 1);
4770 setShadow(&I, Shadow);
4771 setOriginForNaryOp(I);
4774 void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
4779 Value *Second = getShadow(&I, 1);
4782 SmallVector<int, 16> Mask;
4783 Mask.push_back(Width);
4784 for (unsigned i = 1; i < Width; i++)
4788 setShadow(&I, Shadow);
4789 setOriginForNaryOp(I);
4795 void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
4796 assert(I.getArgOperand(0)->getType() == I.getType());
4801 ShadowAndOriginCombiner SC(this, IRB);
4802 SC.Add(I.getArgOperand(0));
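// llvm.abs sketch (added commentary): abs(INT_MIN) is poison when the
// second argument is true, so when IsIntMinPoison is set the handler
// below additionally poisons the shadow of any lane whose value could be
// INT_MIN; otherwise the source shadow simply passes through, since abs
// does not mix bits between lanes.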
4810 void handleAbsIntrinsic(IntrinsicInst &I) {
4812 Value *Src = I.getArgOperand(0);
4813 Value *IsIntMinPoison = I.getArgOperand(1);
4815 assert(I.getType()->isIntOrIntVectorTy());
4817 assert(Src->getType() == I.getType());
4823 Value *SrcShadow = getShadow(Src);
4827 Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
4830 Value *PoisonedShadow = getPoisonedShadow(Src);
4831 Value *PoisonedIfIntMinShadow =
4834 IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);
4836 setShadow(&I, Shadow);
4837 setOrigin(&I, getOrigin(&I, 0));
4840 void handleIsFpClass(IntrinsicInst &I) {
4842 Value *Shadow = getShadow(&I, 0);
4843 setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
4844 setOrigin(&I, getOrigin(&I, 0));
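// Illustrative IR for the *.with.overflow family handled next (a sketch,
// not from the original listing):
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// The result is a {value, overflow-bit} aggregate, so the shadow is built
// per field: any poisoned operand bit poisons both the value field and
// the i1 overflow flag.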
4847 void handleArithmeticWithOverflow(IntrinsicInst &I) {
4849 Value *Shadow0 = getShadow(&I, 0);
4850 Value *Shadow1 = getShadow(&I, 1);
4853 IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
4859 setShadow(&I, Shadow);
4860 setOriginForNaryOp(I);
4866 Value *Shadow = getShadow(V);
4888 void handleAVX512VectorDownConvert(IntrinsicInst &I) {
4893 Value *WriteThrough = I.getOperand(1);
4897 assert(isFixedIntVector(WriteThrough));
4899 unsigned ANumElements =
4901 unsigned OutputNumElements =
4903 assert(ANumElements == OutputNumElements ||
4904 ANumElements * 2 == OutputNumElements);
4907 assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
4908 insertCheckShadowOf(Mask, &I);
4919 if (ANumElements != OutputNumElements) {
4921 Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
4928 Value *AShadow = getShadow(A);
4932 VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);
4942 AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
4943 AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);
4945 Value *WriteThroughShadow = getShadow(WriteThrough);
4948 setShadow(&I, Shadow);
4949 setOriginForNaryOp(I);
4976 void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
4977 unsigned WriteThruIndex,
4978 unsigned MaskIndex) {
4981 unsigned NumArgs = I.arg_size();
4982 assert(AIndex < NumArgs);
4983 assert(WriteThruIndex < NumArgs);
4984 assert(MaskIndex < NumArgs);
4985 assert(AIndex != WriteThruIndex);
4986 assert(AIndex != MaskIndex);
4987 assert(WriteThruIndex != MaskIndex);
4989 Value *A = I.getOperand(AIndex);
4990 Value *WriteThru = I.getOperand(WriteThruIndex);
4994 assert(isFixedFPVector(WriteThru));
4996 [[maybe_unused]] unsigned ANumElements =
4998 unsigned OutputNumElements =
5000 assert(ANumElements == OutputNumElements);
5002 for (unsigned i = 0; i < NumArgs; ++i) {
5003 if (i != AIndex && i != WriteThruIndex) {
5006 assert(I.getOperand(i)->getType()->isIntegerTy());
5007 insertCheckShadowOf(I.getOperand(i), &I);
5012 if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
5014 assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
5021 Value *AShadow = getShadow(A);
5027 Value *WriteThruShadow = getShadow(WriteThru);
5030 setShadow(&I, Shadow);
5032 setOriginForNaryOp(I);
5042 void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
5048 Value *WriteThrough = I.getOperand(2);
5055 insertCheckShadowOf(Mask, &I);
5059 unsigned NumElements =
5061 assert(NumElements == 8);
5062 assert(A->getType() == B->getType());
5064 assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);
5067 Value *ALowerShadow = extractLowerShadow(IRB, A);
5068 Value *BLowerShadow = extractLowerShadow(IRB, B);
5070 Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);
5072 Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);
5079 Value *AShadow = getShadow(A);
5080 Value *DstLowerShadow =
5081 IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
5083 AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0),
5086 setShadow(&I, DstShadow);
5087 setOriginForNaryOp(I);
5117 void handleAVXGF2P8Affine(IntrinsicInst &I) {
5128 ->getScalarSizeInBits() == 8);
5130 assert(A->getType() == X->getType());
5132 assert(B->getType()->isIntegerTy());
5133 assert(B->getType()->getScalarSizeInBits() == 8);
5135 assert(I.getType() == A->getType());
5137 Value *AShadow = getShadow(A);
5138 Value *XShadow = getShadow(X);
5139 Value *BZeroShadow = getCleanShadow(B);
5142 I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
5144 {X, AShadow, BZeroShadow});
5146 {XShadow, A, BZeroShadow});
5149 Value *BShadow = getShadow(B);
5150 Value *BBroadcastShadow = getCleanShadow(AShadow);
5155 for (unsigned i = 0; i < NumElements; i++)
5159 {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
5160 setOriginForNaryOp(I);
5174 void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
5175 unsigned int numArgs = I.arg_size();
5178 assert(I.getType()->isStructTy());
5188 assert(4 <= numArgs && numArgs <= 6);
5202 for (unsigned int i = 0; i < numArgs - 2; i++)
5203 ShadowArgs.push_back(getShadow(I.getArgOperand(i)));
5206 Value *LaneNumber = I.getArgOperand(numArgs - 2);
5210 insertCheckShadowOf(LaneNumber, &I);
5213 Value *Src = I.getArgOperand(numArgs - 1);
5214 assert(Src->getType()->isPointerTy() && "Source is not a pointer!");
5216 Type *SrcShadowTy = getShadowTy(Src);
5217 auto [SrcShadowPtr, SrcOriginPtr] =
5218 getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), false);
5228 if (!MS.TrackOrigins)
5232 setOrigin(&I, PtrSrcOrigin);
5249 void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
5253 int numArgOperands = I.arg_size();
5256 assert(numArgOperands >= 1);
5257 Value *Addr = I.getArgOperand(numArgOperands - 1);
5259 int skipTrailingOperands = 1;
5262 insertCheckShadowOf(Addr, &I);
5266 skipTrailingOperands++;
5267 assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
5269 I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
5272 SmallVector<Value *, 8> ShadowArgs;
5274 for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
5276 Value *Shadow = getShadow(&I, i);
5277 ShadowArgs.append(1, Shadow);
5294 (numArgOperands - skipTrailingOperands));
5295 Type *OutputShadowTy = getShadowTy(OutputVectorTy);
5299 I.getArgOperand(numArgOperands - skipTrailingOperands));
5301 Value *OutputShadowPtr, *OutputOriginPtr;
5303 std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
5304 Addr, IRB, OutputShadowTy, Align(1), true);
5305 ShadowArgs.append(1, OutputShadowPtr);
5311 if (MS.TrackOrigins) {
5319 OriginCombiner OC(this, IRB);
5320 for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
5321 OC.Add(I.getArgOperand(i));
5323 const DataLayout &DL = F.getDataLayout();
5324 OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
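// General recipe (added commentary): handleIntrinsicByApplyingToShadow
// re-invokes an intrinsic on the shadows of its leading operands while
// passing the last trailingVerbatimArgs operands through unchanged; the
// verbatim operands' shadows are then OR-ed into the result. This works
// for intrinsics such as bitreverse or the NEON tbl/tbx family, which
// move bits around without computing on them.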
5351 void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
5353 unsigned int trailingVerbatimArgs) {
5356 assert(trailingVerbatimArgs < I.arg_size());
5358 SmallVector<Value *, 8> ShadowArgs;
5360 for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
5361 Value *Shadow = getShadow(&I, i);
5369 for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
5371 Value *Arg = I.getArgOperand(i);
5377 Value *CombinedShadow = CI;
5380 for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
5383 CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
5384 CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
5389 setOriginForNaryOp(I);
5395 void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
5401 bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
5402 switch (I.getIntrinsicID()) {
5403 case Intrinsic::uadd_with_overflow:
5404 case Intrinsic::sadd_with_overflow:
5405 case Intrinsic::usub_with_overflow:
5406 case Intrinsic::ssub_with_overflow:
5407 case Intrinsic::umul_with_overflow:
5408 case Intrinsic::smul_with_overflow:
5409 handleArithmeticWithOverflow(I);
5411 case Intrinsic::abs:
5412 handleAbsIntrinsic(I);
5414 case Intrinsic::bitreverse:
5415 handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
5418 case Intrinsic::is_fpclass:
5421 case Intrinsic::lifetime_start:
5422 handleLifetimeStart(I);
5424 case Intrinsic::launder_invariant_group:
5425 case Intrinsic::strip_invariant_group:
5426 handleInvariantGroup(I);
5428 case Intrinsic::bswap:
5431 case Intrinsic::ctlz:
5432 case Intrinsic::cttz:
5433 handleCountLeadingTrailingZeros(I);
5435 case Intrinsic::masked_compressstore:
5436 handleMaskedCompressStore(I);
5438 case Intrinsic::masked_expandload:
5439 handleMaskedExpandLoad(I);
5441 case Intrinsic::masked_gather:
5442 handleMaskedGather(I);
5444 case Intrinsic::masked_scatter:
5445 handleMaskedScatter(I);
5447 case Intrinsic::masked_store:
5448 handleMaskedStore(I);
5450 case Intrinsic::masked_load:
5451 handleMaskedLoad(I);
5453 case Intrinsic::vector_reduce_and:
5454 handleVectorReduceAndIntrinsic(I);
5456 case Intrinsic::vector_reduce_or:
5457 handleVectorReduceOrIntrinsic(I);
5460 case Intrinsic::vector_reduce_add:
5461 case Intrinsic::vector_reduce_xor:
5462 case Intrinsic::vector_reduce_mul:
5465 case Intrinsic::vector_reduce_smax:
5466 case Intrinsic::vector_reduce_smin:
5467 case Intrinsic::vector_reduce_umax:
5468 case Intrinsic::vector_reduce_umin:
5471 case Intrinsic::vector_reduce_fmax:
5472 case Intrinsic::vector_reduce_fmin:
5473 handleVectorReduceIntrinsic(I, false);
5476 case Intrinsic::vector_reduce_fadd:
5477 case Intrinsic::vector_reduce_fmul:
5478 handleVectorReduceWithStarterIntrinsic(I);
5481 case Intrinsic::scmp:
5482 case Intrinsic::ucmp: {
5487 case Intrinsic::fshl:
5488 case Intrinsic::fshr:
5489 handleFunnelShift(I);
5492 case Intrinsic::is_constant:
5494 setShadow(&I, getCleanShadow(&I));
5495 setOrigin(&I, getCleanOrigin());
5505 bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
5506 switch (I.getIntrinsicID()) {
5507 case Intrinsic::x86_sse_stmxcsr:
5510 case Intrinsic::x86_sse_ldmxcsr:
5517 case Intrinsic::x86_avx512_vcvtsd2usi64:
5518 case Intrinsic::x86_avx512_vcvtsd2usi32:
5519 case Intrinsic::x86_avx512_vcvtss2usi64:
5520 case Intrinsic::x86_avx512_vcvtss2usi32:
5521 case Intrinsic::x86_avx512_cvttss2usi64:
5522 case Intrinsic::x86_avx512_cvttss2usi:
5523 case Intrinsic::x86_avx512_cvttsd2usi64:
5524 case Intrinsic::x86_avx512_cvttsd2usi:
5525 case Intrinsic::x86_avx512_cvtusi2ss:
5526 case Intrinsic::x86_avx512_cvtusi642sd:
5527 case Intrinsic::x86_avx512_cvtusi642ss:
5528 handleSSEVectorConvertIntrinsic(I, 1, true);
5530 case Intrinsic::x86_sse2_cvtsd2si64:
5531 case Intrinsic::x86_sse2_cvtsd2si:
5532 case Intrinsic::x86_sse2_cvtsd2ss:
5533 case Intrinsic::x86_sse2_cvttsd2si64:
5534 case Intrinsic::x86_sse2_cvttsd2si:
5535 case Intrinsic::x86_sse_cvtss2si64:
5536 case Intrinsic::x86_sse_cvtss2si:
5537 case Intrinsic::x86_sse_cvttss2si64:
5538 case Intrinsic::x86_sse_cvttss2si:
5539 handleSSEVectorConvertIntrinsic(I, 1);
5541 case Intrinsic::x86_sse_cvtps2pi:
5542 case Intrinsic::x86_sse_cvttps2pi:
5543 handleSSEVectorConvertIntrinsic(I, 2);
5551 case Intrinsic::x86_vcvtps2ph_128:
5552 case Intrinsic::x86_vcvtps2ph_256: {
5553 handleSSEVectorConvertIntrinsicByProp(I, true);
5562 case Intrinsic::x86_avx512_mask_cvtps2dq_512:
5563 handleAVX512VectorConvertFPToInt(I, false);
5568 case Intrinsic::x86_sse2_cvtpd2ps:
5569 case Intrinsic::x86_sse2_cvtps2dq:
5570 case Intrinsic::x86_sse2_cvtpd2dq:
5571 case Intrinsic::x86_sse2_cvttps2dq:
5572 case Intrinsic::x86_sse2_cvttpd2dq:
5573 case Intrinsic::x86_avx_cvt_pd2_ps_256:
5574 case Intrinsic::x86_avx_cvt_ps2dq_256:
5575 case Intrinsic::x86_avx_cvt_pd2dq_256:
5576 case Intrinsic::x86_avx_cvtt_ps2dq_256:
5577 case Intrinsic::x86_avx_cvtt_pd2dq_256: {
5578 handleSSEVectorConvertIntrinsicByProp(I, false);
5589 case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
5590 case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
5591 case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
5592 handleAVX512VectorConvertFPToInt(I, true);
5596 case Intrinsic::x86_avx512_psll_w_512:
5597 case Intrinsic::x86_avx512_psll_d_512:
5598 case Intrinsic::x86_avx512_psll_q_512:
5599 case Intrinsic::x86_avx512_pslli_w_512:
5600 case Intrinsic::x86_avx512_pslli_d_512:
5601 case Intrinsic::x86_avx512_pslli_q_512:
5602 case Intrinsic::x86_avx512_psrl_w_512:
5603 case Intrinsic::x86_avx512_psrl_d_512:
5604 case Intrinsic::x86_avx512_psrl_q_512:
5605 case Intrinsic::x86_avx512_psra_w_512:
5606 case Intrinsic::x86_avx512_psra_d_512:
5607 case Intrinsic::x86_avx512_psra_q_512:
5608 case Intrinsic::x86_avx512_psrli_w_512:
5609 case Intrinsic::x86_avx512_psrli_d_512:
5610 case Intrinsic::x86_avx512_psrli_q_512:
5611 case Intrinsic::x86_avx512_psrai_w_512:
5612 case Intrinsic::x86_avx512_psrai_d_512:
5613 case Intrinsic::x86_avx512_psrai_q_512:
5614 case Intrinsic::x86_avx512_psra_q_256:
5615 case Intrinsic::x86_avx512_psra_q_128:
5616 case Intrinsic::x86_avx512_psrai_q_256:
5617 case Intrinsic::x86_avx512_psrai_q_128:
5618 case Intrinsic::x86_avx2_psll_w:
5619 case Intrinsic::x86_avx2_psll_d:
5620 case Intrinsic::x86_avx2_psll_q:
5621 case Intrinsic::x86_avx2_pslli_w:
5622 case Intrinsic::x86_avx2_pslli_d:
5623 case Intrinsic::x86_avx2_pslli_q:
5624 case Intrinsic::x86_avx2_psrl_w:
5625 case Intrinsic::x86_avx2_psrl_d:
5626 case Intrinsic::x86_avx2_psrl_q:
5627 case Intrinsic::x86_avx2_psra_w:
5628 case Intrinsic::x86_avx2_psra_d:
5629 case Intrinsic::x86_avx2_psrli_w:
5630 case Intrinsic::x86_avx2_psrli_d:
5631 case Intrinsic::x86_avx2_psrli_q:
5632 case Intrinsic::x86_avx2_psrai_w:
5633 case Intrinsic::x86_avx2_psrai_d:
5634 case Intrinsic::x86_sse2_psll_w:
5635 case Intrinsic::x86_sse2_psll_d:
5636 case Intrinsic::x86_sse2_psll_q:
5637 case Intrinsic::x86_sse2_pslli_w:
5638 case Intrinsic::x86_sse2_pslli_d:
5639 case Intrinsic::x86_sse2_pslli_q:
5640 case Intrinsic::x86_sse2_psrl_w:
5641 case Intrinsic::x86_sse2_psrl_d:
5642 case Intrinsic::x86_sse2_psrl_q:
5643 case Intrinsic::x86_sse2_psra_w:
5644 case Intrinsic::x86_sse2_psra_d:
5645 case Intrinsic::x86_sse2_psrli_w:
5646 case Intrinsic::x86_sse2_psrli_d:
5647 case Intrinsic::x86_sse2_psrli_q:
5648 case Intrinsic::x86_sse2_psrai_w:
5649 case Intrinsic::x86_sse2_psrai_d:
5650 case Intrinsic::x86_mmx_psll_w:
5651 case Intrinsic::x86_mmx_psll_d:
5652 case Intrinsic::x86_mmx_psll_q:
5653 case Intrinsic::x86_mmx_pslli_w:
5654 case Intrinsic::x86_mmx_pslli_d:
5655 case Intrinsic::x86_mmx_pslli_q:
5656 case Intrinsic::x86_mmx_psrl_w:
5657 case Intrinsic::x86_mmx_psrl_d:
5658 case Intrinsic::x86_mmx_psrl_q:
5659 case Intrinsic::x86_mmx_psra_w:
5660 case Intrinsic::x86_mmx_psra_d:
5661 case Intrinsic::x86_mmx_psrli_w:
5662 case Intrinsic::x86_mmx_psrli_d:
5663 case Intrinsic::x86_mmx_psrli_q:
5664 case Intrinsic::x86_mmx_psrai_w:
5665 case Intrinsic::x86_mmx_psrai_d:
5666 handleVectorShiftIntrinsic(I, false);
5668 case Intrinsic::x86_avx2_psllv_d:
5669 case Intrinsic::x86_avx2_psllv_d_256:
5670 case Intrinsic::x86_avx512_psllv_d_512:
5671 case Intrinsic::x86_avx2_psllv_q:
5672 case Intrinsic::x86_avx2_psllv_q_256:
5673 case Intrinsic::x86_avx512_psllv_q_512:
5674 case Intrinsic::x86_avx2_psrlv_d:
5675 case Intrinsic::x86_avx2_psrlv_d_256:
5676 case Intrinsic::x86_avx512_psrlv_d_512:
5677 case Intrinsic::x86_avx2_psrlv_q:
5678 case Intrinsic::x86_avx2_psrlv_q_256:
5679 case Intrinsic::x86_avx512_psrlv_q_512:
5680 case Intrinsic::x86_avx2_psrav_d:
5681 case Intrinsic::x86_avx2_psrav_d_256:
5682 case Intrinsic::x86_avx512_psrav_d_512:
5683 case Intrinsic::x86_avx512_psrav_q_128:
5684 case Intrinsic::x86_avx512_psrav_q_256:
5685 case Intrinsic::x86_avx512_psrav_q_512:
5686 handleVectorShiftIntrinsic(I, true);
5690 case Intrinsic::x86_sse2_packsswb_128:
5691 case Intrinsic::x86_sse2_packssdw_128:
5692 case Intrinsic::x86_sse2_packuswb_128:
5693 case Intrinsic::x86_sse41_packusdw:
5694 case Intrinsic::x86_avx2_packsswb:
5695 case Intrinsic::x86_avx2_packssdw:
5696 case Intrinsic::x86_avx2_packuswb:
5697 case Intrinsic::x86_avx2_packusdw:
5703 case Intrinsic::x86_avx512_packsswb_512:
5704 case Intrinsic::x86_avx512_packssdw_512:
5705 case Intrinsic::x86_avx512_packuswb_512:
5706 case Intrinsic::x86_avx512_packusdw_512:
5707 handleVectorPackIntrinsic(I);
5710 case Intrinsic::x86_sse41_pblendvb:
5711 case Intrinsic::x86_sse41_blendvpd:
5712 case Intrinsic::x86_sse41_blendvps:
5713 case Intrinsic::x86_avx_blendv_pd_256:
5714 case Intrinsic::x86_avx_blendv_ps_256:
5715 case Intrinsic::x86_avx2_pblendvb:
5716 handleBlendvIntrinsic(I);
5719 case Intrinsic::x86_avx_dp_ps_256:
5720 case Intrinsic::x86_sse41_dppd:
5721 case Intrinsic::x86_sse41_dpps:
5722 handleDppIntrinsic(I);
5725 case Intrinsic::x86_mmx_packsswb:
5726 case Intrinsic::x86_mmx_packuswb:
5727 handleVectorPackIntrinsic(I, 16);
5730 case Intrinsic::x86_mmx_packssdw:
5731 handleVectorPackIntrinsic(I, 32);
5734 case Intrinsic::x86_mmx_psad_bw:
5735 handleVectorSadIntrinsic(I, true);
5737 case Intrinsic::x86_sse2_psad_bw:
5738 case Intrinsic::x86_avx2_psad_bw:
5739 handleVectorSadIntrinsic(I);
5765 case Intrinsic::x86_sse2_pmadd_wd:
5766 case Intrinsic::x86_avx2_pmadd_wd:
5767 case Intrinsic::x86_avx512_pmaddw_d_512:
5768 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
5769 case Intrinsic::x86_avx2_pmadd_ub_sw:
5770 case Intrinsic::x86_avx512_pmaddubs_w_512:
5771 handleVectorPmaddIntrinsic(I, 2,
5776 case Intrinsic::x86_ssse3_pmadd_ub_sw:
5777 handleVectorPmaddIntrinsic(I, 2,
5782 case Intrinsic::x86_mmx_pmadd_wd:
5783 handleVectorPmaddIntrinsic(I, 2,
5846 case Intrinsic::x86_avx512_vpdpbusd_128:
5847 case Intrinsic::x86_avx512_vpdpbusd_256:
5848 case Intrinsic::x86_avx512_vpdpbusd_512:
5849 case Intrinsic::x86_avx512_vpdpbusds_128:
5850 case Intrinsic::x86_avx512_vpdpbusds_256:
5851 case Intrinsic::x86_avx512_vpdpbusds_512:
5852 case Intrinsic::x86_avx2_vpdpbssd_128:
5853 case Intrinsic::x86_avx2_vpdpbssd_256:
5854 case Intrinsic::x86_avx10_vpdpbssd_512:
5855 case Intrinsic::x86_avx2_vpdpbssds_128:
5856 case Intrinsic::x86_avx2_vpdpbssds_256:
5857 case Intrinsic::x86_avx10_vpdpbssds_512:
5858 case Intrinsic::x86_avx2_vpdpbsud_128:
5859 case Intrinsic::x86_avx2_vpdpbsud_256:
5860 case Intrinsic::x86_avx10_vpdpbsud_512:
5861 case Intrinsic::x86_avx2_vpdpbsuds_128:
5862 case Intrinsic::x86_avx2_vpdpbsuds_256:
5863 case Intrinsic::x86_avx10_vpdpbsuds_512:
5864 case Intrinsic::x86_avx2_vpdpbuud_128:
5865 case Intrinsic::x86_avx2_vpdpbuud_256:
5866 case Intrinsic::x86_avx10_vpdpbuud_512:
5867 case Intrinsic::x86_avx2_vpdpbuuds_128:
5868 case Intrinsic::x86_avx2_vpdpbuuds_256:
5869 case Intrinsic::x86_avx10_vpdpbuuds_512:
5870 handleVectorPmaddIntrinsic(I, 4,
5918 case Intrinsic::x86_avx512_vpdpwssd_128:
5919 case Intrinsic::x86_avx512_vpdpwssd_256:
5920 case Intrinsic::x86_avx512_vpdpwssd_512:
5921 case Intrinsic::x86_avx512_vpdpwssds_128:
5922 case Intrinsic::x86_avx512_vpdpwssds_256:
5923 case Intrinsic::x86_avx512_vpdpwssds_512:
5924 handleVectorPmaddIntrinsic(I, 2,
5938 case Intrinsic::x86_sse_cmp_ss:
5939 case Intrinsic::x86_sse2_cmp_sd:
5940 case Intrinsic::x86_sse_comieq_ss:
5941 case Intrinsic::x86_sse_comilt_ss:
5942 case Intrinsic::x86_sse_comile_ss:
5943 case Intrinsic::x86_sse_comigt_ss:
5944 case Intrinsic::x86_sse_comige_ss:
5945 case Intrinsic::x86_sse_comineq_ss:
5946 case Intrinsic::x86_sse_ucomieq_ss:
5947 case Intrinsic::x86_sse_ucomilt_ss:
5948 case Intrinsic::x86_sse_ucomile_ss:
5949 case Intrinsic::x86_sse_ucomigt_ss:
5950 case Intrinsic::x86_sse_ucomige_ss:
5951 case Intrinsic::x86_sse_ucomineq_ss:
5952 case Intrinsic::x86_sse2_comieq_sd:
5953 case Intrinsic::x86_sse2_comilt_sd:
5954 case Intrinsic::x86_sse2_comile_sd:
5955 case Intrinsic::x86_sse2_comigt_sd:
5956 case Intrinsic::x86_sse2_comige_sd:
5957 case Intrinsic::x86_sse2_comineq_sd:
5958 case Intrinsic::x86_sse2_ucomieq_sd:
5959 case Intrinsic::x86_sse2_ucomilt_sd:
5960 case Intrinsic::x86_sse2_ucomile_sd:
5961 case Intrinsic::x86_sse2_ucomigt_sd:
5962 case Intrinsic::x86_sse2_ucomige_sd:
5963 case Intrinsic::x86_sse2_ucomineq_sd:
5964 handleVectorCompareScalarIntrinsic(I);
5967 case Intrinsic::x86_avx_cmp_pd_256:
5968 case Intrinsic::x86_avx_cmp_ps_256:
5969 case Intrinsic::x86_sse2_cmp_pd:
5970 case Intrinsic::x86_sse_cmp_ps:
5971 handleVectorComparePackedIntrinsic(I);
5974 case Intrinsic::x86_bmi_bextr_32:
5975 case Intrinsic::x86_bmi_bextr_64:
5976 case Intrinsic::x86_bmi_bzhi_32:
5977 case Intrinsic::x86_bmi_bzhi_64:
5978 case Intrinsic::x86_bmi_pdep_32:
5979 case Intrinsic::x86_bmi_pdep_64:
5980 case Intrinsic::x86_bmi_pext_32:
5981 case Intrinsic::x86_bmi_pext_64:
5982 handleBmiIntrinsic(I);
5985 case Intrinsic::x86_pclmulqdq:
5986 case Intrinsic::x86_pclmulqdq_256:
5987 case Intrinsic::x86_pclmulqdq_512:
5988 handlePclmulIntrinsic(I);
5991 case Intrinsic::x86_avx_round_pd_256:
5992 case Intrinsic::x86_avx_round_ps_256:
5993 case Intrinsic::x86_sse41_round_pd:
5994 case Intrinsic::x86_sse41_round_ps:
5995 handleRoundPdPsIntrinsic(I);
5998 case Intrinsic::x86_sse41_round_sd:
5999 case Intrinsic::x86_sse41_round_ss:
6000 handleUnarySdSsIntrinsic(I);
6003 case Intrinsic::x86_sse2_max_sd:
6004 case Intrinsic::x86_sse_max_ss:
6005 case Intrinsic::x86_sse2_min_sd:
6006 case Intrinsic::x86_sse_min_ss:
6007 handleBinarySdSsIntrinsic(I);
6010 case Intrinsic::x86_avx_vtestc_pd:
6011 case Intrinsic::x86_avx_vtestc_pd_256:
6012 case Intrinsic::x86_avx_vtestc_ps:
6013 case Intrinsic::x86_avx_vtestc_ps_256:
6014 case Intrinsic::x86_avx_vtestnzc_pd:
6015 case Intrinsic::x86_avx_vtestnzc_pd_256:
6016 case Intrinsic::x86_avx_vtestnzc_ps:
6017 case Intrinsic::x86_avx_vtestnzc_ps_256:
6018 case Intrinsic::x86_avx_vtestz_pd:
6019 case Intrinsic::x86_avx_vtestz_pd_256:
6020 case Intrinsic::x86_avx_vtestz_ps:
6021 case Intrinsic::x86_avx_vtestz_ps_256:
6022 case Intrinsic::x86_avx_ptestc_256:
6023 case Intrinsic::x86_avx_ptestnzc_256:
6024 case Intrinsic::x86_avx_ptestz_256:
6025 case Intrinsic::x86_sse41_ptestc:
6026 case Intrinsic::x86_sse41_ptestnzc:
6027 case Intrinsic::x86_sse41_ptestz:
6028 handleVtestIntrinsic(I);
6032 case Intrinsic::x86_ssse3_phadd_w:
6033 case Intrinsic::x86_ssse3_phadd_w_128:
6034 case Intrinsic::x86_avx2_phadd_w:
6035 case Intrinsic::x86_ssse3_phsub_w:
6036 case Intrinsic::x86_ssse3_phsub_w_128:
6037 case Intrinsic::x86_avx2_phsub_w: {
6038 handlePairwiseShadowOrIntrinsic(I, 16);
6043 case Intrinsic::x86_ssse3_phadd_d:
6044 case Intrinsic::x86_ssse3_phadd_d_128:
6045 case Intrinsic::x86_avx2_phadd_d:
6046 case Intrinsic::x86_ssse3_phsub_d:
6047 case Intrinsic::x86_ssse3_phsub_d_128:
6048 case Intrinsic::x86_avx2_phsub_d: {
6049 handlePairwiseShadowOrIntrinsic(I, 32);
6054 case Intrinsic::x86_ssse3_phadd_sw:
6055 case Intrinsic::x86_ssse3_phadd_sw_128:
6056 case Intrinsic::x86_avx2_phadd_sw:
6057 case Intrinsic::x86_ssse3_phsub_sw:
6058 case Intrinsic::x86_ssse3_phsub_sw_128:
6059 case Intrinsic::x86_avx2_phsub_sw: {
6060 handlePairwiseShadowOrIntrinsic(I, 16);
6065 case Intrinsic::x86_sse3_hadd_ps:
6066 case Intrinsic::x86_sse3_hadd_pd:
6067 case Intrinsic::x86_avx_hadd_pd_256:
6068 case Intrinsic::x86_avx_hadd_ps_256:
6069 case Intrinsic::x86_sse3_hsub_ps:
6070 case Intrinsic::x86_sse3_hsub_pd:
6071 case Intrinsic::x86_avx_hsub_pd_256:
6072 case Intrinsic::x86_avx_hsub_ps_256: {
6073 handlePairwiseShadowOrIntrinsic(I);
6077 case Intrinsic::x86_avx_maskstore_ps:
6078 case Intrinsic::x86_avx_maskstore_pd:
6079 case Intrinsic::x86_avx_maskstore_ps_256:
6080 case Intrinsic::x86_avx_maskstore_pd_256:
6081 case Intrinsic::x86_avx2_maskstore_d:
6082 case Intrinsic::x86_avx2_maskstore_q:
6083 case Intrinsic::x86_avx2_maskstore_d_256:
6084 case Intrinsic::x86_avx2_maskstore_q_256: {
6085 handleAVXMaskedStore(I);
6089 case Intrinsic::x86_avx_maskload_ps:
6090 case Intrinsic::x86_avx_maskload_pd:
6091 case Intrinsic::x86_avx_maskload_ps_256:
6092 case Intrinsic::x86_avx_maskload_pd_256:
6093 case Intrinsic::x86_avx2_maskload_d:
6094 case Intrinsic::x86_avx2_maskload_q:
6095 case Intrinsic::x86_avx2_maskload_d_256:
6096 case Intrinsic::x86_avx2_maskload_q_256: {
6097 handleAVXMaskedLoad(I);
6102 case Intrinsic::x86_avx512fp16_add_ph_512:
6103 case Intrinsic::x86_avx512fp16_sub_ph_512:
6104 case Intrinsic::x86_avx512fp16_mul_ph_512:
6105 case Intrinsic::x86_avx512fp16_div_ph_512:
6106 case Intrinsic::x86_avx512fp16_max_ph_512:
6107 case Intrinsic::x86_avx512fp16_min_ph_512:
6108 case Intrinsic::x86_avx512_min_ps_512:
6109 case Intrinsic::x86_avx512_min_pd_512:
6110 case Intrinsic::x86_avx512_max_ps_512:
6111 case Intrinsic::x86_avx512_max_pd_512: {
6116 [[maybe_unused]] bool Success =
6117 maybeHandleSimpleNomemIntrinsic(I, 1);
6122 case Intrinsic::x86_avx_vpermilvar_pd:
6123 case Intrinsic::x86_avx_vpermilvar_pd_256:
6124 case Intrinsic::x86_avx512_vpermilvar_pd_512:
6125 case Intrinsic::x86_avx_vpermilvar_ps:
6126 case Intrinsic::x86_avx_vpermilvar_ps_256:
6127 case Intrinsic::x86_avx512_vpermilvar_ps_512: {
6128 handleAVXVpermilvar(I);
6132 case Intrinsic::x86_avx512_vpermi2var_d_128:
6133 case Intrinsic::x86_avx512_vpermi2var_d_256:
6134 case Intrinsic::x86_avx512_vpermi2var_d_512:
6135 case Intrinsic::x86_avx512_vpermi2var_hi_128:
6136 case Intrinsic::x86_avx512_vpermi2var_hi_256:
6137 case Intrinsic::x86_avx512_vpermi2var_hi_512:
6138 case Intrinsic::x86_avx512_vpermi2var_pd_128:
6139 case Intrinsic::x86_avx512_vpermi2var_pd_256:
6140 case Intrinsic::x86_avx512_vpermi2var_pd_512:
6141 case Intrinsic::x86_avx512_vpermi2var_ps_128:
6142 case Intrinsic::x86_avx512_vpermi2var_ps_256:
6143 case Intrinsic::x86_avx512_vpermi2var_ps_512:
6144 case Intrinsic::x86_avx512_vpermi2var_q_128:
6145 case Intrinsic::x86_avx512_vpermi2var_q_256:
6146 case Intrinsic::x86_avx512_vpermi2var_q_512:
6147 case Intrinsic::x86_avx512_vpermi2var_qi_128:
6148 case Intrinsic::x86_avx512_vpermi2var_qi_256:
6149 case Intrinsic::x86_avx512_vpermi2var_qi_512:
6150 handleAVXVpermi2var(I);
6164 case Intrinsic::x86_avx2_pshuf_b:
6165 case Intrinsic::x86_sse_pshuf_w:
6166 case Intrinsic::x86_ssse3_pshuf_b_128:
6167 case Intrinsic::x86_ssse3_pshuf_b:
6168 case Intrinsic::x86_avx512_pshuf_b_512:
6169 handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
6175 case Intrinsic::x86_avx512_mask_pmov_dw_512:
6176 case Intrinsic::x86_avx512_mask_pmov_db_512:
6177 case Intrinsic::x86_avx512_mask_pmov_qb_512:
6178 case Intrinsic::x86_avx512_mask_pmov_qw_512: {
6181 handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
6189 case Intrinsic::x86_avx512_mask_pmovs_dw_512:
6190 case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
6191 handleIntrinsicByApplyingToShadow(I,
6192 Intrinsic::x86_avx512_mask_pmov_dw_512,
6197 case Intrinsic::x86_avx512_mask_pmovs_db_512:
6198 case Intrinsic::x86_avx512_mask_pmovus_db_512: {
6199 handleIntrinsicByApplyingToShadow(I,
6200 Intrinsic::x86_avx512_mask_pmov_db_512,
6205 case Intrinsic::x86_avx512_mask_pmovs_qb_512:
6206 case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
6207 handleIntrinsicByApplyingToShadow(I,
6208 Intrinsic::x86_avx512_mask_pmov_qb_512,
6213 case Intrinsic::x86_avx512_mask_pmovs_qw_512:
6214 case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
6215 handleIntrinsicByApplyingToShadow(I,
6216 Intrinsic::x86_avx512_mask_pmov_qw_512,
6221 case Intrinsic::x86_avx512_mask_pmovs_qd_512:
6222 case Intrinsic::x86_avx512_mask_pmovus_qd_512:
6223 case Intrinsic::x86_avx512_mask_pmovs_wb_512:
6224 case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
6228 handleAVX512VectorDownConvert(I);
6268 case Intrinsic::x86_avx512_rsqrt14_ps_512:
6269 case Intrinsic::x86_avx512_rsqrt14_ps_256:
6270 case Intrinsic::x86_avx512_rsqrt14_ps_128:
6271 case Intrinsic::x86_avx512_rsqrt14_pd_512:
6272 case Intrinsic::x86_avx512_rsqrt14_pd_256:
6273 case Intrinsic::x86_avx512_rsqrt14_pd_128:
6274 case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
6275 case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
6276 case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
6277 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
6278 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
6279 case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
6280 handleAVX512VectorGenericMaskedFP(I, 0, 1,
6320 case Intrinsic::x86_avx512_rcp14_ps_512:
6321 case Intrinsic::x86_avx512_rcp14_ps_256:
6322 case Intrinsic::x86_avx512_rcp14_ps_128:
6323 case Intrinsic::x86_avx512_rcp14_pd_512:
6324 case Intrinsic::x86_avx512_rcp14_pd_256:
6325 case Intrinsic::x86_avx512_rcp14_pd_128:
6326 case Intrinsic::x86_avx10_mask_rcp_bf16_512:
6327 case Intrinsic::x86_avx10_mask_rcp_bf16_256:
6328 case Intrinsic::x86_avx10_mask_rcp_bf16_128:
6329 case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
6330 case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
6331 case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
6332 handleAVX512VectorGenericMaskedFP(I, 0, 1,
6376 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
6377 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
6378 case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
6379 case Intrinsic::x86_avx512_mask_rndscale_ps_512:
6380 case Intrinsic::x86_avx512_mask_rndscale_ps_256:
6381 case Intrinsic::x86_avx512_mask_rndscale_ps_128:
6382 case Intrinsic::x86_avx512_mask_rndscale_pd_512:
6383 case Intrinsic::x86_avx512_mask_rndscale_pd_256:
6384 case Intrinsic::x86_avx512_mask_rndscale_pd_128:
6385 case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
6386 case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
6387 case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
6388 handleAVX512VectorGenericMaskedFP(I, 0, 2,
6393 case Intrinsic::x86_avx512fp16_mask_add_sh_round:
6394 case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
6395 case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
6396 case Intrinsic::x86_avx512fp16_mask_div_sh_round:
6397 case Intrinsic::x86_avx512fp16_mask_max_sh_round:
6398 case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
6399 visitGenericScalarHalfwordInst(I);
6404 case Intrinsic::x86_vgf2p8affineqb_128:
6405 case Intrinsic::x86_vgf2p8affineqb_256:
6406 case Intrinsic::x86_vgf2p8affineqb_512:
6407 handleAVXGF2P8Affine(I);
6417 bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
6418 switch (I.getIntrinsicID()) {
6419 case Intrinsic::aarch64_neon_rshrn:
6420 case Intrinsic::aarch64_neon_sqrshl:
6421 case Intrinsic::aarch64_neon_sqrshrn:
6422 case Intrinsic::aarch64_neon_sqrshrun:
6423 case Intrinsic::aarch64_neon_sqshl:
6424 case Intrinsic::aarch64_neon_sqshlu:
6425 case Intrinsic::aarch64_neon_sqshrn:
6426 case Intrinsic::aarch64_neon_sqshrun:
6427 case Intrinsic::aarch64_neon_srshl:
6428 case Intrinsic::aarch64_neon_sshl:
6429 case Intrinsic::aarch64_neon_uqrshl:
6430 case Intrinsic::aarch64_neon_uqrshrn:
6431 case Intrinsic::aarch64_neon_uqshl:
6432 case Intrinsic::aarch64_neon_uqshrn:
6433 case Intrinsic::aarch64_neon_urshl:
6434 case Intrinsic::aarch64_neon_ushl:
6436 handleVectorShiftIntrinsic(I, false);
6441 case Intrinsic::aarch64_neon_fmaxp:
6442 case Intrinsic::aarch64_neon_fminp:
6444 case Intrinsic::aarch64_neon_fmaxnmp:
6445 case Intrinsic::aarch64_neon_fminnmp:
6447 case Intrinsic::aarch64_neon_smaxp:
6448 case Intrinsic::aarch64_neon_sminp:
6449 case Intrinsic::aarch64_neon_umaxp:
6450 case Intrinsic::aarch64_neon_uminp:
6452 case Intrinsic::aarch64_neon_addp:
6454 case Intrinsic::aarch64_neon_faddp:
6456 case Intrinsic::aarch64_neon_saddlp:
6457 case Intrinsic::aarch64_neon_uaddlp: {
6458 handlePairwiseShadowOrIntrinsic(I);
6463 case Intrinsic::aarch64_neon_fcvtas:
6464 case Intrinsic::aarch64_neon_fcvtau:
6466 case Intrinsic::aarch64_neon_fcvtms:
6467 case Intrinsic::aarch64_neon_fcvtmu:
6469 case Intrinsic::aarch64_neon_fcvtns:
6470 case Intrinsic::aarch64_neon_fcvtnu:
6472 case Intrinsic::aarch64_neon_fcvtps:
6473 case Intrinsic::aarch64_neon_fcvtpu:
6475 case Intrinsic::aarch64_neon_fcvtzs:
6476 case Intrinsic::aarch64_neon_fcvtzu:
6478 case Intrinsic::aarch64_neon_fcvtxn: {
6479 handleNEONVectorConvertIntrinsic(I);
6484 case Intrinsic::aarch64_neon_faddv:
6485 case Intrinsic::aarch64_neon_saddv:
6486 case Intrinsic::aarch64_neon_uaddv:
6489 case Intrinsic::aarch64_neon_smaxv:
6490 case Intrinsic::aarch64_neon_sminv:
6491 case Intrinsic::aarch64_neon_umaxv:
6492 case Intrinsic::aarch64_neon_uminv:
6496 case Intrinsic::aarch64_neon_fmaxv:
6497 case Intrinsic::aarch64_neon_fminv:
6498 case Intrinsic::aarch64_neon_fmaxnmv:
6499 case Intrinsic::aarch64_neon_fminnmv:
6501 case Intrinsic::aarch64_neon_saddlv:
6502 case Intrinsic::aarch64_neon_uaddlv:
6503 handleVectorReduceIntrinsic(I, true);
6506 case Intrinsic::aarch64_neon_ld1x2:
6507 case Intrinsic::aarch64_neon_ld1x3:
6508 case Intrinsic::aarch64_neon_ld1x4:
6509 case Intrinsic::aarch64_neon_ld2:
6510 case Intrinsic::aarch64_neon_ld3:
6511 case Intrinsic::aarch64_neon_ld4:
6512 case Intrinsic::aarch64_neon_ld2r:
6513 case Intrinsic::aarch64_neon_ld3r:
6514 case Intrinsic::aarch64_neon_ld4r: {
6515 handleNEONVectorLoad(I, false);
6519 case Intrinsic::aarch64_neon_ld2lane:
6520 case Intrinsic::aarch64_neon_ld3lane:
6521 case Intrinsic::aarch64_neon_ld4lane: {
6522 handleNEONVectorLoad(I, true);
6527 case Intrinsic::aarch64_neon_sqxtn:
6528 case Intrinsic::aarch64_neon_sqxtun:
6529 case Intrinsic::aarch64_neon_uqxtn:
6536 case Intrinsic::aarch64_neon_st1x2:
6537 case Intrinsic::aarch64_neon_st1x3:
6538 case Intrinsic::aarch64_neon_st1x4:
6539 case Intrinsic::aarch64_neon_st2:
6540 case Intrinsic::aarch64_neon_st3:
6541 case Intrinsic::aarch64_neon_st4: {
6542 handleNEONVectorStoreIntrinsic(I, false);
6546 case Intrinsic::aarch64_neon_st2lane:
6547 case Intrinsic::aarch64_neon_st3lane:
6548 case Intrinsic::aarch64_neon_st4lane: {
6549 handleNEONVectorStoreIntrinsic(I, true);
6562 case Intrinsic::aarch64_neon_tbl1:
6563 case Intrinsic::aarch64_neon_tbl2:
6564 case Intrinsic::aarch64_neon_tbl3:
6565 case Intrinsic::aarch64_neon_tbl4:
6566 case Intrinsic::aarch64_neon_tbx1:
6567 case Intrinsic::aarch64_neon_tbx2:
6568 case Intrinsic::aarch64_neon_tbx3:
6569 case Intrinsic::aarch64_neon_tbx4: {
6571 handleIntrinsicByApplyingToShadow(
6572 I, I.getIntrinsicID(),
6577 case Intrinsic::aarch64_neon_fmulx:
6578 case Intrinsic::aarch64_neon_pmul:
6579 case Intrinsic::aarch64_neon_pmull:
6580 case Intrinsic::aarch64_neon_smull:
6581 case Intrinsic::aarch64_neon_pmull64:
6582 case Intrinsic::aarch64_neon_umull: {
6583 handleNEONVectorMultiplyIntrinsic(I);
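// Dispatch order (added commentary): cross-platform intrinsics are tried
// first, then x86 SIMD, then ARM SIMD; anything still unhandled goes
// through the heuristic maybeHandleUnknownIntrinsic path and finally the
// conservative visitInstruction fallback, which checks every operand and
// marks the result fully initialized.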
6594 void visitIntrinsicInst(IntrinsicInst &I) {
6595 if (maybeHandleCrossPlatformIntrinsic(I))
6598 if (maybeHandleX86SIMDIntrinsic(I))
6601 if (maybeHandleArmSIMDIntrinsic(I))
6604 if (maybeHandleUnknownIntrinsic(I))
6607 visitInstruction(I);
6610 void visitLibAtomicLoad(CallBase &CB) {
6621 Value *NewOrdering =
6625 NextNodeIRBuilder NextIRB(&CB);
6626 Value *SrcShadowPtr, *SrcOriginPtr;
6627 std::tie(SrcShadowPtr, SrcOriginPtr) =
6628 getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
6630 Value *DstShadowPtr =
6631 getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
6635 NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
6636 if (MS.TrackOrigins) {
6637 Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
6639 Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
6640 NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
6644 void visitLibAtomicStore(CallBase &CB) {
6651 Value *NewOrdering =
6655 Value *DstShadowPtr =
6665 void visitCallBase(CallBase &CB) {
6673 visitAsmInstruction(CB);
6675 visitInstruction(CB);
6684 case LibFunc_atomic_load:
6686 llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
6690 visitLibAtomicLoad(CB);
6692 case LibFunc_atomic_store:
6693 visitLibAtomicStore(CB);
6709 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
6713 Func->removeFnAttrs(B);
6719 bool MayCheckCall = MS.EagerChecks;
6723 MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
6726 unsigned ArgOffset = 0;
6729 if (!A->getType()->isSized()) {
6730 LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
6734 if (A->getType()->isScalableTy()) {
6735 LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
6737 insertCheckShadowOf(A, &CB);
6742 const DataLayout &DL = F.getDataLayout();
6746 bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
6749 insertCheckShadowOf(A, &CB);
6750 Size = DL.getTypeAllocSize(A->getType());
6756 Value *ArgShadow = getShadow(A);
6757 Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
6759 << " Shadow: " << *ArgShadow << "\n");
6763 assert(A->getType()->isPointerTy() &&
6764 "ByVal argument is not a pointer!");
6769 MaybeAlign Alignment = std::nullopt;
6772 Value *AShadowPtr, *AOriginPtr;
6773 std::tie(AShadowPtr, AOriginPtr) =
6774 getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
6776 if (!PropagateShadow) {
6783 if (MS.TrackOrigins) {
6784 Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
6798 Size = DL.getTypeAllocSize(A->getType());
6804 if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
6806 getOriginPtrForArgument(IRB, ArgOffset));
6809 assert(Store != nullptr);
6818 if (FT->isVarArg()) {
6819 VAHelper->visitCallBase(CB, IRB);
6829 if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
6830 setShadow(&CB, getCleanShadow(&CB));
6831 setOrigin(&CB, getCleanOrigin());
6837 Value *Base = getShadowPtrForRetval(IRBBefore);
6838 IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
6850 setShadow(&CB, getCleanShadow(&CB));
6851 setOrigin(&CB, getCleanOrigin());
6858 "Could not find insertion point for retval shadow load");
6861 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
6864 setShadow(&CB, RetvalShadow);
6865 if (MS.TrackOrigins)
6866 setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
6871 RetVal = I->getOperand(0);
6874 return I->isMustTailCall();
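// Return-value plumbing sketch (added commentary): shadow for a call's
// return value travels through a dedicated TLS slot. The caller zeroes
// the retval slot before the call (IRBBefore) and reloads it afterwards
// (IRBAfter); visitReturnInst below stores into the same slot, unless
// eager checks apply, in which case the shadow is checked at the return
// site and a clean shadow is stored instead.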
6879 void visitReturnInst(ReturnInst &I) {
6881 Value *RetVal = I.getReturnValue();
6887 Value *ShadowPtr = getShadowPtrForRetval(IRB);
6888 bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
6889 bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
6892 bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
6894 Value *Shadow = getShadow(RetVal);
6895 bool StoreOrigin = true;
6897 insertCheckShadowOf(RetVal, &I);
6898 Shadow = getCleanShadow(RetVal);
6899 StoreOrigin = false;
6906 if (MS.TrackOrigins && StoreOrigin)
6907 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
6911 void visitPHINode(PHINode &I) {
6913 if (!PropagateShadow) {
6914 setShadow(&I, getCleanShadow(&I));
6915 setOrigin(&I, getCleanOrigin());
6919 ShadowPHINodes.push_back(&I);
6920 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
6922 if (MS.TrackOrigins)
6924 &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
6927 Value *getLocalVarIdptr(AllocaInst &I) {
6928 ConstantInt *IntConst =
6929 ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
6930 return new GlobalVariable(*F.getParent(), IntConst->getType(),
6935 Value *getLocalVarDescription(AllocaInst &I) {
6941 IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
6943 Value *ShadowBase, *OriginBase;
6944 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
6948 IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
6951 if (PoisonStack && MS.TrackOrigins) {
6952 Value *Idptr = getLocalVarIdptr(I);
6954 Value *Descr = getLocalVarDescription(I);
6955 IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
6956 {&I, Len, Idptr, Descr});
6958 IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
6964 Value *Descr = getLocalVarDescription(I);
6966 IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
6968 IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
6972 void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
6975 NextNodeIRBuilder IRB(InsPoint);
6976 const DataLayout &DL = F.getDataLayout();
6977 TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
6979 if (I.isArrayAllocation())
6983 if (MS.CompileKernel)
6984 poisonAllocaKmsan(I, IRB, Len);
6986 poisonAllocaUserspace(I, IRB, Len);
6989 void visitAllocaInst(AllocaInst &I) {
6990 setShadow(&I, getCleanShadow(&I));
6991 setOrigin(&I, getCleanOrigin());
6997 void visitSelectInst(SelectInst &I) {
7003 handleSelectLikeInst(I, B, C, D);
7009 Value *Sb = getShadow(B);
7010 Value *Sc = getShadow(C);
7011 Value *Sd = getShadow(D);
7013 Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
7014 Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
7015 Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;
7020 if (I.getType()->isAggregateType()) {
7024 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
7025 } else if (isScalableNonVectorType(I.getType())) {
7033 Sa1 = getCleanShadow(getShadowTy(I.getType()));
7041 C = CreateAppToShadowCast(IRB, C);
7042 D = CreateAppToShadowCast(IRB, D);
7049 if (MS.TrackOrigins) {
7052 if (B->getType()->isVectorTy()) {
7053 B = convertToBool(B, IRB);
7054 Sb = convertToBool(Sb, IRB);
7062 void visitLandingPadInst(LandingPadInst &I) {
7065 setShadow(&I, getCleanShadow(&I));
7066 setOrigin(&I, getCleanOrigin());
7069 void visitCatchSwitchInst(CatchSwitchInst &I) {
7070 setShadow(&I, getCleanShadow(&I));
7071 setOrigin(&I, getCleanOrigin());
7074 void visitFuncletPadInst(FuncletPadInst &I) {
7075 setShadow(&I, getCleanShadow(&I));
7076 setOrigin(&I, getCleanOrigin());
7079 void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
7081 void visitExtractValueInst(ExtractValueInst &I) {
7083 Value *Agg = I.getAggregateOperand();
7085 Value *AggShadow = getShadow(Agg);
7089 setShadow(&I, ResShadow);
7090 setOriginForNaryOp(I);
7093 void visitInsertValueInst(InsertValueInst &I) {
7096 Value *AggShadow = getShadow(I.getAggregateOperand());
7097 Value *InsShadow = getShadow(I.getInsertedValueOperand());
7103 setOriginForNaryOp(I);
7106 void dumpInst(Instruction &I) {
7110 errs() << "ZZZ " << I.getOpcodeName() << "\n";
7112 errs() << "QQQ " << I << "\n";
7115 void visitResumeInst(ResumeInst &I) {
7120 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
7125 void visitCatchReturnInst(CatchReturnInst &CRI) {
7130 void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
7139 insertCheckShadowOf(Operand, &I);
7146 auto Size = DL.getTypeStoreSize(ElemTy);
7148 if (MS.CompileKernel) {
7149 IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
7155 auto [ShadowPtr, _] =
7156 getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
7166 int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
7167 int NumRetOutputs = 0;
7174 NumRetOutputs = ST->getNumElements();
7179 for (const InlineAsm::ConstraintInfo &Info : Constraints) {
7180 switch (Info.Type) {
7188 return NumOutputs - NumRetOutputs;
7191 void visitAsmInstruction(Instruction &I) {
7207 const DataLayout &DL = F.getDataLayout();
7211 int OutputArgs = getNumOutputArgs(IA, CB);
7217 for (int i = OutputArgs; i < NumOperands; i++) {
7225 for (int i = 0; i < OutputArgs; i++) {
7231 setShadow(&I, getCleanShadow(&I));
7232 setOrigin(&I, getCleanOrigin());
7235 void visitFreezeInst(FreezeInst &I) {
7237 setShadow(&I, getCleanShadow(&I));
7238 setOrigin(&I, getCleanOrigin());
7241 void visitInstruction(Instruction &I) {
7246 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
7247 Value *Operand = I.getOperand(i);
7249 insertCheckShadowOf(Operand, &I);
7251 setShadow(&I, getCleanShadow(&I));
7252 setOrigin(&I, getCleanOrigin());
7256 struct VarArgHelperBase : public VarArgHelper {
7258 MemorySanitizer &MS;
7259 MemorySanitizerVisitor &MSV;
7261 const unsigned VAListTagSize;
7263 VarArgHelperBase(Function &F, MemorySanitizer &MS,
7264 MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
7265 : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
7269 return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
7275 MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s");
7284 return getShadowPtrForVAArgument(IRB, ArgOffset);
7293 ConstantInt::get(MS.IntptrTy, ArgOffset),
7298 unsigned BaseOffset) {
7307 TailSize, Align(8));
7310 void unpoisonVAListTagForInst(IntrinsicInst &I) {
7312 Value *VAListTag = I.getArgOperand(0);
7314 auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
7315 VAListTag, IRB, IRB.getInt8Ty(), Alignment, true);
7318 VAListTagSize, Alignment, false);
7321 void visitVAStartInst(VAStartInst &I) override {
7322 if (F.getCallingConv() == CallingConv::Win64)
7325 unpoisonVAListTagForInst(I);
7328 void visitVACopyInst(VACopyInst &I) override {
7329 if (F.getCallingConv() == CallingConv::Win64)
7331 unpoisonVAListTagForInst(I);
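// System V AMD64 va_arg layout assumed by the helper below (added
// commentary): the register save area holds 6 general-purpose registers
// * 8 bytes = 48 bytes, followed by 8 SSE registers * 16 bytes = 128
// bytes, 176 bytes total; remaining arguments overflow onto the stack.
// The shadow TLS area mirrors this layout, which is why
// AMD64GpEndOffset == 48 and AMD64FpEndOffsetSSE == 176.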
7336 struct VarArgAMD64Helper : public VarArgHelperBase {
7339 static const unsigned AMD64GpEndOffset = 48;
7340 static const unsigned AMD64FpEndOffsetSSE = 176;
7342 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
7344 unsigned AMD64FpEndOffset;
7345 AllocaInst *VAArgTLSCopy = nullptr;
7346 AllocaInst *VAArgTLSOriginCopy = nullptr;
7347 Value *VAArgOverflowSize = nullptr;
7349 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
7351 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
7352 MemorySanitizerVisitor &MSV)
7353 : VarArgHelperBase(F, MS, MSV, 24) {
7354 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
7355 for (const auto &Attr : F.getAttributes().getFnAttrs()) {
7356 if (Attr.isStringAttribute() &&
7357 (Attr.getKindAsString() == "target-features")) {
7358 if (Attr.getValueAsString().contains("-sse"))
7359 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
7365 ArgKind classifyArgument(Value *arg) {
7368 if (T->isX86_FP80Ty())
7370 if (T->isFPOrFPVectorTy())
7371 return AK_FloatingPoint;
7372 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
7373 return AK_GeneralPurpose;
7374 if (T->isPointerTy())
7375 return AK_GeneralPurpose;
7387 void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
7388 unsigned GpOffset = 0;
7389 unsigned FpOffset = AMD64GpEndOffset;
7390 unsigned OverflowOffset = AMD64FpEndOffset;
7391 const DataLayout &DL = F.getDataLayout();
7395 bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
7402 assert(A->getType()->isPointerTy());
7404 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
7405 uint64_t AlignedSize = alignTo(ArgSize, 8);
7406 unsigned BaseOffset = OverflowOffset;
7407 Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7408 Value *OriginBase = nullptr;
7409 if (MS.TrackOrigins)
7410 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7411 OverflowOffset += AlignedSize;
7414 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7418 Value *ShadowPtr, *OriginPtr;
7419 std::tie(ShadowPtr, OriginPtr) =
7424 if (MS.TrackOrigins)
7428 ArgKind AK = classifyArgument(A);
7429 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
7431 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
7433 Value *ShadowBase, *OriginBase = nullptr;
7435 case AK_GeneralPurpose:
7436 ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
7437 if (MS.TrackOrigins)
7438 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
7442 case AK_FloatingPoint:
7443 ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
7444 if (MS.TrackOrigins)
7445 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
7452 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
7453 uint64_t AlignedSize = alignTo(ArgSize, 8);
7454 unsigned BaseOffset = OverflowOffset;
7455 ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
7456 if (MS.TrackOrigins) {
7457 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
7459 OverflowOffset += AlignedSize;
7462 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
7471 Value *Shadow = MSV.getShadow(A);
7473 if (MS.TrackOrigins) {
7474 Value *Origin = MSV.getOrigin(A);
7475 TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
7476 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
7482 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
7483 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
7486 void finalizeInstrumentation() override {
7487 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
7488 "finalizeInstrumentation called twice");
7489 if (!VAStartInstrumentationList.empty()) {
7496 ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
7497 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7503 Intrinsic::umin, CopySize,
7507 if (MS.TrackOrigins) {
7508 VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7517 for (CallInst *OrigInst : VAStartInstrumentationList) {
7518 NextNodeIRBuilder IRB(OrigInst);
7519 Value *VAListTag = OrigInst->getArgOperand(0);
7521 Value *RegSaveAreaPtrPtr =
7522 IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
7524 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7526 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7527 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
7529 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
7531 if (MS.TrackOrigins)
7532 IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
7533 Alignment, AMD64FpEndOffset);
7534 Value *OverflowArgAreaPtrPtr =
7535 IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
7536 Value *OverflowArgAreaPtr =
7537 IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
7538 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
7539 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
7540 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
7544 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
7546 if (MS.TrackOrigins) {
7549 IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A rough approximation of the AArch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    // ...
    return {AK_Memory, 0};
  }
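  // For example (editorial illustration): an i64 or a pointer classifies as
  // {AK_GeneralPurpose, 1}; a double as {AK_FloatingPoint, 1}; the array
  // [4 x float] recurses on its element type and yields
  // {AK_FloatingPoint, 4}, i.e. it consumes four 16-byte VR slots in
  // visitCallBase below; anything unrecognized falls through to
  // {AK_Memory, 0}.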
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      auto [AK, RegNum] = classifyArgument(A->getType());
      if (AK == AK_GeneralPurpose &&
          (GrOffset + RegNum * 8) > AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint &&
          (VrOffset + RegNum * 16) > AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(IRB, GrOffset);
        GrOffset += 8 * RegNum;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(IRB, VrOffset);
        VrOffset += 16 * RegNum;
        break;
      case AK_Memory: {
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Base = getShadowPtrForVAArgument(IRB, BaseOffset);
        OverflowOffset += AlignedSize;
        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy shadow there.
          CleanUnusedTLS(IRB, Base, BaseOffset);
          continue;
        }
        break;
      }
      }
      // Fixed arguments advance the Gr/Vr offsets above, but no shadow is
      // stored for them.
      if (IsFixed)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr =
        IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
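  // Editorial reference (offsets per AAPCS64): the AArch64 va_list is
  //   struct { void *__stack;   // +0,  next stack-passed argument
  //            void *__gr_top;  // +8,  end of the GP register save area
  //            void *__vr_top;  // +16, end of the FP/SIMD save area
  //            int __gr_offs;   // +24, negative offset from __gr_top
  //            int __vr_offs;   // +28, negative offset from __vr_top };
  // which is exactly what the getVAField64/getVAField32 calls in
  // finalizeInstrumentation below read.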
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // ...
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start, copy va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *StackSaveAreaPtr =
          IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);

      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
      Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);

      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
      Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);

      // Copy the shadow of the GP register save area.
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);
      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *GrSrcPtr =
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Copy the shadow of the FP/SIMD register save area.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);
      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;
      Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
          IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
                                   IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // Copy the shadow of the remaining, stack-passed arguments.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;
      Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
          VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};
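// Worked example (editorial, not upstream code): how the gr_offs arithmetic
// above sizes the shadow copy. If a callee's named arguments consume three
// of the eight 8-byte argument GPRs, va_start sets __gr_offs to
// -(8 - 3) * 8 = -40, so the copy starts at TLS offset 64 + (-40) = 24 and
// covers the 64 - 24 = 40 bytes that belong to register-passed varargs.
namespace msan_doc_aarch64 {
constexpr long kGrArgSize = 64; // mirrors kAArch64GrArgSize
constexpr long grShadowStart(long GrOffs) { return kGrArgSize + GrOffs; }
constexpr long grShadowBytes(long GrOffs) {
  return kGrArgSize - grShadowStart(GrOffs);
}
static_assert(grShadowStart(-40) == 24 && grShadowBytes(-40) == 40,
              "three named GPR args leave 40 bytes of vararg GPR shadow");
} // namespace msan_doc_aarch64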
/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    // The parameter save area starts at a fixed offset from the frame
    // pointer: 48 bytes for ABIv1 and 32 bytes for ABIv2.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    if (TargetTriple.isPPC64ELFv2ABI())
      VAArgBase = 32;
    else
      VAArgBase = 48;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(8));
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(8);
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to the element size.
          Type *ElementTy = A->getType()->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < 8)
          ArgAlign = Align(8);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than a register slot to
          // match the placement of bits on a big-endian target.
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base =
              getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
        // ...
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // ...
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       CopySize);
    }
  }
};
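// Editorial note: on PPC64 ELF the va_list is just a pointer into the
// caller's parameter save area, so the helper above only has to shadow a
// single contiguous region. The save area begins at a fixed offset from the
// stack pointer, 48 bytes under ELFv1 and 32 bytes under ELFv2, which is
// what the isPPC64ELFv2ABI() check selects for VAArgBase.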
/// PowerPC32-specific implementation of VarArgHelper.
struct VarArgPowerPC32Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/12) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgBase = 0;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(
              IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
      } else {
        Value *Base;
        Type *ArgTy = A->getType();
        uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
        // Arrays are aligned to the element size; vectors are naturally
        // aligned.
        Align ArgAlign = Align(IntptrSize);
        if (ArgTy->isArrayTy()) {
          Type *ElementTy = ArgTy->getArrayElementType();
          ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
        } else if (ArgTy->isVectorTy()) {
          ArgAlign = Align(ArgSize);
        }
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than a register slot to
          // match the placement of bits on a big-endian target.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
                                           ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
        }
        VAArgOffset += ArgSize;
      }
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // ...
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Value *RegSaveAreaSize = CopySize;
      Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      RegSaveAreaPtrPtr =
          IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
      // At most 32 bytes of GPR shadow live in the register save area.
      RegSaveAreaSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
      RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, RegSaveAreaSize);
      // ...

      Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
      Value *OverflowAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
      OverflowAreaPtrPtr =
          IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
      OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
      Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
      Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
      std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *OverflowVAArgTLSCopyPtr =
          IRB.CreatePtrToInt(VAArgTLSCopy, MS.IntptrTy);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
      OverflowVAArgTLSCopyPtr =
          IRB.CreateIntToPtr(OverflowVAArgTLSCopyPtr, MS.PtrTy);
      IRB.CreateMemCpy(OverflowAreaShadowPtr, Alignment,
                       OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
    }
  }
};
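// Editorial sketch of the SVR4 PPC32 va_list record behind the +4/+8 adds
// above (field names illustrative; pointers are 4 bytes on the target):
//   struct { unsigned char gpr;      // +0, index of next GPR
//            unsigned char fpr;      // +1, index of next FPR
//            unsigned short reserved;
//            void *overflow_arg_area; // +4, stack-passed varargs
//            void *reg_save_area; };  // +8, register save area
// which also explains VAListTagSize = 12 in the constructor and why the
// register-save copy is clamped to 32 bytes (8 argument GPRs x 4 bytes).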
/// SystemZ-specific implementation of VarArgHelper.
struct VarArgSystemZHelper : public VarArgHelperBase {
  static const unsigned SystemZGpOffset = 16;
  static const unsigned SystemZGpEndOffset = 56;
  static const unsigned SystemZFpOffset = 128;
  static const unsigned SystemZFpEndOffset = 160;
  static const unsigned SystemZMaxVrArgs = 8;
  static const unsigned SystemZRegSaveAreaSize = 160;
  static const unsigned SystemZOverflowOffset = 160;
  static const unsigned SystemZVAListTagSize = 32;
  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
  static const unsigned SystemZRegSaveAreaPtrOffset = 24;

  bool IsSoftFloatABI;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum class ArgKind {
    GeneralPurpose,
    FloatingPoint,
    Vector,
    Memory,
    Indirect,
  };

  enum class ShadowExtension { None, Zero, Sign };

  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
        IsSoftFloatABI(
            F.getFnAttribute("use-soft-float").getValueAsBool()) {}
  ArgKind classifyArgument(Type *T) {
    // ...
    if (T->isIntegerTy(128) || T->isFP128Ty())
      return ArgKind::Indirect;
    if (T->isFloatingPointTy())
      return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
    if (T->isIntegerTy() || T->isPointerTy())
      return ArgKind::GeneralPurpose;
    if (T->isVectorTy())
      return ArgKind::Vector;
    return ArgKind::Memory;
  }

  ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
    // Integer arguments narrower than 64 bits are passed as full GPRs,
    // zero- or sign-extended per the parameter attribute; the shadow is
    // extended the same way.
    if (CB.paramHasAttr(ArgNo, Attribute::ZExt))
      return ShadowExtension::Zero;
    if (CB.paramHasAttr(ArgNo, Attribute::SExt))
      return ShadowExtension::Sign;
    return ShadowExtension::None;
  }
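  // Example (editorial): for a 'signext i32' vararg the 4-byte value is
  // widened to a full 8-byte GPR, so its shadow is sign-extended to i64
  // before being stored (ShadowExtension::Sign). With ShadowExtension::None
  // the value keeps its natural size and, s390x being big-endian, sits in
  // the high-address half of the slot, hence the GapSize adjustment in
  // visitCallBase below.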
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = SystemZGpOffset;
    unsigned FpOffset = SystemZFpOffset;
    unsigned VrIndex = 0;
    unsigned OverflowOffset = SystemZOverflowOffset;
    const DataLayout &DL = F.getDataLayout();
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      Type *T = A->getType();
      ArgKind AK = classifyArgument(T);
      if (AK == ArgKind::Indirect) {
        // Indirect arguments are handled as GeneralPurpose arguments.
        AK = ArgKind::GeneralPurpose;
      }
      if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
        AK = ArgKind::Memory;
      if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
        AK = ArgKind::Memory;
      Value *ShadowBase = nullptr;
      Value *OriginBase = nullptr;
      ShadowExtension SE = ShadowExtension::None;
      switch (AK) {
      case ArgKind::GeneralPurpose: {
        // Always keep track of GpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (GpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize = 0;
            if (SE == ShadowExtension::None) {
              uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
              assert(ArgAllocSize <= ArgSize);
              GapSize = ArgSize - ArgAllocSize;
            }
            ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
          }
          GpOffset += ArgSize;
        } else {
          GpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::FloatingPoint: {
        // Always keep track of FpOffset, but store shadow only for varargs.
        uint64_t ArgSize = 8;
        if (FpOffset + ArgSize <= kParamTLSSize) {
          if (!IsFixed) {
            ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
            if (MS.TrackOrigins)
              OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          }
          FpOffset += ArgSize;
        } else {
          FpOffset = kParamTLSSize;
        }
        break;
      }
      case ArgKind::Vector: {
        // Keep track of VrIndex; vector varargs were downgraded to Memory
        // above, so no shadow needs to be stored here.
        VrIndex++;
        break;
      }
      case ArgKind::Memory: {
        // Keep track of OverflowOffset, but store shadow only for varargs.
        if (!IsFixed) {
          uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
          uint64_t ArgSize = alignTo(ArgAllocSize, 8);
          if (OverflowOffset + ArgSize <= kParamTLSSize) {
            SE = getShadowExtension(CB, ArgNo);
            uint64_t GapSize =
                SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
            ShadowBase =
                getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
            if (MS.TrackOrigins)
              OriginBase =
                  getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
            OverflowOffset += ArgSize;
          } else {
            OverflowOffset = kParamTLSSize;
          }
        }
        break;
      }
      case ArgKind::Indirect:
        llvm_unreachable("Indirect must be converted to GeneralPurpose");
      }
      if (ShadowBase == nullptr)
        continue;
      Value *Shadow = MSV.getShadow(A);
      if (SE != ShadowExtension::None)
        Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
                                      /*Signed=*/SE == ShadowExtension::Sign);
      ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
      IRB.CreateStore(Shadow, ShadowBase);
      if (MS.TrackOrigins) {
        Value *Origin = MSV.getOrigin(A);
        TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                        kMinOriginAlignment);
      }
    }
    Constant *OverflowSize = ConstantInt::get(
        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
        MS.PtrTy);
    Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
                               /*isStore*/ true);
    // For use-soft-float functions it is enough to copy just the GPRs.
    unsigned RegSaveAreaSize =
        IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                     RegSaveAreaSize);
    if (MS.TrackOrigins)
      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                       Alignment, RegSaveAreaSize);
  }

  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(
            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
        MS.PtrTy);
    Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
    const Align Alignment = Align(8);
    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                               Alignment, /*isStore*/ true);
    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                           SystemZOverflowOffset);
    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                     VAArgOverflowSize);
    if (MS.TrackOrigins) {
      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                      SystemZOverflowOffset);
      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
    }
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // ...
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        // ...
      }
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      copyRegSaveArea(IRB, VAListTag);
      copyOverflowArea(IRB, VAListTag);
    }
  }
};
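// Quick arithmetic check (editorial, not upstream code) on the s390x
// constants above: the 160-byte register save area exposes five vararg GPR
// slots (r2-r6) at bytes [16, 56) and four FPR slots (f0, f2, f4, f6) at
// bytes [128, 160), each 8 bytes wide; stack-passed varargs start right
// after the save area, at overflow offset 160. The va_list itself keeps its
// overflow_arg_area pointer at offset 16 and reg_save_area at offset 24.
namespace msan_doc_s390x {
static_assert((56 - 16) / 8 == 5, "five 8-byte vararg GPR slots");
static_assert((160 - 128) / 8 == 4, "four 8-byte vararg FPR slots");
} // namespace msan_doc_s390x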
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgI386Helper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    unsigned VAArgOffset = 0;
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
        if (ArgAlign < IntptrSize)
          ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);
            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
          VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
        }
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Align ArgAlign = Align(IntptrSize);
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than a register slot to
          // match the placement of bits on a big-endian target.
          if (ArgSize < IntptrSize)
            VAArgOffset += (IntptrSize - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment);
          VAArgOffset += ArgSize;
          // ...
        }
      }
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // ...
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
/// Implementation of VarArgHelper shared by ARM32, MIPS, RISC-V and
/// LoongArch64 (see the aliases below).
struct VarArgGenericHelper : public VarArgHelperBase {
  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  VarArgGenericHelper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV,
                      const unsigned VAListTagSize)
      : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned VAArgOffset = 0;
    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      if (IsFixed)
        continue;
      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
      if (DL.isBigEndian()) {
        // Adjust the shadow for arguments smaller than a register slot to
        // match the placement of bits on a big-endian target.
        if (ArgSize < IntptrSize)
          VAArgOffset += (IntptrSize - ArgSize);
      }
      Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
      VAArgOffset += ArgSize;
      VAArgOffset = alignTo(VAArgOffset, IntptrSize);
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

    Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
    // VAArgOverflowSizeTLS doubles as VAArgSizeTLS here: it holds the total
    // size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.FnPrologueEnd);
    VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
    Value *CopySize = VAArgSize;

    if (!VAStartInstrumentationList.empty()) {
      // ...
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      // ...
      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(*MS.C, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const DataLayout &DL = F.getDataLayout();
      unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
      const Align Alignment = Align(IntptrSize);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
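// Note (editorial): these aliases share one implementation; the only
// per-target difference is the va_list tag size that CreateVarArgHelper
// below passes to the constructor (4 bytes on the 32-bit targets, 8 on the
// 64-bit ones).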
/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};
static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == Triple::x86)
    return new VarArgI386Helper(Func, Msan, Visitor);
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  if (TargetTriple.isARM())
    return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isAArch64())
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  if (TargetTriple.isSystemZ())
    return new VarArgSystemZHelper(Func, Msan, Visitor);
  if (TargetTriple.isPPC32())
    return new VarArgPowerPC32Helper(Func, Msan, Visitor);
  if (TargetTriple.isPPC64())
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  if (TargetTriple.isRISCV32())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isRISCV64())
    return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isMIPS32())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
  if (TargetTriple.isMIPS64())
    return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
  if (TargetTriple.isLoongArch64())
    return new VarArgLoongArch64Helper(Func, Msan, Visitor,
                                       /*VAListTagSize=*/8);
  return new VarArgNoOpHelper(Func, Msan, Visitor);
}
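// How the helper is driven (editorial summary, inferred from the interface
// above): MemorySanitizerVisitor owns one VarArgHelper per function and
// calls visitCallBase() for every call site (recording vararg shadow into
// the va_arg TLS slots), visitVAStartInst()/visitVACopyInst() for every
// va_start/va_copy (remembering the instruction for later), and finally
// finalizeInstrumentation(), which materializes the TLS backup copy and
// patches every recorded va_list with the saved shadow.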
bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  // ...
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // ...
  return Visitor.runOnFunction();
}