// NOTE(review): doxygen page banner left over from the text extraction:
//   "LLVM 23.0.0git" / "AddressSanitizer.cpp" / "Go to the documentation of
//   this file." — not part of the original source; preserved as a comment so
// the file has a chance of parsing.
1//===- AddressSanitizer.cpp - memory error detector -----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address basic correctness
10// checker.
11// Details of the algorithm:
12// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
13//
14// FIXME: This sanitizer does not yet handle scalable vectors
15//
16//===----------------------------------------------------------------------===//
17
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
23#include "llvm/ADT/SmallSet.h"
25#include "llvm/ADT/Statistic.h"
27#include "llvm/ADT/StringRef.h"
28#include "llvm/ADT/Twine.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/Comdat.h"
41#include "llvm/IR/Constant.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DIBuilder.h"
44#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/DebugLoc.h"
49#include "llvm/IR/Function.h"
50#include "llvm/IR/GlobalAlias.h"
51#include "llvm/IR/GlobalValue.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/InlineAsm.h"
55#include "llvm/IR/InstVisitor.h"
56#include "llvm/IR/InstrTypes.h"
57#include "llvm/IR/Instruction.h"
60#include "llvm/IR/Intrinsics.h"
61#include "llvm/IR/LLVMContext.h"
62#include "llvm/IR/MDBuilder.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/Type.h"
66#include "llvm/IR/Use.h"
67#include "llvm/IR/Value.h"
71#include "llvm/Support/Debug.h"
74#include "llvm/Support/ModRef.h"
85#include <algorithm>
86#include <cassert>
87#include <cstddef>
88#include <cstdint>
89#include <iomanip>
90#include <limits>
91#include <sstream>
92#include <string>
93#include <tuple>
94
95using namespace llvm;
96
97#define DEBUG_TYPE "asan"
98
// Shadow-memory layout constants: per-platform values for the mapping
//   Shadow = (Mem >> Scale) + Offset.
// NOTE(review): this file is a doxygen text extraction — the original line
// numbers are fused onto each code line, and lines whose text was fully
// hyperlinked were dropped (visible as gaps in the fused numbering). Several
// declarator halves below are missing for that reason; each gap is marked.
100static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
101static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
// NOTE(review): declarator dropped (orig. line 102; presumably
// "static const uint64_t kDynamicShadowSentinel =") — confirm upstream.
103 std::numeric_limits<uint64_t>::max();
104static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
// NOTE(review): orig. line 105 dropped (presumably the
// kSmallX86_64ShadowOffsetAlignMask declaration used below) — confirm.
106static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
107static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
108static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
109static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
110static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
111static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
112static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
113static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
// NOTE(review): orig. line 114 dropped (kRISCV64_ShadowOffset64 is referenced
// by getShadowMapping below) — confirm.
115static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
116static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
117static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
118static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
119static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
120static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
121static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
122static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
123static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
// NOTE(review): orig. line 124 dropped (getShadowMapping references
// kWebAssemblyShadowOffset, likely declared here) — confirm.
125
126// The shadow memory space is dynamically allocated.
// NOTE(review): the declaration the comment above documents was dropped
// (orig. line 127; getShadowMapping references kWindowsShadowOffset64).
128
129static const size_t kMinStackMallocSize = 1 << 6; // 64B
130static const size_t kMaxStackMallocSize = 1 << 16; // 64K
// Stack-frame header magic values written by the runtime.
131static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
132static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
133
// Names of the module ctor/dtor and the __asan_* runtime entry points this
// pass emits calls to.
134const char kAsanModuleCtorName[] = "asan.module_ctor";
135const char kAsanModuleDtorName[] = "asan.module_dtor";
137// On Emscripten, the system needs more than one priorities for constructors.
139const char kAsanReportErrorTemplate[] = "__asan_report_";
140const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
141const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
142const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
// NOTE(review): declarator dropped (orig. line 143; presumably
// "const char kAsanUnregisterImageGlobalsName[] =") — confirm.
144 "__asan_unregister_image_globals";
145const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
146const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
147const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
148const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
149const char kAsanInitName[] = "__asan_init";
150const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
151const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
152const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
153const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
154static const int kMaxAsanStackMallocSizeClass = 10;
155const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
// NOTE(review): declarator dropped (orig. line 156; presumably
// "const char kAsanStackMallocAlwaysNameTemplate[] =") — confirm.
157 "__asan_stack_malloc_always_";
158const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
159const char kAsanGenPrefix[] = "___asan_gen_";
160const char kODRGenPrefix[] = "__odr_asan_gen_";
161const char kSanCovGenPrefix[] = "__sancov_gen_";
162const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
163const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
164const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";
165
166// ASan version script has __asan_* wildcard. Triple underscore prevents a
167// linker (gold) warning about attempting to export a local symbol.
168const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";
169
// NOTE(review): declarator dropped (orig. line 170) for the string below.
171 "__asan_option_detect_stack_use_after_return";
172
// NOTE(review): declarator dropped (orig. line 173; presumably
// "const char kAsanShadowMemoryDynamicAddress[] =") — confirm.
174 "__asan_shadow_memory_dynamic_address";
175
176const char kAsanAllocaPoison[] = "__asan_alloca_poison";
177const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
178
// AMDGPU-specific intrinsics used when instrumenting that target.
179const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
180const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
181const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
182const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
183
184// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
185static const size_t kNumberOfAccessSizes = 5;
186
187static const uint64_t kAllocaRzSize = 32;
188
189// ASanAccessInfo implementation constants.
// Bit-packing layout: [0] CompileKernel, [1..4] AccessSizeIndex, [5] IsWrite.
190constexpr size_t kCompileKernelShift = 0;
191constexpr size_t kCompileKernelMask = 0x1;
192constexpr size_t kAccessSizeIndexShift = 1;
193constexpr size_t kAccessSizeIndexMask = 0xf;
194constexpr size_t kIsWriteShift = 5;
195constexpr size_t kIsWriteMask = 0x1;
196
197// Command-line flags.
// NOTE(review): in this section the doxygen extraction dropped most of the
// "static cl::opt<...> Name(" opener lines (their identifiers were
// hyperlinked). The surviving argument lists — in particular the flag string
// in each first surviving line — still identify each option. Gaps in the
// fused numbering mark every dropped line.
198
// Flag "asan-kernel": KASan mode (opener line dropped, orig. 199).
200 "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
201 cl::Hidden, cl::init(false));
202
// Flag "asan-recover" (opener dropped, orig. 203).
204 "asan-recover",
205 cl::desc("Enable recovery mode (continue-after-error)."),
206 cl::Hidden, cl::init(false));
207
// Flag "asan-guard-against-version-mismatch" (opener dropped, orig. 208).
209 "asan-guard-against-version-mismatch",
210 cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
211 cl::init(true));
212
213// This flag may need to be replaced with -f[no-]asan-reads.
214static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
215 cl::desc("instrument read instructions"),
216 cl::Hidden, cl::init(true));
217
// Flag "asan-instrument-writes" (opener dropped, orig. 218).
219 "asan-instrument-writes", cl::desc("instrument write instructions"),
220 cl::Hidden, cl::init(true));
221
222static cl::opt<bool>
223 ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
224 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
// NOTE(review): orig. line 225 dropped (tail of the declaration above).
226
// Flag "asan-instrument-atomics" (opener dropped, orig. 227).
228 "asan-instrument-atomics",
229 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
230 cl::init(true));
231
232static cl::opt<bool>
233 ClInstrumentByval("asan-instrument-byval",
234 cl::desc("instrument byval call arguments"), cl::Hidden,
235 cl::init(true));
236
// Flag "asan-always-slow-path" (opener dropped, orig. 237).
238 "asan-always-slow-path",
239 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
240 cl::init(false));
241
// Flag "asan-force-dynamic-shadow" (opener dropped, orig. 242; presumably
// "static cl::opt<bool> ClForceDynamicShadow(") — confirm upstream.
243 "asan-force-dynamic-shadow",
244 cl::desc("Load shadow address into a local variable for each function"),
245 cl::Hidden, cl::init(false));
246
247static cl::opt<bool>
248 ClWithIfunc("asan-with-ifunc",
249 cl::desc("Access dynamic shadow through an ifunc global on "
250 "platforms that support this"),
251 cl::Hidden, cl::init(true));
252
253static cl::opt<int>
254 ClShadowAddrSpace("asan-shadow-addr-space",
255 cl::desc("Address space for pointers to the shadow map"),
256 cl::Hidden, cl::init(0));
257
// Flag "asan-with-ifunc-suppress-remat" (opener dropped, orig. 258).
259 "asan-with-ifunc-suppress-remat",
260 cl::desc("Suppress rematerialization of dynamic shadow address by passing "
261 "it through inline asm in prologue."),
262 cl::Hidden, cl::init(true));
263
264// This flag limits the number of instructions to be instrumented
265// in any given BB. Normally, this should be set to unlimited (INT_MAX),
266// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
267// set it to 10000.
// Flag "asan-max-ins-per-bb" (opener dropped, orig. 268).
269 "asan-max-ins-per-bb", cl::init(10000),
270 cl::desc("maximal number of instructions to instrument in any given BB"),
271 cl::Hidden);
272
273// This flag may need to be replaced with -f[no]asan-stack.
274static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
275 cl::Hidden, cl::init(true));
// Flag "asan-max-inline-poisoning-size" (opener dropped, orig. 276).
277 "asan-max-inline-poisoning-size",
278 cl::desc(
279 "Inline shadow poisoning for blocks up to the given size in bytes."),
280 cl::Hidden, cl::init(64));
281
// Flag "asan-use-after-return" (opener dropped, orig. 282; the clEnumValN
// mode lines at orig. 285-294 were also partially dropped).
283 "asan-use-after-return",
284 cl::desc("Sets the mode of detection for stack-use-after-return."),
287 "Never detect stack use after return."),
290 "Detect stack use after return if "
291 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
293 "Always detect stack use after return.")),
295
296static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
297 cl::desc("Create redzones for byval "
298 "arguments (extra copy "
299 "required)"), cl::Hidden,
300 cl::init(true));
301
302static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
303 cl::desc("Check stack-use-after-scope"),
304 cl::Hidden, cl::init(false));
305
306// This flag may need to be replaced with -f[no]asan-globals.
307static cl::opt<bool> ClGlobals("asan-globals",
308 cl::desc("Handle global objects"), cl::Hidden,
309 cl::init(true));
310
311static cl::opt<bool> ClInitializers("asan-initialization-order",
312 cl::desc("Handle C++ initializer order"),
313 cl::Hidden, cl::init(true));
314
// Flag "asan-detect-invalid-pointer-pair" (opener dropped, orig. 315).
316 "asan-detect-invalid-pointer-pair",
317 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
318 cl::init(false));
319
// Flag "asan-detect-invalid-pointer-cmp" (opener dropped, orig. 320).
321 "asan-detect-invalid-pointer-cmp",
322 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
323 cl::init(false));
324
// Flag "asan-detect-invalid-pointer-sub" (opener dropped, orig. 325).
326 "asan-detect-invalid-pointer-sub",
327 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
328 cl::init(false));
329
// Flag "asan-realign-stack" (opener dropped, orig. 330).
331 "asan-realign-stack",
332 cl::desc("Realign stack to the value of this flag (power of two)"),
333 cl::Hidden, cl::init(32));
334
// Flag "asan-instrumentation-with-call-threshold" (opener dropped, orig. 335).
336 "asan-instrumentation-with-call-threshold",
337 cl::desc("If the function being instrumented contains more than "
338 "this number of memory accesses, use callbacks instead of "
339 "inline checks (-1 means never use callbacks)."),
340 cl::Hidden, cl::init(7000));
341
// Flag "asan-memory-access-callback-prefix" (opener dropped, orig. 342).
343 "asan-memory-access-callback-prefix",
344 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
345 cl::init("__asan_"));
346
// Flag "asan-kernel-mem-intrinsic-prefix" (opener dropped, orig. 347).
348 "asan-kernel-mem-intrinsic-prefix",
349 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
350 cl::init(false));
351
352static cl::opt<bool>
353 ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
354 cl::desc("instrument dynamic allocas"),
355 cl::Hidden, cl::init(true));
356
// Flag "asan-skip-promotable-allocas" (opener dropped, orig. 357).
358 "asan-skip-promotable-allocas",
359 cl::desc("Do not instrument promotable allocas"), cl::Hidden,
360 cl::init(true));
361
// Flag "asan-constructor-kind" (opener dropped, orig. 362; enum value line
// orig. 366 also dropped).
363 "asan-constructor-kind",
364 cl::desc("Sets the ASan constructor kind"),
365 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
367 "Use global constructors")),
369// These flags allow to change the shadow mapping.
370// The shadow mapping looks like
371// Shadow = (Mem >> scale) + offset
372
373static cl::opt<int> ClMappingScale("asan-mapping-scale",
374 cl::desc("scale of asan shadow mapping"),
375 cl::Hidden, cl::init(0));
376
// NOTE(review): opener type line dropped (orig. 377; presumably
// "static cl::opt<uint64_t>").
378 ClMappingOffset("asan-mapping-offset",
379 cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
380 cl::Hidden, cl::init(0));
381
382// Optimization flags. Not user visible, used mostly for testing
383// and benchmarking the tool.
384
385static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
386 cl::Hidden, cl::init(true));
387
388static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
389 cl::desc("Optimize callbacks"),
390 cl::Hidden, cl::init(false));
391
// Flag "asan-opt-same-temp" (opener dropped, orig. 392).
393 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
394 cl::Hidden, cl::init(true));
395
396static cl::opt<bool> ClOptGlobals("asan-opt-globals",
397 cl::desc("Don't instrument scalar globals"),
398 cl::Hidden, cl::init(true));
399
// Flag "asan-opt-stack" (opener dropped, orig. 400).
401 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
402 cl::Hidden, cl::init(false));
403
// Flag "asan-stack-dynamic-alloca" (opener dropped, orig. 404).
405 "asan-stack-dynamic-alloca",
406 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
407 cl::init(true));
408
// Flag "asan-force-experiment" (opener dropped, orig. 409).
410 "asan-force-experiment",
411 cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
412 cl::init(0));
413
414static cl::opt<bool>
415 ClUsePrivateAlias("asan-use-private-alias",
416 cl::desc("Use private aliases for global variables"),
417 cl::Hidden, cl::init(true));
418
419static cl::opt<bool>
420 ClUseOdrIndicator("asan-use-odr-indicator",
421 cl::desc("Use odr indicators to improve ODR reporting"),
422 cl::Hidden, cl::init(true));
423
424static cl::opt<bool>
425 ClUseGlobalsGC("asan-globals-live-support",
426 cl::desc("Use linker features to support dead "
427 "code stripping of globals"),
428 cl::Hidden, cl::init(true));
429
430// This is on by default even though there is a bug in gold:
431// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
432static cl::opt<bool>
433 ClWithComdat("asan-with-comdat",
434 cl::desc("Place ASan constructors in comdat sections"),
435 cl::Hidden, cl::init(true));
436
// Flag "asan-destructor-kind" (opener dropped, orig. 437; enum value line
// orig. 442 also dropped).
438 "asan-destructor-kind",
439 cl::desc("Sets the ASan destructor kind. The default is to use the value "
440 "provided to the pass constructor"),
441 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
443 "Use global destructors")),
445
// Flag "asan-instrument-address-spaces" (opener dropped, orig. 446-447;
// the SrcAddrSpaces set mutated in the callback is presumably declared in
// those dropped lines) — confirm upstream.
448 "asan-instrument-address-spaces",
449 cl::desc("Only instrument variables in the specified address spaces."),
450 cl::Hidden, cl::CommaSeparated, cl::callback([](const unsigned &AddrSpace) {
451 SrcAddrSpaces.insert(AddrSpace);
452 }));
453
454// Debug flags.
455
456static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
457 cl::init(0));
458
459static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
460 cl::Hidden, cl::init(0));
461
// Flag "asan-debug-func" (opener dropped, orig. 462).
463 cl::desc("Debug func"));
464
465static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
466 cl::Hidden, cl::init(-1));
467
468static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
469 cl::Hidden, cl::init(-1));
470
// Pass statistics (reported with -stats).
471STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
472STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
473STATISTIC(NumOptimizedAccessesToGlobalVar,
474 "Number of optimized accesses to global vars");
475STATISTIC(NumOptimizedAccessesToStackVar,
476 "Number of optimized accesses to stack vars");
477
namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  // log2 of the shadow granularity (application bytes per shadow byte).
  int Scale;
  // Shadow base added to (or OR'ed with) the shifted address.
  // NOTE(review): restored member — the doxygen extraction dropped original
  // line 487. getShadowMapping() assigns uint64_t shadow-base constants to
  // Mapping.Offset, and getAddressSanitizerParams() copies it into a
  // uint64_t out-parameter, so uint64_t is the grounded type.
  uint64_t Offset;
  // True when the offset can be OR'ed in instead of added (power-of-two
  // offsets on targets where OR is profitable).
  bool OrShadowOffset;
  // True when the shadow base is taken from the __asan_shadow ifunc global.
  bool InGlobal;
};

} // end anonymous namespace
493
494static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
495 bool IsKasan) {
496 bool IsAndroid = TargetTriple.isAndroid();
497 bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
498 TargetTriple.isDriverKit();
499 bool IsMacOS = TargetTriple.isMacOSX();
500 bool IsFreeBSD = TargetTriple.isOSFreeBSD();
501 bool IsNetBSD = TargetTriple.isOSNetBSD();
502 bool IsPS = TargetTriple.isPS();
503 bool IsLinux = TargetTriple.isOSLinux();
504 bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
505 TargetTriple.getArch() == Triple::ppc64le;
506 bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
507 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
508 bool IsMIPSN32ABI = TargetTriple.isABIN32();
509 bool IsMIPS32 = TargetTriple.isMIPS32();
510 bool IsMIPS64 = TargetTriple.isMIPS64();
511 bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
512 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
513 TargetTriple.getArch() == Triple::aarch64_be;
514 bool IsLoongArch64 = TargetTriple.isLoongArch64();
515 bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
516 bool IsWindows = TargetTriple.isOSWindows();
517 bool IsFuchsia = TargetTriple.isOSFuchsia();
518 bool IsAMDGPU = TargetTriple.isAMDGPU();
519 bool IsHaiku = TargetTriple.isOSHaiku();
520 bool IsWasm = TargetTriple.isWasm();
521 bool IsBPF = TargetTriple.isBPF();
522
523 ShadowMapping Mapping;
524
525 Mapping.Scale = kDefaultShadowScale;
526 if (ClMappingScale.getNumOccurrences() > 0) {
527 Mapping.Scale = ClMappingScale;
528 }
529
530 if (LongSize == 32) {
531 if (IsAndroid)
532 Mapping.Offset = kDynamicShadowSentinel;
533 else if (IsMIPSN32ABI)
534 Mapping.Offset = kMIPS_ShadowOffsetN32;
535 else if (IsMIPS32)
536 Mapping.Offset = kMIPS32_ShadowOffset32;
537 else if (IsFreeBSD)
538 Mapping.Offset = kFreeBSD_ShadowOffset32;
539 else if (IsNetBSD)
540 Mapping.Offset = kNetBSD_ShadowOffset32;
541 else if (IsIOS)
542 Mapping.Offset = kDynamicShadowSentinel;
543 else if (IsWindows)
544 Mapping.Offset = kWindowsShadowOffset32;
545 else if (IsWasm)
546 Mapping.Offset = kWebAssemblyShadowOffset;
547 else
548 Mapping.Offset = kDefaultShadowOffset32;
549 } else { // LongSize == 64
550 // Fuchsia is always PIE, which means that the beginning of the address
551 // space is always available.
552 if (IsFuchsia) {
553 // kDynamicShadowSentinel tells instrumentation to use the dynamic shadow.
554 Mapping.Offset = kDynamicShadowSentinel;
555 } else if (IsPPC64)
556 Mapping.Offset = kPPC64_ShadowOffset64;
557 else if (IsSystemZ)
558 Mapping.Offset = kSystemZ_ShadowOffset64;
559 else if (IsFreeBSD && IsAArch64)
560 Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
561 else if (IsFreeBSD && !IsMIPS64) {
562 if (IsKasan)
563 Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
564 else
565 Mapping.Offset = kFreeBSD_ShadowOffset64;
566 } else if (IsNetBSD) {
567 if (IsKasan)
568 Mapping.Offset = kNetBSDKasan_ShadowOffset64;
569 else
570 Mapping.Offset = kNetBSD_ShadowOffset64;
571 } else if (IsPS)
572 Mapping.Offset = kPS_ShadowOffset64;
573 else if (IsLinux && IsX86_64) {
574 if (IsKasan)
575 Mapping.Offset = kLinuxKasan_ShadowOffset64;
576 else
577 Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
578 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
579 } else if (IsWindows && (IsX86_64 || IsAArch64)) {
580 Mapping.Offset = kWindowsShadowOffset64;
581 } else if (IsMIPS64)
582 Mapping.Offset = kMIPS64_ShadowOffset64;
583 else if (IsIOS)
584 Mapping.Offset = kDynamicShadowSentinel;
585 else if (IsMacOS && IsAArch64)
586 Mapping.Offset = kDynamicShadowSentinel;
587 else if (IsAArch64)
588 Mapping.Offset = kAArch64_ShadowOffset64;
589 else if (IsLoongArch64)
590 Mapping.Offset = kLoongArch64_ShadowOffset64;
591 else if (IsRISCV64)
592 Mapping.Offset = kRISCV64_ShadowOffset64;
593 else if (IsAMDGPU)
594 Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
595 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
596 else if (IsHaiku && IsX86_64)
597 Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
598 (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
599 else if (IsBPF)
600 Mapping.Offset = kDynamicShadowSentinel;
601 else
602 Mapping.Offset = kDefaultShadowOffset64;
603 }
604
606 Mapping.Offset = kDynamicShadowSentinel;
607 }
608
609 if (ClMappingOffset.getNumOccurrences() > 0) {
610 Mapping.Offset = ClMappingOffset;
611 }
612
613 // OR-ing shadow offset if more efficient (at least on x86) if the offset
614 // is a power of two, but on ppc64 and loongarch64 we have to use add since
615 // the shadow offset is not necessarily 1/8-th of the address space. On
616 // SystemZ, we could OR the constant in a single instruction, but it's more
617 // efficient to load it once and use indexed addressing.
618 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
619 !IsRISCV64 && !IsLoongArch64 &&
620 !(Mapping.Offset & (Mapping.Offset - 1)) &&
621 Mapping.Offset != kDynamicShadowSentinel;
622 Mapping.InGlobal = ClWithIfunc && IsAndroid && IsArmOrThumb;
623
624 return Mapping;
625}
626
627void llvm::getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
628 bool IsKasan, uint64_t *ShadowBase,
629 int *MappingScale, bool *OrShadowOffset) {
630 auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
631 *ShadowBase = Mapping.Offset;
632 *MappingScale = Mapping.Scale;
633 *OrShadowOffset = Mapping.OrShadowOffset;
634}
635
// NOTE(review): the function header was dropped by the doxygen extraction
// (orig. line 636). From the body below it takes a Function `F` and a bool
// `ReadsArgMem`, and widens/clears memory attributes that sanitizer
// instrumentation would invalidate — confirm the exact signature upstream.
 637 // Adding sanitizer checks invalidates previously inferred memory attributes.
 638 //
 639 // This is not only true for sanitized functions, because AttrInfer can
 640 // infer those attributes on libc functions, which is not true if those
 641 // are instrumented (Android) or intercepted.
 642 //
 643 // We might want to model ASan shadow memory more opaquely to get rid of
 644 // this problem altogether, by hiding the shadow memory write in an
 645 // intrinsic, essentially like in the AArch64StackTagging pass. But that's
 646 // for another day.
 647
 648 bool Changed = false;
 649 // We add memory(readwrite) to functions that don't already have that set and
 650 // can access any non-inaccessible memory. Sanitizer instrumentation can
 651 // read/write shadow memory, which is IRMemLocation::Other. Sanitizer
 652 // instrumentation can instrument any memory accesses to non-inaccessible
 653 // memory.
 654 if (!F.getMemoryEffects()
 655 .getWithoutLoc(IRMemLocation::InaccessibleMem)
 656 .doesNotAccessMemory() &&
 657 !isModAndRefSet(F.getMemoryEffects().getModRef(IRMemLocation::Other))) {
 658 F.setMemoryEffects(F.getMemoryEffects() |
// NOTE(review): dropped line (orig. 659) — the MemoryEffects operand OR'ed
// in above; presumably the full read/write effect — confirm upstream.
 660 Changed = true;
 661 }
 662 // HWASan reads from argument memory even for previously write-only accesses.
 663 if (ReadsArgMem) {
 664 if (F.getMemoryEffects().getModRef(IRMemLocation::ArgMem) ==
// NOTE(review): dropped line (orig. 665) — the ModRefInfo value compared
// against (write-only argument memory, per the comment above) — confirm.
 666 F.setMemoryEffects(F.getMemoryEffects() |
// NOTE(review): dropped line (orig. 667) — the argument-memory read effect
// OR'ed in — confirm upstream.
 668 Changed = true;
 669 }
// Also drop the writeonly attribute from every argument, since HWASan may
// read those argument bytes.
 670 for (Argument &A : F.args()) {
 671 if (A.hasAttribute(Attribute::WriteOnly)) {
 672 A.removeAttr(Attribute::WriteOnly);
 673 Changed = true;
 674 }
 675 }
 676 }
 677 if (Changed) {
 678 // nobuiltin makes sure later passes don't restore assumptions about
 679 // the function.
 680 F.addFnAttr(Attribute::NoBuiltin);
 681 }
 682}
683
689
697
/// Minimum redzone size (in bytes) for stack objects and globals at the given
/// shadow-mapping scale.
///
/// \param MappingScale  log2 of the shadow granularity.
/// \returns max(32, 2^MappingScale): at least 32 bytes, and at least one full
///          shadow granule — 64 bytes at scale 6, 128 bytes at scale 7.
static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
703
// NOTE(review): garbled fragment — the doxygen extraction dropped this
// function's header (orig. line 704) and both of its return statements
// (orig. lines 706 and 708). What survives shows only a branch on
// TargetTriple.isOSEmscripten(); per the earlier comment "On Emscripten, the
// system needs more than one priorities for constructors", this presumably
// selected the Emscripten vs. default ctor/dtor priority — confirm upstream.
 705 if (TargetTriple.isOSEmscripten())
 707 else
 709}
710
711static Twine genName(StringRef suffix) {
712 return Twine(kAsanGenPrefix) + suffix;
713}
714
715namespace {
716/// Helper RAII class to post-process inserted asan runtime calls during a
717/// pass on a single Function. Upon end of scope, detects and applies the
718/// required funclet OpBundle.
719class RuntimeCallInserter {
720 Function *OwnerFn = nullptr;
721 bool TrackInsertedCalls = false;
722 SmallVector<CallInst *> InsertedCalls;
723
724public:
725 RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
726 if (Fn.hasPersonalityFn()) {
727 auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
728 if (isScopedEHPersonality(Personality))
729 TrackInsertedCalls = true;
730 }
731 }
732
733 ~RuntimeCallInserter() {
734 if (InsertedCalls.empty())
735 return;
736 assert(TrackInsertedCalls && "Calls were wrongly tracked");
737
738 DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
739 for (CallInst *CI : InsertedCalls) {
740 BasicBlock *BB = CI->getParent();
741 assert(BB && "Instruction doesn't belong to a BasicBlock");
742 assert(BB->getParent() == OwnerFn &&
743 "Instruction doesn't belong to the expected Function!");
744
745 ColorVector &Colors = BlockColors[BB];
746 // funclet opbundles are only valid in monochromatic BBs.
747 // Note that unreachable BBs are seen as colorless by colorEHFunclets()
748 // and will be DCE'ed later.
749 if (Colors.empty())
750 continue;
751 if (Colors.size() != 1) {
752 OwnerFn->getContext().emitError(
753 "Instruction's BasicBlock is not monochromatic");
754 continue;
755 }
756
757 BasicBlock *Color = Colors.front();
758 BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();
759
760 if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
761 // Replace CI with a clone with an added funclet OperandBundle
762 OperandBundleDef OB("funclet", &*EHPadIt);
764 OB, CI->getIterator());
765 NewCall->copyMetadata(*CI);
766 CI->replaceAllUsesWith(NewCall);
767 CI->eraseFromParent();
768 }
769 }
770 }
771
772 CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
773 ArrayRef<Value *> Args = {},
774 const Twine &Name = "") {
775 assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
776
777 CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
778 if (TrackInsertedCalls)
779 InsertedCalls.push_back(Inst);
780 return Inst;
781 }
782};
783
// NOTE(review): doxygen extraction — original line numbers are fused onto
// each line below, and a few lines were dropped (marked inline).
784/// AddressSanitizer: instrument the code in module to find memory bugs.
785struct AddressSanitizer {
// Constructor: each cl:: flag, when given on the command line, overrides the
// corresponding constructor argument (visible in the ?: chains below).
786 AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
787 int InstrumentationWithCallsThreshold,
788 uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
789 bool Recover = false, bool UseAfterScope = false,
790 AsanDetectStackUseAfterReturnMode UseAfterReturn =
791 AsanDetectStackUseAfterReturnMode::Runtime)
792 : M(M),
793 CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
794 : CompileKernel),
795 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
796 UseAfterScope(UseAfterScope || ClUseAfterScope),
797 UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
798 : UseAfterReturn),
799 SSGI(SSGI),
800 InstrumentationWithCallsThreshold(
801 ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
// NOTE(review): dropped line (orig. 802; presumably
// "? ClInstrumentationWithCallsThreshold").
803 : InstrumentationWithCallsThreshold),
804 MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
// NOTE(review): dropped line (orig. 805; presumably
// "? ClMaxInlinePoisoningSize").
806 : MaxInlinePoisoningSize) {
// Cache per-module context/layout facts used throughout instrumentation.
807 C = &(M.getContext());
808 DL = &M.getDataLayout();
809 LongSize = M.getDataLayout().getPointerSizeInBits();
810 IntptrTy = Type::getIntNTy(*C, LongSize);
811 PtrTy = PointerType::getUnqual(*C);
812 Int32Ty = Type::getInt32Ty(*C);
813 TargetTriple = M.getTargetTriple();
814
815 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
816
817 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
818 }
819
820 TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
821 return *AI.getAllocationSize(AI.getDataLayout());
822 }
823
824 /// Check if we want (and can) handle this alloca.
825 bool isInterestingAlloca(const AllocaInst &AI);
826
827 bool ignoreAccess(Instruction *Inst, Value *Ptr);
// NOTE(review): the declaration opener was dropped here (orig. line 828;
// presumably the getInterestingMemoryOperands declaration whose parameter
// list follows) — confirm upstream.
829 Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
830 const TargetTransformInfo *TTI);
831
832 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
833 InterestingMemoryOperand &O, bool UseCalls,
834 const DataLayout &DL, RuntimeCallInserter &RTCI);
835 void instrumentPointerComparisonOrSubtraction(Instruction *I,
836 RuntimeCallInserter &RTCI);
837 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
838 Value *Addr, MaybeAlign Alignment,
839 uint32_t TypeStoreSize, bool IsWrite,
840 Value *SizeArgument, bool UseCalls, uint32_t Exp,
841 RuntimeCallInserter &RTCI);
842 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
843 Instruction *InsertBefore, Value *Addr,
844 uint32_t TypeStoreSize, bool IsWrite,
845 Value *SizeArgument);
846 Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
847 bool Recover);
848 void instrumentUnusualSizeOrAlignment(Instruction *I,
849 Instruction *InsertBefore, Value *Addr,
850 TypeSize TypeStoreSize, bool IsWrite,
851 Value *SizeArgument, bool UseCalls,
852 uint32_t Exp,
853 RuntimeCallInserter &RTCI);
854 void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
855 Type *IntptrTy, Value *Mask, Value *EVL,
856 Value *Stride, Instruction *I, Value *Addr,
857 MaybeAlign Alignment, unsigned Granularity,
858 Type *OpType, bool IsWrite,
859 Value *SizeArgument, bool UseCalls,
860 uint32_t Exp, RuntimeCallInserter &RTCI);
861 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
862 Value *ShadowValue, uint32_t TypeStoreSize);
863 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
864 bool IsWrite, size_t AccessSizeIndex,
865 Value *SizeArgument, uint32_t Exp,
866 RuntimeCallInserter &RTCI);
867 void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
868 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
869 bool suppressInstrumentationSiteForDebug(int &Instrumented);
870 bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
871 const TargetTransformInfo *TTI);
872 bool maybeInsertAsanInitAtFunctionEntry(Function &F);
873 bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
874 void markEscapedLocalAllocas(Function &F);
875 void markCatchParametersAsUninteresting(Function &F);
876
877private:
878 friend struct FunctionStackPoisoner;
879
880 void initializeCallbacks(const TargetLibraryInfo *TLI);
881
882 bool LooksLikeCodeInBug11395(Instruction *I);
883 bool GlobalIsLinkerInitialized(GlobalVariable *G);
884 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
885 TypeSize TypeStoreSize) const;
886
887 /// Helper to cleanup per-function state.
888 struct FunctionStateRAII {
889 AddressSanitizer *Pass;
890
891 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
892 assert(Pass->ProcessedAllocas.empty() &&
893 "last pass forgot to clear cache");
894 assert(!Pass->LocalDynamicShadow);
895 }
896
897 ~FunctionStateRAII() {
898 Pass->LocalDynamicShadow = nullptr;
899 Pass->ProcessedAllocas.clear();
900 }
901 };
902
903 Module &M;
904 LLVMContext *C;
905 const DataLayout *DL;
906 Triple TargetTriple;
907 int LongSize;
908 bool CompileKernel;
909 bool Recover;
910 bool UseAfterScope;
// NOTE(review): dropped member (orig. line 911) — the UseAfterReturn mode
// initialized by the constructor above; confirm its declaration upstream.
912 Type *IntptrTy;
913 Type *Int32Ty;
914 PointerType *PtrTy;
915 ShadowMapping Mapping;
916 FunctionCallee AsanHandleNoReturnFunc;
917 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
918 Constant *AsanShadowGlobal;
919
920 // These arrays is indexed by AccessIsWrite, Experiment and log2(AccessSize).
921 FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
922 FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
923
924 // These arrays is indexed by AccessIsWrite and Experiment.
925 FunctionCallee AsanErrorCallbackSized[2][2];
926 FunctionCallee AsanMemoryAccessCallbackSized[2][2];
927
928 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
929 Value *LocalDynamicShadow = nullptr;
930 const StackSafetyGlobalInfo *SSGI;
931 DenseMap<const AllocaInst *, bool> ProcessedAllocas;
932
933 FunctionCallee AMDGPUAddressShared;
934 FunctionCallee AMDGPUAddressPrivate;
935 int InstrumentationWithCallsThreshold;
936 uint32_t MaxInlinePoisoningSize;
937};
938
939class ModuleAddressSanitizer {
940public:
941 ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
942 bool CompileKernel = false, bool Recover = false,
943 bool UseGlobalsGC = true, bool UseOdrIndicator = true,
944 AsanDtorKind DestructorKind = AsanDtorKind::Global,
945 AsanCtorKind ConstructorKind = AsanCtorKind::Global)
946 : M(M),
947 CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
948 : CompileKernel),
949 InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
951 : InsertVersionCheck),
952 Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
953 UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
954 // Enable aliases as they should have no downside with ODR indicators.
955 UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
957 : UseOdrIndicator),
958 UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
960 : UseOdrIndicator),
961 // Not a typo: ClWithComdat is almost completely pointless without
962 // ClUseGlobalsGC (because then it only works on modules without
963 // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
964 // and both suffer from gold PR19002 for which UseGlobalsGC constructor
965 // argument is designed as workaround. Therefore, disable both
966 // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
967 // do globals-gc.
968 UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
969 DestructorKind(DestructorKind),
970 ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
972 : ConstructorKind) {
973 C = &(M.getContext());
974 int LongSize = M.getDataLayout().getPointerSizeInBits();
975 IntptrTy = Type::getIntNTy(*C, LongSize);
976 PtrTy = PointerType::getUnqual(*C);
977 TargetTriple = M.getTargetTriple();
978 Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
979
980 if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
981 this->DestructorKind = ClOverrideDestructorKind;
982 assert(this->DestructorKind != AsanDtorKind::Invalid);
983 }
984
985 bool instrumentModule();
986
987private:
988 void initializeCallbacks();
989
990 void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
991 void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
992 ArrayRef<GlobalVariable *> ExtendedGlobals,
993 ArrayRef<Constant *> MetadataInitializers);
994 void instrumentGlobalsELF(IRBuilder<> &IRB,
995 ArrayRef<GlobalVariable *> ExtendedGlobals,
996 ArrayRef<Constant *> MetadataInitializers,
997 const std::string &UniqueModuleId);
998 void InstrumentGlobalsMachO(IRBuilder<> &IRB,
999 ArrayRef<GlobalVariable *> ExtendedGlobals,
1000 ArrayRef<Constant *> MetadataInitializers);
1001 void
1002 InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
1003 ArrayRef<GlobalVariable *> ExtendedGlobals,
1004 ArrayRef<Constant *> MetadataInitializers);
1005
1006 GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
1007 StringRef OriginalName);
1008 void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
1009 StringRef InternalSuffix);
1010 Instruction *CreateAsanModuleDtor();
1011
1012 const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
1013 bool shouldInstrumentGlobal(GlobalVariable *G) const;
1014 bool ShouldUseMachOGlobalsSection() const;
1015 StringRef getGlobalMetadataSection() const;
1016 void poisonOneInitializer(Function &GlobalInit);
1017 void createInitializerPoisonCalls();
1018 uint64_t getMinRedzoneSizeForGlobal() const {
1019 return getRedzoneSizeForScale(Mapping.Scale);
1020 }
1021 uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
1022 int GetAsanVersion() const;
1023 GlobalVariable *getOrCreateModuleName();
1024
1025 Module &M;
1026 bool CompileKernel;
1027 bool InsertVersionCheck;
1028 bool Recover;
1029 bool UseGlobalsGC;
1030 bool UsePrivateAlias;
1031 bool UseOdrIndicator;
1032 bool UseCtorComdat;
1033 AsanDtorKind DestructorKind;
1034 AsanCtorKind ConstructorKind;
1035 Type *IntptrTy;
1036 PointerType *PtrTy;
1037 LLVMContext *C;
1038 Triple TargetTriple;
1039 ShadowMapping Mapping;
1040 FunctionCallee AsanPoisonGlobals;
1041 FunctionCallee AsanUnpoisonGlobals;
1042 FunctionCallee AsanRegisterGlobals;
1043 FunctionCallee AsanUnregisterGlobals;
1044 FunctionCallee AsanRegisterImageGlobals;
1045 FunctionCallee AsanUnregisterImageGlobals;
1046 FunctionCallee AsanRegisterElfGlobals;
1047 FunctionCallee AsanUnregisterElfGlobals;
1048
1049 Function *AsanCtorFunction = nullptr;
1050 Function *AsanDtorFunction = nullptr;
1051 GlobalVariable *ModuleName = nullptr;
1052};
1053
1054// Stack poisoning does not play well with exception handling.
1055// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
1057// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
1058// stack in the interceptor. This however does not work inside the
1059// actual function which catches the exception. Most likely because the
1060// compiler hoists the load of the shadow value somewhere too high.
1061// This causes asan to report a non-existing bug on 453.povray.
1062// It sounds like an LLVM bug.
1063struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
1064 Function &F;
1065 AddressSanitizer &ASan;
1066 RuntimeCallInserter &RTCI;
1067 DIBuilder DIB;
1068 LLVMContext *C;
1069 Type *IntptrTy;
1070 Type *IntptrPtrTy;
1071 ShadowMapping Mapping;
1072
1074 SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
1075 SmallVector<Instruction *, 8> RetVec;
1076
1077 FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
1078 AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
1079 FunctionCallee AsanSetShadowFunc[0x100] = {};
1080 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1081 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1082
1083 // Stores a place and arguments of poisoning/unpoisoning call for alloca.
1084 struct AllocaPoisonCall {
1085 IntrinsicInst *InsBefore;
1086 AllocaInst *AI;
1087 uint64_t Size;
1088 bool DoPoison;
1089 };
1090 SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
1091 SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
1092
1093 SmallVector<AllocaInst *, 1> DynamicAllocaVec;
1094 SmallVector<IntrinsicInst *, 1> StackRestoreVec;
1095 AllocaInst *DynamicAllocaLayout = nullptr;
1096 IntrinsicInst *LocalEscapeCall = nullptr;
1097
1098 bool HasInlineAsm = false;
1099 bool HasReturnsTwiceCall = false;
1100 bool PoisonStack;
1101
1102 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1103 RuntimeCallInserter &RTCI)
1104 : F(F), ASan(ASan), RTCI(RTCI),
1105 DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
1106 IntptrTy(ASan.IntptrTy),
1107 IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
1108 Mapping(ASan.Mapping),
1109 PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}
1110
1111 bool runOnFunction() {
1112 if (!PoisonStack)
1113 return false;
1114
1116 copyArgsPassedByValToAllocas();
1117
1118 // Collect alloca, ret, lifetime instructions etc.
1119 for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
1120
1121 if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
1122
1123 initializeCallbacks(*F.getParent());
1124
1125 processDynamicAllocas();
1126 processStaticAllocas();
1127
1128 if (ClDebugStack) {
1129 LLVM_DEBUG(dbgs() << F);
1130 }
1131 return true;
1132 }
1133
1134 // Arguments marked with the "byval" attribute are implicitly copied without
1135 // using an alloca instruction. To produce redzones for those arguments, we
1136 // copy them a second time into memory allocated with an alloca instruction.
1137 void copyArgsPassedByValToAllocas();
1138
1139 // Finds all Alloca instructions and puts
1140 // poisoned red zones around all of them.
1141 // Then unpoison everything back before the function returns.
1142 void processStaticAllocas();
1143 void processDynamicAllocas();
1144
1145 void createDynamicAllocasInitStorage();
1146
1147 // ----------------------- Visitors.
1148 /// Collect all Ret instructions, or the musttail call instruction if it
1149 /// precedes the return instruction.
1150 void visitReturnInst(ReturnInst &RI) {
1151 if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
1152 RetVec.push_back(CI);
1153 else
1154 RetVec.push_back(&RI);
1155 }
1156
1157 /// Collect all Resume instructions.
1158 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1159
1160 /// Collect all CatchReturnInst instructions.
1161 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1162
1163 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1164 Value *SavedStack) {
1165 IRBuilder<> IRB(InstBefore);
1166 Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
1167 // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
1168 // need to adjust extracted SP to compute the address of the most recent
1169 // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
1170 // this purpose.
1171 if (!isa<ReturnInst>(InstBefore)) {
1172 Value *DynamicAreaOffset = IRB.CreateIntrinsic(
1173 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1174
1175 DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
1176 DynamicAreaOffset);
1177 }
1178
1179 RTCI.createRuntimeCall(
1180 IRB, AsanAllocasUnpoisonFunc,
1181 {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1182 }
1183
1184 // Unpoison dynamic allocas redzones.
1185 void unpoisonDynamicAllocas() {
1186 for (Instruction *Ret : RetVec)
1187 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1188
1189 for (Instruction *StackRestoreInst : StackRestoreVec)
1190 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1191 StackRestoreInst->getOperand(0));
1192 }
1193
1194 // Deploy and poison redzones around dynamic alloca call. To do this, we
1195 // should replace this call with another one with changed parameters and
1196 // replace all its uses with new address, so
1197 // addr = alloca type, old_size, align
1198 // is replaced by
1199 // new_size = (old_size + additional_size) * sizeof(type)
1200 // tmp = alloca i8, new_size, max(align, 32)
1201 // addr = tmp + 32 (first 32 bytes are for the left redzone).
1202 // Additional_size is added to make new memory allocation contain not only
1203 // requested memory, but also left, partial and right redzones.
1204 void handleDynamicAllocaCall(AllocaInst *AI);
1205
1206 /// Collect Alloca instructions we want (and can) handle.
1207 void visitAllocaInst(AllocaInst &AI) {
1208 // FIXME: Handle scalable vectors instead of ignoring them.
1209 const Type *AllocaType = AI.getAllocatedType();
1210 const auto *STy = dyn_cast<StructType>(AllocaType);
1211 if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
1212 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1213 if (AI.isStaticAlloca()) {
1214 // Skip over allocas that are present *before* the first instrumented
1215 // alloca, we don't want to move those around.
1216 if (AllocaVec.empty())
1217 return;
1218
1219 StaticAllocasToMoveUp.push_back(&AI);
1220 }
1221 return;
1222 }
1223
1224 if (!AI.isStaticAlloca())
1225 DynamicAllocaVec.push_back(&AI);
1226 else
1227 AllocaVec.push_back(&AI);
1228 }
1229
1230 /// Collect lifetime intrinsic calls to check for use-after-scope
1231 /// errors.
1232 void visitIntrinsicInst(IntrinsicInst &II) {
1233 Intrinsic::ID ID = II.getIntrinsicID();
1234 if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
1235 if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
1236 if (!ASan.UseAfterScope)
1237 return;
1238 if (!II.isLifetimeStartOrEnd())
1239 return;
1240 // Find alloca instruction that corresponds to llvm.lifetime argument.
1241 AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
1242 // We're interested only in allocas we can handle.
1243 if (!AI || !ASan.isInterestingAlloca(*AI))
1244 return;
1245
1246 std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
1247 // Check that size is known and can be stored in IntptrTy.
1248 // TODO: Add support for scalable vectors if possible.
1249 if (!Size || Size->isScalable() ||
1251 return;
1252
1253 bool DoPoison = (ID == Intrinsic::lifetime_end);
1254 AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
1255 if (AI->isStaticAlloca())
1256 StaticAllocaPoisonCallVec.push_back(APC);
1258 DynamicAllocaPoisonCallVec.push_back(APC);
1259 }
1260
1261 void visitCallBase(CallBase &CB) {
1262 if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
1263 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1264 HasReturnsTwiceCall |= CI->canReturnTwice();
1265 }
1266 }
1267
1268 // ---------------------- Helpers.
1269 void initializeCallbacks(Module &M);
1270
1271 // Copies bytes from ShadowBytes into shadow memory for indexes where
1272 // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
1273 // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
1274 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1275 IRBuilder<> &IRB, Value *ShadowBase);
1276 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1277 size_t Begin, size_t End, IRBuilder<> &IRB,
1278 Value *ShadowBase);
1279 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1280 ArrayRef<uint8_t> ShadowBytes, size_t Begin,
1281 size_t End, IRBuilder<> &IRB, Value *ShadowBase);
1282
1283 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
1284
1285 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
1286 bool Dynamic);
1287 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
1288 Instruction *ThenTerm, Value *ValueIfFalse);
1289};
1290
1291} // end anonymous namespace
1292
1294 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1296 OS, MapClassName2PassName);
1297 OS << '<';
1298 if (Options.CompileKernel)
1299 OS << "kernel;";
1300 if (Options.UseAfterScope)
1301 OS << "use-after-scope";
1302 OS << '>';
1303}
1304
1306 const AddressSanitizerOptions &Options, bool UseGlobalGC,
1307 bool UseOdrIndicator, AsanDtorKind DestructorKind,
1308 AsanCtorKind ConstructorKind)
1309 : Options(Options), UseGlobalGC(UseGlobalGC),
1310 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1311 ConstructorKind(ConstructorKind) {}
1312
1315 // Return early if nosanitize_address module flag is present for the module.
1316 // This implies that asan pass has already run before.
1317 if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
1318 return PreservedAnalyses::all();
1319
1320 ModuleAddressSanitizer ModuleSanitizer(
1321 M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
1322 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1323 bool Modified = false;
1324 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1325 const StackSafetyGlobalInfo *const SSGI =
1326 ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
1327 for (Function &F : M) {
1328 if (F.empty())
1329 continue;
1330 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
1331 continue;
1332 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
1333 continue;
1334 if (F.getName().starts_with("__asan_"))
1335 continue;
1336 if (F.isPresplitCoroutine())
1337 continue;
1338 AddressSanitizer FunctionSanitizer(
1339 M, SSGI, Options.InstrumentationWithCallsThreshold,
1340 Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
1341 Options.UseAfterScope, Options.UseAfterReturn);
1342 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1343 const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
1344 Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
1345 }
1346 Modified |= ModuleSanitizer.instrumentModule();
1347 if (!Modified)
1348 return PreservedAnalyses::all();
1349
1351 // GlobalsAA is considered stateless and does not get invalidated unless
1352 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
1353 // make changes that require GlobalsAA to be invalidated.
1354 PA.abandon<GlobalsAA>();
1355 return PA;
1356}
1357
1359 size_t Res = llvm::countr_zero(TypeSize / 8);
1361 return Res;
1362}
1363
1364/// Check if \p G has been created by a trusted compiler pass.
1366 // Do not instrument @llvm.global_ctors, @llvm.used, etc.
1367 if (G->getName().starts_with("llvm.") ||
1368 // Do not instrument gcov counter arrays.
1369 G->getName().starts_with("__llvm_gcov_ctr") ||
1370 // Do not instrument rtti proxy symbols for function sanitizer.
1371 G->getName().starts_with("__llvm_rtti_proxy"))
1372 return true;
1373
1374 // Do not instrument asan globals.
1375 if (G->getName().starts_with(kAsanGenPrefix) ||
1376 G->getName().starts_with(kSanCovGenPrefix) ||
1377 G->getName().starts_with(kODRGenPrefix))
1378 return true;
1379
1380 return false;
1381}
1382
1384 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1385 unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
1386 // Globals in address space 1 and 4 are supported for AMDGPU.
1387 if (AddrSpace == 3 || AddrSpace == 5)
1388 return true;
1389 return false;
1390}
1391
1392static bool isSupportedAddrspace(const Triple &TargetTriple, Value *Addr) {
1393 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1394 unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
1395
1396 if (!SrcAddrSpaces.empty())
1397 return SrcAddrSpaces.count(AddrSpace);
1398
1399 if (TargetTriple.isAMDGPU())
1400 return !isUnsupportedAMDGPUAddrspace(Addr);
1401
1402 return AddrSpace == 0;
1403}
1404
1405Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
1406 // Shadow >> scale
1407 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
1408 if (Mapping.Offset == 0) return Shadow;
1409 // (Shadow >> scale) | offset
1410 Value *ShadowBase;
1411 if (LocalDynamicShadow)
1412 ShadowBase = LocalDynamicShadow;
1413 else
1414 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1415 if (Mapping.OrShadowOffset)
1416 return IRB.CreateOr(Shadow, ShadowBase);
1417 else
1418 return IRB.CreateAdd(Shadow, ShadowBase);
1419}
1420
1421// Instrument memset/memmove/memcpy
1422void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
1423 RuntimeCallInserter &RTCI) {
1425 if (isa<MemTransferInst>(MI)) {
1426 RTCI.createRuntimeCall(
1427 IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
1428 {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
1429 IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
1430 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1431 } else if (isa<MemSetInst>(MI)) {
1432 RTCI.createRuntimeCall(
1433 IRB, AsanMemset,
1434 {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
1435 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1436 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1437 }
1438 MI->eraseFromParent();
1439}
1440
1441/// Check if we want (and can) handle this alloca.
1442bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1443 auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);
1444
1445 if (!Inserted)
1446 return It->getSecond();
1447
1448 bool IsInteresting =
1449 (AI.getAllocatedType()->isSized() &&
1450 // alloca() may be called with 0 size, ignore it.
1451 ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
1452 // We are only interested in allocas not promotable to registers.
1453 // Promotable allocas are common under -O0.
1455 // inalloca allocas are not treated as static, and we don't want
1456 // dynamic alloca instrumentation for them as well.
1457 !AI.isUsedWithInAlloca() &&
1458 // swifterror allocas are register promoted by ISel
1459 !AI.isSwiftError() &&
1460 // safe allocas are not interesting
1461 !(SSGI && SSGI->isSafe(AI)));
1462
1463 It->second = IsInteresting;
1464 return IsInteresting;
1465}
1466
1467bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
1468 // Check whether the target supports sanitizing the address space
1469 // of the pointer.
1470 if (!isSupportedAddrspace(TargetTriple, Ptr))
1471 return true;
1472
1473 // Ignore swifterror addresses.
1474 // swifterror memory addresses are mem2reg promoted by instruction
1475 // selection. As such they cannot have regular uses like an instrumentation
1476 // function and it makes no sense to track them as memory.
1477 if (Ptr->isSwiftError())
1478 return true;
1479
1480 // Treat memory accesses to promotable allocas as non-interesting since they
1481 // will not cause memory violations. This greatly speeds up the instrumented
1482 // executable at -O0.
1483 if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
1484 if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
1485 return true;
1486
1487 if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
1488 findAllocaForValue(Ptr))
1489 return true;
1490
1491 return false;
1492}
1493
1494void AddressSanitizer::getInterestingMemoryOperands(
1496 const TargetTransformInfo *TTI) {
1497 // Do not instrument the load fetching the dynamic shadow address.
1498 if (LocalDynamicShadow == I)
1499 return;
1500
1501 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1502 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
1503 return;
1504 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
1505 LI->getType(), LI->getAlign());
1506 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1507 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
1508 return;
1509 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
1510 SI->getValueOperand()->getType(), SI->getAlign());
1511 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
1512 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
1513 return;
1514 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
1515 RMW->getValOperand()->getType(), std::nullopt);
1516 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
1517 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
1518 return;
1519 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
1520 XCHG->getCompareOperand()->getType(),
1521 std::nullopt);
1522 } else if (auto CI = dyn_cast<CallInst>(I)) {
1523 switch (CI->getIntrinsicID()) {
1524 case Intrinsic::masked_load:
1525 case Intrinsic::masked_store:
1526 case Intrinsic::masked_gather:
1527 case Intrinsic::masked_scatter: {
1528 bool IsWrite = CI->getType()->isVoidTy();
1529 // Masked store has an initial operand for the value.
1530 unsigned OpOffset = IsWrite ? 1 : 0;
1531 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1532 return;
1533
1534 auto BasePtr = CI->getOperand(OpOffset);
1535 if (ignoreAccess(I, BasePtr))
1536 return;
1537 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1538 MaybeAlign Alignment = CI->getParamAlign(0);
1539 Value *Mask = CI->getOperand(1 + OpOffset);
1540 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
1541 break;
1542 }
1543 case Intrinsic::masked_expandload:
1544 case Intrinsic::masked_compressstore: {
1545 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1546 unsigned OpOffset = IsWrite ? 1 : 0;
1547 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1548 return;
1549 auto BasePtr = CI->getOperand(OpOffset);
1550 if (ignoreAccess(I, BasePtr))
1551 return;
1552 MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
1553 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1554
1555 IRBuilder IB(I);
1556 Value *Mask = CI->getOperand(1 + OpOffset);
1557 // Use the popcount of Mask as the effective vector length.
1558 Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
1559 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
1560 Value *EVL = IB.CreateAddReduce(ExtMask);
1561 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
1562 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1563 EVL);
1564 break;
1565 }
1566 case Intrinsic::vp_load:
1567 case Intrinsic::vp_store:
1568 case Intrinsic::experimental_vp_strided_load:
1569 case Intrinsic::experimental_vp_strided_store: {
1570 auto *VPI = cast<VPIntrinsic>(CI);
1571 unsigned IID = CI->getIntrinsicID();
1572 bool IsWrite = CI->getType()->isVoidTy();
1573 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1574 return;
1575 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1576 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1577 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
1578 Value *Stride = nullptr;
1579 if (IID == Intrinsic::experimental_vp_strided_store ||
1580 IID == Intrinsic::experimental_vp_strided_load) {
1581 Stride = VPI->getOperand(PtrOpNo + 1);
1582 // Use the pointer alignment as the element alignment if the stride is a
1583 // multiple of the pointer alignment. Otherwise, the element alignment
1584 // should be Align(1).
1585 unsigned PointerAlign = Alignment.valueOrOne().value();
1586 if (!isa<ConstantInt>(Stride) ||
1587 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
1588 Alignment = Align(1);
1589 }
1590 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1591 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1592 Stride);
1593 break;
1594 }
1595 case Intrinsic::vp_gather:
1596 case Intrinsic::vp_scatter: {
1597 auto *VPI = cast<VPIntrinsic>(CI);
1598 unsigned IID = CI->getIntrinsicID();
1599 bool IsWrite = IID == Intrinsic::vp_scatter;
1600 if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
1601 return;
1602 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1603 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1604 MaybeAlign Alignment = VPI->getPointerAlignment();
1605 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
1606 VPI->getMaskParam(),
1607 VPI->getVectorLengthParam());
1608 break;
1609 }
1610 default:
1611 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1612 MemIntrinsicInfo IntrInfo;
1613 if (TTI->getTgtMemIntrinsic(II, IntrInfo))
1614 Interesting = IntrInfo.InterestingOperands;
1615 return;
1616 }
1617 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1618 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
1619 ignoreAccess(I, CI->getArgOperand(ArgNo)))
1620 continue;
1621 Type *Ty = CI->getParamByValType(ArgNo);
1622 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
1623 }
1624 }
1625 }
1626}
1627
1628static bool isPointerOperand(Value *V) {
1629 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1630}
1631
1632// This is a rough heuristic; it may cause both false positives and
1633// false negatives. The proper implementation requires cooperation with
1634// the frontend.
1636 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1637 if (!Cmp->isRelational())
1638 return false;
1639 } else {
1640 return false;
1641 }
1642 return isPointerOperand(I->getOperand(0)) &&
1643 isPointerOperand(I->getOperand(1));
1644}
1645
1646// This is a rough heuristic; it may cause both false positives and
1647// false negatives. The proper implementation requires cooperation with
1648// the frontend.
1651 if (BO->getOpcode() != Instruction::Sub)
1652 return false;
1653 } else {
1654 return false;
1655 }
1656 return isPointerOperand(I->getOperand(0)) &&
1657 isPointerOperand(I->getOperand(1));
1658}
1659
1660bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1661 // If a global variable does not have dynamic initialization we don't
1662 // have to instrument it. However, if a global does not have initializer
1663 // at all, we assume it has dynamic initializer (in other TU).
1664 if (!G->hasInitializer())
1665 return false;
1666
1667 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1668 return false;
1669
1670 return true;
1671}
1672
1673void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1674 Instruction *I, RuntimeCallInserter &RTCI) {
1675 IRBuilder<> IRB(I);
1676 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1677 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1678 for (Value *&i : Param) {
1679 if (i->getType()->isPointerTy())
1680 i = IRB.CreatePointerCast(i, IntptrTy);
1681 }
1682 RTCI.createRuntimeCall(IRB, F, Param);
1683}
1684
1685static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1686 Instruction *InsertBefore, Value *Addr,
1687 MaybeAlign Alignment, unsigned Granularity,
1688 TypeSize TypeStoreSize, bool IsWrite,
1689 Value *SizeArgument, bool UseCalls,
1690 uint32_t Exp, RuntimeCallInserter &RTCI) {
1691 // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1692 // if the data is properly aligned.
1693 if (!TypeStoreSize.isScalable()) {
1694 const auto FixedSize = TypeStoreSize.getFixedValue();
1695 switch (FixedSize) {
1696 case 8:
1697 case 16:
1698 case 32:
1699 case 64:
1700 case 128:
1701 if (!Alignment || *Alignment >= Granularity ||
1702 *Alignment >= FixedSize / 8)
1703 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1704 FixedSize, IsWrite, nullptr, UseCalls,
1705 Exp, RTCI);
1706 }
1707 }
1708 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1709 IsWrite, nullptr, UseCalls, Exp, RTCI);
1710}
1711
// Instrument a masked/predicated vector memory intrinsic (masked
// load/store/gather/scatter, expandload/compressstore, and the vp.*
// variants) by emitting a per-lane loop that checks only the enabled lanes:
// lanes disabled by a constant mask are skipped entirely, non-constant mask
// lanes get a conditional check, and for vp.* intrinsics only the first EVL
// lanes are visited.
void AddressSanitizer::instrumentMaskedLoadOrStore(
    AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
    Value *EVL, Value *Stride, Instruction *I, Value *Addr,
    MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
    Value *SizeArgument, bool UseCalls, uint32_t Exp,
    RuntimeCallInserter &RTCI) {
  auto *VTy = cast<VectorType>(OpType);
  TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  auto Zero = ConstantInt::get(IntptrTy, 0);

  IRBuilder IB(I);
  Instruction *LoopInsertBefore = I;
  if (EVL) {
    // The end argument of SplitBlockAndInsertForLane is assumed bigger
    // than zero, so we should check whether EVL is zero here.
    Type *EVLType = EVL->getType();
    Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
    LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
    IB.SetInsertPoint(LoopInsertBefore);
    // Cast EVL to IntptrTy.
    EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
    // To avoid undefined behavior for extracting with out of range index, use
    // the minimum of evl and element count as trip count.
    Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
    EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
  } else {
    // No explicit vector length: visit every lane of the vector.
    EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
  }

  // Cast Stride to IntptrTy.
  if (Stride)
    Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);

  SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
                                 [&](IRBuilderBase &IRB, Value *Index) {
    Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
    if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
      if (MaskElemC->isZero())
        // No check
        return;
      // Unconditional check
    } else {
      // Conditional check
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(
          MaskElem, &*IRB.GetInsertPoint(), false);
      IRB.SetInsertPoint(ThenTerm);
    }

    // Compute the address this lane actually touches: the lane's own pointer
    // for gather/scatter, base + lane*stride for strided accesses, or an
    // element GEP into the contiguous in-memory vector otherwise.
    Value *InstrumentedAddress;
    if (isa<VectorType>(Addr->getType())) {
      assert(
          cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
          "Expected vector of pointer.");
      InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
    } else if (Stride) {
      Index = IRB.CreateMul(Index, Stride);
      InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
    } else {
      InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
    }
    doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
                        Alignment, Granularity, ElemTypeSize, IsWrite,
                        SizeArgument, UseCalls, Exp, RTCI);
  });
}
1777
/// Instrument one interesting memory operand \p O: skip accesses the
/// ClOpt* flags let us prove safe, fold any explicit byte offset into the
/// address, then emit either a masked/vector or a scalar shadow check.
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     InterestingMemoryOperand &O, bool UseCalls,
                                     const DataLayout &DL,
                                     RuntimeCallInserter &RTCI) {
  Value *Addr = O.getPtr();

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, you set Exp to a non-zero value (mask of optimization
  // experiments that want to remove instrumentation of this instruction).
  // If Exp is non-zero, this pass will emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
  // make runtime terminate the program in a special way (with a different
  // exit status). Then you run the new compiler on a buggy corpus, collect
  // the special terminations (ideally, you don't see them at all -- no false
  // negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
        isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  // Keep the read/write statistics for the accesses we do instrument.
  if (O.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  // Fold the operand's explicit byte offset into Addr with an i8 GEP so the
  // checks below see the final accessed address.
  if (O.MaybeByteOffset) {
    Type *Ty = Type::getInt8Ty(*C);
    IRBuilder IB(O.getInsn());

    Value *OffsetOp = O.MaybeByteOffset;
    if (TargetTriple.isRISCV()) {
      Type *OffsetTy = OffsetOp->getType();
      // RVV indexed loads/stores zero-extend offset operands which are narrower
      // than XLEN to XLEN.
      if (OffsetTy->getScalarType()->getIntegerBitWidth() <
          static_cast<unsigned>(LongSize)) {
        VectorType *OrigType = cast<VectorType>(OffsetTy);
        Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
        OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
      }
    }
    Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
  }

  // One shadow byte covers 2^Scale application bytes.
  unsigned Granularity = 1 << Mapping.Scale;
  if (O.MaybeMask) {
    // Masked/strided/vector access: instrument lane by lane.
    instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
                                O.MaybeStride, O.getInsn(), Addr, O.Alignment,
                                Granularity, O.OpType, O.IsWrite, nullptr,
                                UseCalls, Exp, RTCI);
  } else {
    doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
                        Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
                        UseCalls, Exp, RTCI);
  }
}
1853
/// Emit the call into the ASan runtime that reports the failed access and
/// (unless recovering) terminates the program. The callee is selected by
/// whether a runtime size argument is present, by access direction
/// (\p IsWrite), by \p AccessSizeIndex, and by whether an experiment value
/// \p Exp must be forwarded. Returns the emitted call instruction.
Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
                                                 Value *SizeArgument,
                                                 uint32_t Exp,
                                                 RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(InsertBefore);
  // Exp == 0 selects the plain callbacks, which take no experiment operand.
  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
  CallInst *Call = nullptr;
  if (SizeArgument) {
    // __asan_report_{load,store}_n[_exp](addr, size[, exp])
    if (Exp == 0)
      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
                                    {Addr, SizeArgument});
    else
      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
                                    {Addr, SizeArgument, ExpVal});
  } else {
    // __asan_report_{load,store}{1,2,4,8,16}[_exp](addr[, exp])
    if (Exp == 0)
      Call = RTCI.createRuntimeCall(
          IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
    else
      Call = RTCI.createRuntimeCall(
          IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
  }

  return Call;
}
1882
1883Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1884 Value *ShadowValue,
1885 uint32_t TypeStoreSize) {
1886 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1887 // Addr & (Granularity - 1)
1888 Value *LastAccessedByte =
1889 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1890 // (Addr & (Granularity - 1)) + size - 1
1891 if (TypeStoreSize / 8 > 1)
1892 LastAccessedByte = IRB.CreateAdd(
1893 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1894 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1895 LastAccessedByte =
1896 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1897 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1898 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1899}
1900
/// AMDGPU-specific pre-filter for address instrumentation. Returns nullptr
/// when the address must not be instrumented at all, \p InsertBefore
/// unchanged for non-generic (addrspace != 0) pointers that follow host
/// instrumentation, and otherwise a new insertion point inside a block that
/// is only reached when the generic address is neither shared nor private
/// (those spaces have no shadow).
Instruction *AddressSanitizer::instrumentAMDGPUAddress(
    Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
    uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
  // Do not instrument unsupported addrspaces.
    return nullptr;
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  // Follow host instrumentation for global and constant addresses.
  if (PtrTy->getPointerAddressSpace() != 0)
    return InsertBefore;
  // Instrument generic addresses in supported addressspaces.
  IRBuilder<> IRB(InsertBefore);
  // Runtime predicates classifying the generic pointer.
  Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
  Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
  Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
  // Only check addresses that are neither shared nor private.
  Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
  Value *AddrSpaceZeroLanding =
      SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
  InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
  return InsertBefore;
}
1922
/// Create the AMDGPU error-report block for condition \p Cond. Without
/// recovery, the whole wavefront first agrees (via ballot) that at least one
/// lane failed, and the per-lane branch then funnels into a call to the
/// AMDGPU "unreachable" runtime hook; the returned instruction is the
/// insertion point for the report code.
Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
                                                    Value *Cond, bool Recover) {
  Module &M = *IRB.GetInsertBlock()->getModule();
  Value *ReportCond = Cond;
  if (!Recover) {
    // Wave-wide reduction: report if any lane's condition is set.
    auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
                                        IRB.getInt1Ty());
    ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
  }

  auto *Trm =
      SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
  Trm->getParent()->setName("asan.report");

  if (Recover)
    return Trm;

  // Non-recoverable: split again on the per-lane condition and terminate.
  Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
  IRB.SetInsertPoint(Trm);
  return IRB.CreateCall(
      M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
}
1946
1947void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1948 Instruction *InsertBefore, Value *Addr,
1949 MaybeAlign Alignment,
1950 uint32_t TypeStoreSize, bool IsWrite,
1951 Value *SizeArgument, bool UseCalls,
1952 uint32_t Exp,
1953 RuntimeCallInserter &RTCI) {
1954 if (TargetTriple.isAMDGPU()) {
1955 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1956 TypeStoreSize, IsWrite, SizeArgument);
1957 if (!InsertBefore)
1958 return;
1959 }
1960
1961 InstrumentationIRBuilder IRB(InsertBefore);
1962 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1963
1964 if (UseCalls && ClOptimizeCallbacks) {
1965 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1966 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1967 {IRB.CreatePointerCast(Addr, PtrTy),
1968 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1969 return;
1970 }
1971
1972 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1973 if (UseCalls) {
1974 if (Exp == 0)
1975 RTCI.createRuntimeCall(
1976 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1977 else
1978 RTCI.createRuntimeCall(
1979 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1980 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1981 return;
1982 }
1983
1984 Type *ShadowTy =
1985 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1986 Type *ShadowPtrTy = PointerType::get(*C, ClShadowAddrSpace);
1987 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1988 const uint64_t ShadowAlign =
1989 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1990 Value *ShadowValue = IRB.CreateAlignedLoad(
1991 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1992
1993 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1994 size_t Granularity = 1ULL << Mapping.Scale;
1995 Instruction *CrashTerm = nullptr;
1996
1997 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1998
1999 if (TargetTriple.isAMDGCN()) {
2000 if (GenSlowPath) {
2001 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
2002 Cmp = IRB.CreateAnd(Cmp, Cmp2);
2003 }
2004 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
2005 } else if (GenSlowPath) {
2006 // We use branch weights for the slow path check, to indicate that the slow
2007 // path is rarely taken. This seems to be the case for SPEC benchmarks.
2009 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
2010 BasicBlock *NextBB = cast<UncondBrInst>(CheckTerm)->getSuccessor();
2011 IRB.SetInsertPoint(CheckTerm);
2012 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
2013 if (Recover) {
2014 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
2015 } else {
2016 BasicBlock *CrashBlock =
2017 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
2018 CrashTerm = new UnreachableInst(*C, CrashBlock);
2019 CondBrInst *NewTerm = CondBrInst::Create(Cmp2, CrashBlock, NextBB);
2020 ReplaceInstWithInst(CheckTerm, NewTerm);
2021 }
2022 } else {
2023 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
2024 }
2025
2026 Instruction *Crash = generateCrashCode(
2027 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
2028 if (OrigIns->getDebugLoc())
2029 Crash->setDebugLoc(OrigIns->getDebugLoc());
2030}
2031
2032// Instrument unusual size or unusual alignment.
2033// We can not do it with a single check, so we do 1-byte check for the first
2034// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
2035// to report the actual access size.
2036void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2037 Instruction *I, Instruction *InsertBefore, Value *Addr,
2038 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
2039 uint32_t Exp, RuntimeCallInserter &RTCI) {
2040 InstrumentationIRBuilder IRB(InsertBefore);
2041 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
2042 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
2043
2044 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
2045 if (UseCalls) {
2046 if (Exp == 0)
2047 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2048 {AddrLong, Size});
2049 else
2050 RTCI.createRuntimeCall(
2051 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2052 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
2053 } else {
2054 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
2055 Value *LastByte = IRB.CreateIntToPtr(
2056 IRB.CreateAdd(AddrLong, SizeMinusOne),
2057 Addr->getType());
2058 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
2059 RTCI);
2060 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
2061 Exp, RTCI);
2062 }
2063}
2064
/// Bracket a dynamic-initializer function \p GlobalInit with runtime calls:
/// poison all global redzones on entry (so init-order bugs are caught) and
/// unpoison them again before every return.
void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(&GlobalInit.front(),
                  GlobalInit.front().getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr =
      ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit)
      CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
}
2080
/// Walk llvm.global_ctors and instrument (via poisonOneInitializer) every
/// user-provided dynamic initializer that runs after asan.module_ctor, so
/// init-order violations are detectable while they execute.
void ModuleAddressSanitizer::createInitializerPoisonCalls() {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV)
    return;

  if (!CA)
    return;

  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;

    // Must have a function or null ptr.
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      // Never instrument our own module constructor.
      if (F->getName() == kAsanModuleCtorName) continue;
      auto *Priority = cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
        continue;
      poisonOneInitializer(*F);
    }
  }
}
2105
2106const GlobalVariable *
2107ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2108 // In case this function should be expanded to include rules that do not just
2109 // apply when CompileKernel is true, either guard all existing rules with an
2110 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2111 // should also apply to user space.
2112 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2113
2114 const Constant *C = GA.getAliasee();
2115
2116 // When compiling the kernel, globals that are aliased by symbols prefixed
2117 // by "__" are special and cannot be padded with a redzone.
2118 if (GA.getName().starts_with("__"))
2119 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2120
2121 return nullptr;
2122}
2123
/// Decide whether global \p G should receive a redzone and be registered
/// with the runtime. Filters out globals that are unsized, externally
/// defined, thread-local, compiler-generated, over-aligned, in special or
/// user-defined sections, or otherwise unsafe to pad.
bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
  Type *Ty = G->getValueType();
  LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  // Explicit opt-out via sanitizer metadata (no_sanitize("address")).
  if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
    return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (!isSupportedAddrspace(TargetTriple, G))
    return false;
  if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
  // Two problems with thread-locals:
  // - The address of the main thread's copy can't be computed at link-time.
  // - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;

  // For non-COFF targets, only instrument globals known to be defined by this
  // TU.
  // FIXME: We can instrument comdat globals on ELF if we are using the
  // GC-friendly metadata scheme.
  if (!TargetTriple.isOSBinFormatCOFF()) {
    if (!G->hasExactDefinition() || G->hasComdat())
      return false;
  } else {
    // On COFF, don't instrument non-ODR linkages.
    if (G->isInterposable())
      return false;
    // If the global has AvailableExternally linkage, then it is not in this
    // module, which means it does not need to be instrumented.
    if (G->hasAvailableExternallyLinkage())
      return false;
  }

  // If a comdat is present, it must have a selection kind that implies ODR
  // semantics: no duplicates, any, or exact match.
  if (Comdat *C = G->getComdat()) {
    switch (C->getSelectionKind()) {
    case Comdat::Any:
    case Comdat::ExactMatch:
      break;
    case Comdat::Largest:
    case Comdat::SameSize:
      return false;
    }
  }

  if (G->hasSection()) {
    // The kernel uses explicit sections for mostly special global variables
    // that we should not instrument. E.g. the kernel may rely on their layout
    // without redzones, or remove them at link time ("discard.*"), etc.
    if (CompileKernel)
      return false;

    StringRef Section = G->getSection();

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
    // Do not instrument globals from special LLVM sections.
    if (Section.contains("__llvm") || Section.contains("__LLVM"))
      return false;

    // Do not instrument function pointers to initialization and termination
    // routines: dynamic linker will not properly handle redzones.
    if (Section.starts_with(".preinit_array") ||
        Section.starts_with(".init_array") ||
        Section.starts_with(".fini_array")) {
      return false;
    }

    // Do not instrument user-defined sections (with names resembling
    // valid C identifiers)
    if (TargetTriple.isOSBinFormatELF()) {
      if (llvm::all_of(Section,
                       [](char c) { return llvm::isAlnum(c) || c == '_'; }))
        return false;
    }

    // On COFF, if the section name contains '$', it is highly likely that the
    // user is using section sorting to create an array of globals similar to
    // the way initialization callbacks are registered in .init_array and
    // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
    // to such globals is counterproductive, because the intent is that they
    // will form an array, and out-of-bounds accesses are expected.
    // See https://github.com/google/sanitizers/issues/305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
      LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
                        << *G << "\n");
      return false;
    }

    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
        LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See https://github.com/google/sanitizers/issues/32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }
  }

  if (CompileKernel) {
    // Globals that prefixed by "__" are special and cannot be padded with a
    // redzone.
    if (G->getName().starts_with("__"))
      return false;
  }

  return true;
}
2263
2264// On Mach-O platforms, we emit global metadata in a separate section of the
2265// binary in order to allow the linker to properly dead strip. This is only
2266// supported on recent versions of ld64.
2267bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2268 if (!TargetTriple.isOSBinFormatMachO())
2269 return false;
2270
2271 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2272 return true;
2273 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2274 return true;
2275 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2276 return true;
2277 if (TargetTriple.isDriverKit())
2278 return true;
2279 if (TargetTriple.isXROS())
2280 return true;
2281
2282 return false;
2283}
2284
/// Name of the section that holds per-global ASan registration metadata for
/// the current object-file format; fatal for formats where the module pass
/// is not implemented.
StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
  switch (TargetTriple.getObjectFormat()) {
  case Triple::COFF:  return ".ASAN$GL";
  case Triple::ELF:   return "asan_globals";
  case Triple::MachO: return "__DATA,__asan_globals,regular";
  case Triple::Wasm:
  case Triple::GOFF:
  case Triple::SPIRV:
  case Triple::XCOFF:
        "ModuleAddressSanitizer not implemented for object file format");
    break;
  }
  llvm_unreachable("unsupported object format");
}
2302
/// Declare (or look up) all ASan runtime entry points the module pass emits
/// calls to: global poison/unpoison, per-array and per-image global
/// (un)registration, and the ELF start/stop-symbol registration variants.
void ModuleAddressSanitizer::initializeCallbacks() {
  IRBuilder<> IRB(*C);

  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals =
      M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
  AsanUnpoisonGlobals =
      M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());

  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanUnregisterGlobals = M.getOrInsertFunction(
      kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);

  // Declare the functions that find globals in a shared object and then invoke
  // the (un)register function on them.
  AsanRegisterImageGlobals = M.getOrInsertFunction(
      kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
  AsanUnregisterImageGlobals = M.getOrInsertFunction(

  // ELF variants take (flag, __start_section, __stop_section) addresses.
  AsanRegisterElfGlobals =
      M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, IntptrTy);
  AsanUnregisterElfGlobals =
      M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, IntptrTy);
}
2332
2333// Put the metadata and the instrumented global in the same group. This ensures
2334// that the metadata is discarded if the instrumented global is discarded.
2335void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2336 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2337 Module &M = *G->getParent();
2338 Comdat *C = G->getComdat();
2339 if (!C) {
2340 if (!G->hasName()) {
2341 // If G is unnamed, it must be internal. Give it an artificial name
2342 // so we can put it in a comdat.
2343 assert(G->hasLocalLinkage());
2344 G->setName(genName("anon_global"));
2345 }
2346
2347 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2348 std::string Name = std::string(G->getName());
2349 Name += InternalSuffix;
2350 C = M.getOrInsertComdat(Name);
2351 } else {
2352 C = M.getOrInsertComdat(G->getName());
2353 }
2354
2355 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2356 // linkage to internal linkage so that a symbol table entry is emitted. This
2357 // is necessary in order to create the comdat group.
2358 if (TargetTriple.isOSBinFormatCOFF()) {
2359 C->setSelectionKind(Comdat::NoDeduplicate);
2360 if (G->hasPrivateLinkage())
2361 G->setLinkage(GlobalValue::InternalLinkage);
2362 }
2363 G->setComdat(C);
2364 }
2365
2366 assert(G->hasComdat());
2367 Metadata->setComdat(G->getComdat());
2368}
2369
// Create a separate metadata global describing one instrumented global and
// put it in the appropriate ASan global registration section (see
// getGlobalMetadataSection()); the runtime walks that section to register
// redzones. Returns the new metadata global.
ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
                                             StringRef OriginalName) {
  auto Linkage = TargetTriple.isOSBinFormatMachO()
      M, Initializer->getType(), false, Linkage, Initializer,
      Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
  Metadata->setSection(getGlobalMetadataSection());
  // Place metadata in a large section for x86-64 ELF binaries to mitigate
  // relocation pressure.
  return Metadata;
}
2387
/// Create the (initially empty) asan module destructor function and return
/// its terminating ret, which callers use as an insertion point for
/// unregistration calls.
Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
  AsanDtorFunction = Function::createWithDefaultAttr(
  AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
  // Ensure Dtor cannot be discarded, even if in a comdat.
  appendToUsed(M, {AsanDtorFunction});
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);

  return ReturnInst::Create(*C, AsanDtorBB);
}
2399
2400void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2401 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2402 ArrayRef<Constant *> MetadataInitializers) {
2403 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2404 auto &DL = M.getDataLayout();
2405
2406 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2407 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2408 Constant *Initializer = MetadataInitializers[i];
2409 GlobalVariable *G = ExtendedGlobals[i];
2410 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2411 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2412 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2413 MetadataGlobals[i] = Metadata;
2414
2415 // The MSVC linker always inserts padding when linking incrementally. We
2416 // cope with that by aligning each struct to its size, which must be a power
2417 // of two.
2418 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2419 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2420 "global metadata will not be padded appropriately");
2421 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2422
2423 SetComdatForGlobalMetadata(G, Metadata, "");
2424 }
2425
2426 // Update llvm.compiler.used, adding the new metadata globals. This is
2427 // needed so that during LTO these variables stay alive.
2428 if (!MetadataGlobals.empty())
2429 appendToCompilerUsed(M, MetadataGlobals);
2430}
2431
/// ELF flavor of global registration: emit per-global metadata into the
/// asan_globals section and register the whole [__start, __stop) range with
/// the runtime once per image, guarded by a common-linkage flag.
void ModuleAddressSanitizer::instrumentGlobalsELF(
    IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers,
    const std::string &UniqueModuleId) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // Putting globals in a comdat changes the semantic and potentially cause
  // false negative odr violations at link time. If odr indicators are used, we
  // keep the comdat sections, as link time odr violations will be detected on
  // the odr indicator symbols.
  bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();

  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    GlobalVariable *G = ExtendedGlobals[i];
        CreateMetadataGlobal(MetadataInitializers[i], G->getName());
    // !associated lets the linker strip the metadata with its global.
    MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
    Metadata->setMetadata(LLVMContext::MD_associated, MD);
    MetadataGlobals[i] = Metadata;

    if (UseComdatForGlobalsGC)
      SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
  }

  // Update llvm.compiler.used, adding the new metadata globals. This is
  // needed so that during LTO these variables stay alive.
  if (!MetadataGlobals.empty())
    appendToCompilerUsed(M, MetadataGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);

  // Create start and stop symbols.
  GlobalVariable *StartELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__start_" + getGlobalMetadataSection());
  GlobalVariable *StopELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__stop_" + getGlobalMetadataSection());

  // Create a call to register the globals with the runtime.
  if (ConstructorKind == AsanCtorKind::Global)
    IRB.CreateCall(AsanRegisterElfGlobals,
                   {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                    IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                    IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
    IRBuilder<> IrbDtor(CreateAsanModuleDtor());
    IrbDtor.CreateCall(AsanUnregisterElfGlobals,
                       {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                        IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                        IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
  }
}
2500
/// Mach-O flavor of global registration: emit per-global metadata plus a
/// "liveness binder" struct in a live_support section so ld64 can dead-strip
/// metadata together with its global, then register the image once with the
/// runtime, guarded by a common-linkage flag.
void ModuleAddressSanitizer::InstrumentGlobalsMachO(
    IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // On recent Mach-O platforms, use a structure which binds the liveness of
  // the global variable to the metadata struct. Keep the list of "Liveness" GV
  // created to be added to llvm.compiler.used
  StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
  SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());

  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());

    // On recent Mach-O platforms, we emit the global metadata in a way that
    // allows the linker to properly strip dead globals.
    auto LivenessBinder =
        ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
    GlobalVariable *Liveness = new GlobalVariable(
        M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
        Twine("__asan_binder_") + G->getName());
    Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
    LivenessGlobals[i] = Liveness;
  }

  // Update llvm.compiler.used, adding the new liveness globals. This is
  // needed so that during LTO these variables stay alive. The alternative
  // would be to have the linker handling the LTO symbols, but libLTO
  // current API does not expose access to the section for each symbol.
  if (!LivenessGlobals.empty())
    appendToCompilerUsed(M, LivenessGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);

  if (ConstructorKind == AsanCtorKind::Global)
    IRB.CreateCall(AsanRegisterImageGlobals,
                   {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  if (DestructorKind != AsanDtorKind::None) {
    IRBuilder<> IrbDtor(CreateAsanModuleDtor());
    IrbDtor.CreateCall(AsanUnregisterImageGlobals,
                       {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
  }
}
2559
2560void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2561 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2562 ArrayRef<Constant *> MetadataInitializers) {
2563 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2564 unsigned N = ExtendedGlobals.size();
2565 assert(N > 0);
2566
2567 // On platforms that don't have a custom metadata section, we emit an array
2568 // of global metadata structures.
2569 ArrayType *ArrayOfGlobalStructTy =
2570 ArrayType::get(MetadataInitializers[0]->getType(), N);
2571 auto AllGlobals = new GlobalVariable(
2572 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2573 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2574 if (Mapping.Scale > 3)
2575 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2576
2577 if (ConstructorKind == AsanCtorKind::Global)
2578 IRB.CreateCall(AsanRegisterGlobals,
2579 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2580 ConstantInt::get(IntptrTy, N)});
2581
2582 // We also need to unregister globals at the end, e.g., when a shared library
2583 // gets closed.
2584 if (DestructorKind != AsanDtorKind::None) {
2585 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2586 IrbDtor.CreateCall(AsanUnregisterGlobals,
2587 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2588 ConstantInt::get(IntptrTy, N)});
2589 }
2590}
2591
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
// Sets *CtorComdat to true if the global registration code emitted into the
// asan constructor is comdat-compatible.
void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
                                               bool *CtorComdat) {
  // Build set of globals that are aliased by some GA, where
  // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
  SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
  if (CompileKernel) {
    for (auto &GA : M.aliases()) {
      if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
        AliasedGlobalExclusions.insert(GV);
    }
  }

  // Collect every global that passes the instrumentation filter and is not
  // excluded because of an alias.
  SmallVector<GlobalVariable *, 16> GlobalsToChange;
  for (auto &G : M.globals()) {
    if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
      GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  auto &DL = M.getDataLayout();

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   size_t padding_for_windows_msvc_incremental_link;
  //   size_t odr_indicator;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, IntptrTy);
  SmallVector<Constant *, 16> Initializers(n);

  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = GlobalsToChange[i];

    // Capture the global's sanitizer metadata; MD.IsDynInit is forwarded
    // into the runtime descriptor below.
    if (G->hasSanitizerMetadata())
      MD = G->getSanitizerMetadata();

    // The runtime library tries demangling symbol names in the descriptor but
    // functionality like __cxa_demangle may be unavailable (e.g.
    // -static-libstdc++). So we demangle the symbol names here.
    std::string NameForGlobal = G->getName().str();
        /*AllowMerging*/ true, genName("global"));

    Type *Ty = G->getValueType();
    const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
    const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    // New layout: { original value, zero-initialized trailing redzone }.
    StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
    GlobalVariable *NewGlobal = new GlobalVariable(
        M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
        G->getThreadLocalMode(), G->getAddressSpace());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setComdat(G->getComdat());
    // Align to the minimum redzone size so shadow poisoning stays aligned.
    NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
    // Don't fold globals with redzones. ODR violation detector and redzone
    // poisoning implicitly creates a dependence on the global's address, so it
    // is no longer valid for it to be marked unnamed_addr.

    // Move null-terminated C strings to "__asan_cstring" section on Darwin.
    if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
        G->isConstant()) {
      auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
      if (Seq && Seq->isCString())
        NewGlobal->setSection("__TEXT,__asan_cstring,regular");
    }

    // Transfer the debug info and type metadata. The payload starts at offset
    // zero so we can copy the metadata over as is.
    NewGlobal->copyMetadata(G, 0);

    // Swap the padded global in for the original everywhere, steal its name,
    // and delete the old one.
    G->replaceAllUsesWith(NewGlobal);
    NewGlobal->takeName(G);
    G->eraseFromParent();
    NewGlobals[i] = NewGlobal;

    Constant *ODRIndicator = Constant::getNullValue(IntptrTy);
    GlobalValue *InstrumentedGlobal = NewGlobal;

    bool CanUsePrivateAliases =
        TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
        TargetTriple.isOSBinFormatWasm();
    if (CanUsePrivateAliases && UsePrivateAlias) {
      // Create local alias for NewGlobal to avoid crash on ODR between
      // instrumented and non-instrumented libraries.
      InstrumentedGlobal =
    }

    // ODR should not happen for local linkage.
    if (NewGlobal->hasLocalLinkage()) {
      ODRIndicator = ConstantInt::getAllOnesValue(IntptrTy);
    } else if (UseOdrIndicator) {
      // With local aliases, we need to provide another externally visible
      // symbol __odr_asan_XXX to detect ODR violation.
      auto *ODRIndicatorSym =
          new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
                             kODRGenPrefix + NameForGlobal, nullptr,
                             NewGlobal->getThreadLocalMode());

      // Set meaningful attributes for indicator symbol.
      ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
      ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
      ODRIndicatorSym->setAlignment(Align(1));
      ODRIndicator = ConstantExpr::getPtrToInt(ODRIndicatorSym, IntptrTy);
    }

    // Fill in the runtime descriptor struct (layout documented above).
    Constant *Initializer = ConstantStruct::get(
        GlobalStructTy,
        ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit),
        Constant::getNullValue(IntptrTy), ODRIndicator);

    LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");

    Initializers[i] = Initializer;
  }

  // Add instrumented globals to llvm.compiler.used list to avoid LTO from
  // ConstantMerge'ing them.
  SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
  for (size_t i = 0; i < n; i++) {
    GlobalVariable *G = NewGlobals[i];
    if (G->getName().empty()) continue;
    GlobalsToAddToUsedList.push_back(G);
  }
  appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));

  if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
    // Use COMDAT and register globals even if n == 0 to ensure that (a) the
    // linkage unit will only have one module constructor, and (b) the register
    // function will be called. The module destructor is not created when n ==
    // 0.
    *CtorComdat = true;
    instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
  } else if (n == 0) {
    // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
    // all compile units will have identical module constructor/destructor.
    *CtorComdat = TargetTriple.isOSBinFormatELF();
  } else {
    *CtorComdat = false;
    if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
      InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
    } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
      InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
    } else {
      InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
    }
  }

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (ClInitializers)
    createInitializerPoisonCalls();

  LLVM_DEBUG(dbgs() << M);
}
2775
2776uint64_t
2777ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2778 constexpr uint64_t kMaxRZ = 1 << 18;
2779 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2780
2781 uint64_t RZ = 0;
2782 if (SizeInBytes <= MinRZ / 2) {
2783 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2784 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2785 // half of MinRZ.
2786 RZ = MinRZ - SizeInBytes;
2787 } else {
2788 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2789 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2790
2791 // Round up to multiple of MinRZ.
2792 if (SizeInBytes % MinRZ)
2793 RZ += MinRZ - (SizeInBytes % MinRZ);
2794 }
2795
2796 assert((RZ + SizeInBytes) % MinRZ == 0);
2797
2798 return RZ;
2799}
2800
2801int ModuleAddressSanitizer::GetAsanVersion() const {
2802 int LongSize = M.getDataLayout().getPointerSizeInBits();
2803 bool isAndroid = M.getTargetTriple().isAndroid();
2804 int Version = 8;
2805 // 32-bit Android is one version ahead because of the switch to dynamic
2806 // shadow.
2807 Version += (LongSize == 32 && isAndroid);
2808 return Version;
2809}
2810
2811GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2812 if (!ModuleName) {
2813 // We shouldn't merge same module names, as this string serves as unique
2814 // module ID in runtime.
2815 ModuleName =
2816 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2817 /*AllowMerging*/ false, genName("module"));
2818 }
2819 return ModuleName;
2820}
2821
bool ModuleAddressSanitizer::instrumentModule() {
  initializeCallbacks();

  // Drop function attributes that are incompatible with ASan instrumentation.
  for (Function &F : M)
    removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);

  // Create a module constructor. A destructor is created lazily because not all
  // platforms, and not all modules need it.
  if (ConstructorKind == AsanCtorKind::Global) {
    if (CompileKernel) {
      // The kernel always builds with its own runtime, and therefore does not
      // need the init and version check calls.
      AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
    } else {
      // Userspace ctor calls __asan_init and, optionally, a version check
      // whose name encodes the ABI version computed above.
      std::string AsanVersion = std::to_string(GetAsanVersion());
      std::string VersionCheckName =
          InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
      std::tie(AsanCtorFunction, std::ignore) =
              M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
              /*InitArgs=*/{}, VersionCheckName);
    }
  }

  bool CtorComdat = true;
  if (ClGlobals) {
    assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
    if (AsanCtorFunction) {
      // Emit global registration code into the ctor, before its terminator.
      IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
      instrumentGlobals(IRB, &CtorComdat);
    } else {
      // No ctor was requested; still rewrite globals using a detached builder.
      IRBuilder<> IRB(*C);
      instrumentGlobals(IRB, &CtorComdat);
    }
  }

  const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific
  // (2) target is ELF.
  if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
    if (AsanCtorFunction) {
      AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
      appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
    }
    if (AsanDtorFunction) {
      AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
      appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
    }
  } else {
    if (AsanCtorFunction)
      appendToGlobalCtors(M, AsanCtorFunction, Priority);
    if (AsanDtorFunction)
      appendToGlobalDtors(M, AsanDtorFunction, Priority);
  }

  return true;
}
2881
void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const std::string EndingStr = Recover ? "_noabort" : "";

      // Args2 is the signature for the sized ("_n"/"N") callbacks,
      // Args1 for the fixed-size ones; Exp variants get a trailing i32.
      SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
      SmallVector<Type *, 2> Args1{1, IntptrTy};
      AttributeList AL2;
      AttributeList AL1;
      if (Exp) {
        Type *ExpType = Type::getInt32Ty(*C);
        Args2.push_back(ExpType);
        Args1.push_back(ExpType);
        // Propagate the target's required sign/zero-extension attribute for
        // the i32 experiment parameter.
        if (auto AK = TLI->getExtAttrForI32Param(false)) {
          AL2 = AL2.addParamAttribute(*C, 2, AK);
          AL1 = AL1.addParamAttribute(*C, 1, AK);
        }
      }
      AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);

      // One callback per power-of-two access size (1 << AccessSizeIndex).
      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);

        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
      }
    }
  }

  // Interceptors for mem* routines; KASan may use unprefixed names.
  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                      PtrTy, PtrTy, PtrTy, IntptrTy);
  AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
                                     PtrTy, PtrTy, IntptrTy);
  AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                     TLI->getAttrList(C, {1}, /*Signed=*/false),
                                     PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);

  AsanHandleNoReturnFunc =
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());

  // Runtime checks for invalid pointer comparison / subtraction.
  AsanPtrCmpFunction =
      M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanPtrSubFunction =
      M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
  if (Mapping.InGlobal)
    AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
                                           ArrayType::get(IRB.getInt8Ty(), 0));

  // AMDGPU address-space classification helpers (i1 result).
  AMDGPUAddressShared =
      M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
  AMDGPUAddressPrivate =
      M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
}
2957
2958bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2959 // For each NSObject descendant having a +load method, this method is invoked
2960 // by the ObjC runtime before any of the static constructors is called.
2961 // Therefore we need to instrument such methods with a call to __asan_init
2962 // at the beginning in order to initialize our runtime before any access to
2963 // the shadow memory.
2964 // We cannot just ignore these methods, because they may call other
2965 // instrumented functions.
2966 if (F.getName().contains(" load]")) {
2967 FunctionCallee AsanInitFunction =
2968 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2969 IRBuilder<> IRB(&F.front(), F.front().begin());
2970 IRB.CreateCall(AsanInitFunction, {});
2971 return true;
2972 }
2973 return false;
2974}
2975
bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return false;

  // Materialize the shadow base once at function entry; instrumentation sites
  // read it from LocalDynamicShadow.
  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
      // An empty inline asm with input reg == output reg.
      // An opaque pointer-to-int cast, basically.
          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
          StringRef(""), StringRef("=r,0"),
          /*hasSideEffects=*/false);
      LocalDynamicShadow =
          IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
    } else {
      LocalDynamicShadow =
          IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
    }
  } else {
    // Load the shadow base from a module-level global (name elided in this
    // view; presumably populated by the runtime — confirm in full source).
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  }
  return true;
}
3003
void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
  // Find the one possible call to llvm.localescape and pre-mark allocas passed
  // to it as uninteresting. This assumes we haven't started processing allocas
  // yet. This check is done up front because iterating the use list in
  // isInterestingAlloca would be algorithmically slower.
  assert(ProcessedAllocas.empty() && "must process localescape before allocas");

  // Try to get the declaration of llvm.localescape. If it's not in the module,
  // we can exit early.
  if (!F.getParent()->getFunction("llvm.localescape")) return;

  // Look for a call to llvm.localescape call in the entry block. It can't be in
  // any other block.
  for (Instruction &I : F.getEntryBlock()) {
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      // We found a call. Mark all the allocas passed in as uninteresting.
      for (Value *Arg : II->args()) {
        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
        assert(AI && AI->isStaticAlloca() &&
               "non-static alloca arg to localescape");
        // false == "already processed, do not instrument".
        ProcessedAllocas[AI] = false;
      }
      // At most one localescape call exists per function, so stop scanning.
      break;
    }
  }
}
3031// Mitigation for https://github.com/google/sanitizers/issues/749
3032// We don't instrument Windows catch-block parameters to avoid
3033// interfering with exception handling assumptions.
3034void AddressSanitizer::markCatchParametersAsUninteresting(Function &F) {
3035 for (BasicBlock &BB : F) {
3036 for (Instruction &I : BB) {
3037 if (auto *CatchPad = dyn_cast<CatchPadInst>(&I)) {
3038 // Mark the parameters to a catch-block as uninteresting to avoid
3039 // instrumenting them.
3040 for (Value *Operand : CatchPad->arg_operands())
3041 if (auto *AI = dyn_cast<AllocaInst>(Operand))
3042 ProcessedAllocas[AI] = false;
3043 }
3044 }
3045 }
3046}
3047
3048bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
3049 bool ShouldInstrument =
3050 ClDebugMin < 0 || ClDebugMax < 0 ||
3051 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
3052 Instrumented++;
3053 return !ShouldInstrument;
3054}
3055
bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI,
                                          const TargetTransformInfo *TTI) {
  bool FunctionModified = false;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return FunctionModified;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(TLI);

  // RAII: clears per-function instrumentation state when we return.
  FunctionStateRAII CleanupObj(this);

  RuntimeCallInserter RTCI(F);

  FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static allocas
  // can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  if (TargetTriple.isOSWindows())
    markCatchParametersAsUninteresting(F);

  // We want to instrument every address only once per basic block (unless there
  // are calls between uses).
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    // The same-temp dedup set is per-BB; reset it on BB entry.
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);

      if (!InterestingOperands.empty()) {
        for (auto &Operand : InterestingOperands) {
          if (ClOpt && ClOptSameTemp) {
            Value *Ptr = Operand.getPtr();
            // If we have a mask, skip instrumentation if we've already
            // instrumented the full object. But don't add to TempsToInstrument
            // because we might get another load/store with a different mask.
            if (Operand.MaybeMask) {
              if (TempsToInstrument.count(Ptr))
                continue; // We've seen this (whole) temp in the current BB.
            } else {
              if (!TempsToInstrument.insert(Ptr).second)
                continue; // We've seen this temp in the current BB.
            }
          }
          OperandsToInstrument.push_back(Operand);
          NumInsnsPerBB++;
        }
      } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
        PointerComparisonsOrSubtracts.push_back(&Inst);
      } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
        // ok, take it.
        IntrinToInstrument.push_back(MI);
        NumInsnsPerBB++;
      } else {
        if (auto *CB = dyn_cast<CallBase>(&Inst)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CB->doesNotReturn())
            NoReturnCalls.push_back(CB);
        }
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
      }
      // Bound the number of instrumented sites per BB.
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  // Switch to out-of-line callback instrumentation once the number of sites
  // exceeds the threshold (a negative threshold disables this).
  bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
                   OperandsToInstrument.size() + IntrinToInstrument.size() >
                       (unsigned)InstrumentationWithCallsThreshold);
  const DataLayout &DL = F.getDataLayout();
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());

  // Instrument.
  int NumInstrumented = 0;
  for (auto &Operand : OperandsToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMop(ObjSizeVis, Operand, UseCalls,
                    F.getDataLayout(), RTCI);
    FunctionModified = true;
  }
  for (auto *Inst : IntrinToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMemIntrinsic(Inst, RTCI);
    FunctionModified = true;
  }

  FunctionStackPoisoner FSP(F, *this, RTCI);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto *CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
  }

  for (auto *Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst, RTCI);
    FunctionModified = true;
  }

  if (ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}
3200
// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once the bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  // Only 32-bit targets are affected.
  if (LongSize != 32) return false;
  if (!CI || !CI->isInlineAsm()) return false;
  // Inline asm with few operands is considered harmless.
  if (CI->arg_size() <= 5)
    return false;
  // We have inline assembly with quite a few arguments.
  return true;
}
3213
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Fake-stack malloc/free pairs are only needed when use-after-return
  // detection is enabled (always or runtime-controlled).
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
    // One malloc/free callback per fake-stack size class.
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  // Shadow-setting helpers, one per shadow byte value the poisoner emits;
  // the two-digit hex value is appended to the callback name.
  for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
                     0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  // Poison/unpoison callbacks for dynamic (VLA/alloca) stack memory.
  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
3252
void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  // Widest store we emit: 8 bytes, limited to 4 on 32-bit targets.
  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getDataLayout().isLittleEndian();

  // Poison the given shadow range using the largest store size, trimming
  // leading and trailing zeros in ShadowMask. Zeros never change, so they need
  // neither poisoning nor un-poisoning; still we don't mind if some of them
  // land in the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    // Pack the shadow bytes into a single integer, honoring target endianness
    // so the bytes land at the right shadow offsets.
    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    // Store the packed value at ShadowBase + i.
    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
        Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
        Align(1));

    i += StoreSizeInBytes;
  }
}
3305
3306void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3307 ArrayRef<uint8_t> ShadowBytes,
3308 IRBuilder<> &IRB, Value *ShadowBase) {
3309 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3310}
3311
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  // Walk maximal runs [i, j) of an identical shadow byte value. Long runs are
  // emitted as one runtime call; everything between runs is flushed with
  // inline stores (copyToShadowInline). Done tracks the first index not yet
  // emitted.
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    // Only values with a registered __asan_set_shadow_* callback can use the
    // runtime-call path.
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ASan.MaxInlinePoisoningSize) {
      // Flush everything accumulated before this run inline, then set the run
      // itself via the runtime helper.
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      RTCI.createRuntimeCall(
          IRB, AsanSetShadowFunc[Val],
          {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
           ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  // Flush the remaining tail inline.
  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
3343
3344// Fake stack allocator (asan_fake_stack.h) has 11 size classes
3345// for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
3346static int StackMallocSizeClass(uint64_t LocalStackSize) {
3347 assert(LocalStackSize <= kMaxStackMallocSize);
3348 uint64_t MaxSize = kMinStackMallocSize;
3349 for (int i = 0;; i++, MaxSize *= 2)
3350 if (LocalStackSize <= MaxSize) return i;
3351 llvm_unreachable("impossible LocalStackSize");
3352}
3353
3354void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3355 Instruction *CopyInsertPoint = &F.front().front();
3356 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3357 // Insert after the dynamic shadow location is determined
3358 CopyInsertPoint = CopyInsertPoint->getNextNode();
3359 assert(CopyInsertPoint);
3360 }
3361 IRBuilder<> IRB(CopyInsertPoint);
3362 const DataLayout &DL = F.getDataLayout();
3363 for (Argument &Arg : F.args()) {
3364 if (Arg.hasByValAttr()) {
3365 Type *Ty = Arg.getParamByValType();
3366 const Align Alignment =
3367 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3368
3369 AllocaInst *AI = IRB.CreateAlloca(
3370 Ty, nullptr,
3371 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3372 ".byval");
3373 AI->setAlignment(Alignment);
3374 Arg.replaceAllUsesWith(AI);
3375
3376 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3377 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3378 }
3379 }
3380}
3381
3382PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3383 Value *ValueIfTrue,
3384 Instruction *ThenTerm,
3385 Value *ValueIfFalse) {
3386 PHINode *PHI = IRB.CreatePHI(ValueIfTrue->getType(), 2);
3387 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3388 PHI->addIncoming(ValueIfFalse, CondBlock);
3389 BasicBlock *ThenBlock = ThenTerm->getParent();
3390 PHI->addIncoming(ValueIfTrue, ThenBlock);
3391 return PHI;
3392}
3393
3394Value *FunctionStackPoisoner::createAllocaForLayout(
3395 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3396 AllocaInst *Alloca;
3397 if (Dynamic) {
3398 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3399 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3400 "MyAlloca");
3401 } else {
3402 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3403 nullptr, "MyAlloca");
3404 assert(Alloca->isStaticAlloca());
3405 }
3406 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3407 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3408 Alloca->setAlignment(Align(FrameAlignment));
3409 return Alloca;
3410}
3411
3412void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3413 BasicBlock &FirstBB = *F.begin();
3414 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3415 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3416 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3417 DynamicAllocaLayout->setAlignment(Align(32));
3418}
3419
3420void FunctionStackPoisoner::processDynamicAllocas() {
3421 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3422 assert(DynamicAllocaPoisonCallVec.empty());
3423 return;
3424 }
3425
3426 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3427 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3428 assert(APC.InsBefore);
3429 assert(APC.AI);
3430 assert(ASan.isInterestingAlloca(*APC.AI));
3431 assert(!APC.AI->isStaticAlloca());
3432
3433 IRBuilder<> IRB(APC.InsBefore);
3434 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3435 // Dynamic allocas will be unpoisoned unconditionally below in
3436 // unpoisonDynamicAllocas.
3437 // Flag that we need unpoison static allocas.
3438 }
3439
3440 // Handle dynamic allocas.
3441 createDynamicAllocasInitStorage();
3442 for (auto &AI : DynamicAllocaVec)
3443 handleDynamicAllocaCall(AI);
3444 unpoisonDynamicAllocas();
3445}
3446
3447/// Collect instructions in the entry block after \p InsBefore which initialize
3448/// permanent storage for a function argument. These instructions must remain in
3449/// the entry block so that uninitialized values do not appear in backtraces. An
3450/// added benefit is that this conserves spill slots. This does not move stores
3451/// before instrumented / "interesting" allocas.
3453 AddressSanitizer &ASan, Instruction &InsBefore,
3454 SmallVectorImpl<Instruction *> &InitInsts) {
3455 Instruction *Start = InsBefore.getNextNode();
3456 for (Instruction *It = Start; It; It = It->getNextNode()) {
3457 // Argument initialization looks like:
3458 // 1) store <Argument>, <Alloca> OR
3459 // 2) <CastArgument> = cast <Argument> to ...
3460 // store <CastArgument> to <Alloca>
3461 // Do not consider any other kind of instruction.
3462 //
3463 // Note: This covers all known cases, but may not be exhaustive. An
3464 // alternative to pattern-matching stores is to DFS over all Argument uses:
3465 // this might be more general, but is probably much more complicated.
3466 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3467 continue;
3468 if (auto *Store = dyn_cast<StoreInst>(It)) {
3469 // The store destination must be an alloca that isn't interesting for
3470 // ASan to instrument. These are moved up before InsBefore, and they're
3471 // not interesting because allocas for arguments can be mem2reg'd.
3472 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3473 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3474 continue;
3475
3476 Value *Val = Store->getValueOperand();
3477 bool IsDirectArgInit = isa<Argument>(Val);
3478 bool IsArgInitViaCast =
3479 isa<CastInst>(Val) &&
3480 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3481 // Check that the cast appears directly before the store. Otherwise
3482 // moving the cast before InsBefore may break the IR.
3483 Val == It->getPrevNode();
3484 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3485 if (!IsArgInit)
3486 continue;
3487
3488 if (IsArgInitViaCast)
3489 InitInsts.push_back(cast<Instruction>(Val));
3490 InitInsts.push_back(Store);
3491 continue;
3492 }
3493
3494 // Do not reorder past unknown instructions: argument initialization should
3495 // only involve casts and stores.
3496 return;
3497 }
3498}
3499
3501 // Alloca could have been renamed for uniqueness. Its true name will have been
3502 // recorded as an annotation.
3503 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3504 MDTuple *AllocaAnnotations =
3505 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3506 for (auto &Annotation : AllocaAnnotations->operands()) {
3507 if (!isa<MDTuple>(Annotation))
3508 continue;
3509 auto AnnotationTuple = cast<MDTuple>(Annotation);
3510 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3511 Index++) {
3512 // All annotations are strings
3513 auto MetadataString =
3514 cast<MDString>(AnnotationTuple->getOperand(Index));
3515 if (MetadataString->getString() == "alloca_name_altered")
3516 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3517 ->getString();
3518 }
3519 }
3520 }
3521 return AI->getName();
3522}
3523
3524void FunctionStackPoisoner::processStaticAllocas() {
3525 if (AllocaVec.empty()) {
3526 assert(StaticAllocaPoisonCallVec.empty());
3527 return;
3528 }
3529
3530 int StackMallocIdx = -1;
3531 DebugLoc EntryDebugLocation;
3532 if (auto SP = F.getSubprogram())
3533 EntryDebugLocation =
3534 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3535
3536 Instruction *InsBefore = AllocaVec[0];
3537 IRBuilder<> IRB(InsBefore);
3538
3539 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3540 // debug info is broken, because only entry-block allocas are treated as
3541 // regular stack slots.
3542 auto InsBeforeB = InsBefore->getParent();
3543 assert(InsBeforeB == &F.getEntryBlock());
3544 for (auto *AI : StaticAllocasToMoveUp)
3545 if (AI->getParent() == InsBeforeB)
3546 AI->moveBefore(InsBefore->getIterator());
3547
3548 // Move stores of arguments into entry-block allocas as well. This prevents
3549 // extra stack slots from being generated (to house the argument values until
3550 // they can be stored into the allocas). This also prevents uninitialized
3551 // values from being shown in backtraces.
3552 SmallVector<Instruction *, 8> ArgInitInsts;
3553 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3554 for (Instruction *ArgInitInst : ArgInitInsts)
3555 ArgInitInst->moveBefore(InsBefore->getIterator());
3556
3557 // If we have a call to llvm.localescape, keep it in the entry block.
3558 if (LocalEscapeCall)
3559 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3560
3562 SVD.reserve(AllocaVec.size());
3563 for (AllocaInst *AI : AllocaVec) {
3566 ASan.getAllocaSizeInBytes(*AI),
3567 0,
3568 AI->getAlign().value(),
3569 AI,
3570 0,
3571 0};
3572 SVD.push_back(D);
3573 }
3574
3575 // Minimal header size (left redzone) is 4 pointers,
3576 // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms.
3577 uint64_t Granularity = 1ULL << Mapping.Scale;
3578 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3579 const ASanStackFrameLayout &L =
3580 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3581
3582 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3584 for (auto &Desc : SVD)
3585 AllocaToSVDMap[Desc.AI] = &Desc;
3586
3587 // Update SVD with information from lifetime intrinsics.
3588 for (const auto &APC : StaticAllocaPoisonCallVec) {
3589 assert(APC.InsBefore);
3590 assert(APC.AI);
3591 assert(ASan.isInterestingAlloca(*APC.AI));
3592 assert(APC.AI->isStaticAlloca());
3593
3594 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3595 Desc.LifetimeSize = Desc.Size;
3596 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3597 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3598 if (LifetimeLoc->getFile() == FnLoc->getFile())
3599 if (unsigned Line = LifetimeLoc->getLine())
3600 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3601 }
3602 }
3603 }
3604
3605 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3606 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3607 uint64_t LocalStackSize = L.FrameSize;
3608 bool DoStackMalloc =
3609 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3610 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3611 bool DoDynamicAlloca = ClDynamicAllocaStack;
3612 // Don't do dynamic alloca or stack malloc if:
3613 // 1) There is inline asm: too often it makes assumptions on which registers
3614 // are available.
3615 // 2) There is a returns_twice call (typically setjmp), which is
3616 // optimization-hostile, and doesn't play well with introduced indirect
3617 // register-relative calculation of local variable addresses.
3618 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3619 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3620
3621 Type *PtrTy = F.getDataLayout().getAllocaPtrType(F.getContext());
3622 Value *StaticAlloca =
3623 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3624
3625 Value *FakeStackPtr;
3626 Value *FakeStackInt;
3627 Value *LocalStackBase;
3628 Value *LocalStackBaseAlloca;
3629 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3630
3631 if (DoStackMalloc) {
3632 LocalStackBaseAlloca =
3633 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3634 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3635 // void *FakeStack = __asan_option_detect_stack_use_after_return
3636 // ? __asan_stack_malloc_N(LocalStackSize)
3637 // : nullptr;
3638 // void *LocalStackBase = (FakeStack) ? FakeStack :
3639 // alloca(LocalStackSize);
3640 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3642 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3643 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3645 Instruction *Term =
3646 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3647 IRBuilder<> IRBIf(Term);
3648 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3649 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3650 Value *FakeStackValue =
3651 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3652 ConstantInt::get(IntptrTy, LocalStackSize));
3653 IRB.SetInsertPoint(InsBefore);
3654 FakeStackInt = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue,
3655 Term, ConstantInt::get(IntptrTy, 0));
3656 } else {
3657 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode:Always)
3658 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3659 // void *LocalStackBase = (FakeStack) ? FakeStack :
3660 // alloca(LocalStackSize);
3661 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3662 FakeStackInt =
3663 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3664 ConstantInt::get(IntptrTy, LocalStackSize));
3665 }
3666 FakeStackPtr = IRB.CreateIntToPtr(FakeStackInt, PtrTy);
3667 Value *NoFakeStack =
3668 IRB.CreateICmpEQ(FakeStackInt, Constant::getNullValue(IntptrTy));
3669 Instruction *Term =
3670 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3671 IRBuilder<> IRBIf(Term);
3672 Value *AllocaValue =
3673 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3674
3675 IRB.SetInsertPoint(InsBefore);
3676 LocalStackBase =
3677 createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStackPtr);
3678 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3679 DIExprFlags |= DIExpression::DerefBefore;
3680 } else {
3681 // void *FakeStack = nullptr;
3682 // void *LocalStackBase = alloca(LocalStackSize);
3683 FakeStackInt = Constant::getNullValue(IntptrTy);
3684 FakeStackPtr = Constant::getNullValue(PtrTy);
3685 LocalStackBase =
3686 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3687 LocalStackBaseAlloca = LocalStackBase;
3688 }
3689
3690 // Replace Alloca instructions with base+offset.
3691 SmallVector<Value *> NewAllocaPtrs;
3692 for (const auto &Desc : SVD) {
3693 AllocaInst *AI = Desc.AI;
3694 replaceDbgDeclare(AI, LocalStackBaseAlloca, DIB, DIExprFlags, Desc.Offset);
3695 Value *NewAllocaPtr = IRB.CreatePtrAdd(
3696 LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset));
3697 AI->replaceAllUsesWith(NewAllocaPtr);
3698 NewAllocaPtrs.push_back(NewAllocaPtr);
3699 }
3700
3701 // The left-most redzone has enough space for at least 4 pointers.
3702 // Write the Magic value to redzone[0].
3703 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3704 LocalStackBase);
3705 // Write the frame description constant to redzone[1].
3706 Value *BasePlus1 = IRB.CreatePtrAdd(
3707 LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize / 8));
3708 GlobalVariable *StackDescriptionGlobal =
3709 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3710 /*AllowMerging*/ true, genName("stack"));
3711 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3712 IRB.CreateStore(Description, BasePlus1);
3713 // Write the PC to redzone[2].
3714 Value *BasePlus2 = IRB.CreatePtrAdd(
3715 LocalStackBase, ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8));
3716 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3717
3718 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3719
3720 // Poison the stack red zones at the entry.
3721 Value *ShadowBase =
3722 ASan.memToShadow(IRB.CreatePtrToInt(LocalStackBase, IntptrTy), IRB);
3723 // As mask we must use most poisoned case: red zones and after scope.
3724 // As bytes we can use either the same or just red zones only.
3725 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3726
3727 if (!StaticAllocaPoisonCallVec.empty()) {
3728 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3729
3730 // Poison static allocas near lifetime intrinsics.
3731 for (const auto &APC : StaticAllocaPoisonCallVec) {
3732 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3733 assert(Desc.Offset % L.Granularity == 0);
3734 size_t Begin = Desc.Offset / L.Granularity;
3735 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3736
3737 IRBuilder<> IRB(APC.InsBefore);
3738 copyToShadow(ShadowAfterScope,
3739 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3740 IRB, ShadowBase);
3741 }
3742 }
3743
3744 // Remove lifetime markers now that these are no longer allocas.
3745 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3746 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3747 auto *I = cast<Instruction>(U);
3748 if (I->isLifetimeStartOrEnd())
3749 I->eraseFromParent();
3750 }
3751 }
3752
3753 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3754 SmallVector<uint8_t, 64> ShadowAfterReturn;
3755
3756 // (Un)poison the stack before all ret instructions.
3757 for (Instruction *Ret : RetVec) {
3758 IRBuilder<> IRBRet(Ret);
3759 // Mark the current frame as retired.
3760 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3761 LocalStackBase);
3762 if (DoStackMalloc) {
3763 assert(StackMallocIdx >= 0);
3764 // if FakeStack != 0 // LocalStackBase == FakeStack
3765 // // In use-after-return mode, poison the whole stack frame.
3766 // if StackMallocIdx <= 4
3767 // // For small sizes inline the whole thing:
3768 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3769 // **SavedFlagPtr(FakeStack) = 0
3770 // else
3771 // __asan_stack_free_N(FakeStack, LocalStackSize)
3772 // else
3773 // <This is not a fake stack; unpoison the redzones>
3774 Value *Cmp =
3775 IRBRet.CreateICmpNE(FakeStackInt, Constant::getNullValue(IntptrTy));
3776 Instruction *ThenTerm, *ElseTerm;
3777 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3778
3779 IRBuilder<> IRBPoison(ThenTerm);
3780 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3781 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3782 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3784 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3785 ShadowBase);
3786 Value *SavedFlagPtrPtr = IRBPoison.CreatePtrAdd(
3787 FakeStackPtr,
3788 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3789 Value *SavedFlagPtr = IRBPoison.CreateLoad(IntptrTy, SavedFlagPtrPtr);
3790 IRBPoison.CreateStore(
3791 Constant::getNullValue(IRBPoison.getInt8Ty()),
3792 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3793 } else {
3794 // For larger frames call __asan_stack_free_*.
3795 RTCI.createRuntimeCall(
3796 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3797 {FakeStackInt, ConstantInt::get(IntptrTy, LocalStackSize)});
3798 }
3799
3800 IRBuilder<> IRBElse(ElseTerm);
3801 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3802 } else {
3803 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3804 }
3805 }
3806
3807 // We are done. Remove the old unused alloca instructions.
3808 for (auto *AI : AllocaVec)
3809 AI->eraseFromParent();
3810}
3811
3812void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3813 IRBuilder<> &IRB, bool DoPoison) {
3814 // For now just insert the call to ASan runtime.
3815 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3816 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3817 RTCI.createRuntimeCall(
3818 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3819 {AddrArg, SizeArg});
3820}
3821
3822// Handling llvm.lifetime intrinsics for a given %alloca:
3823// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3824// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3825// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3826// could be poisoned by previous llvm.lifetime.end instruction, as the
3827// variable may go in and out of scope several times, e.g. in loops).
3828// (3) if we poisoned at least one %alloca in a function,
3829// unpoison the whole stack frame at function exit.
3830void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3831 IRBuilder<> IRB(AI);
3832
3833 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3834 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3835
3836 Value *Zero = Constant::getNullValue(IntptrTy);
3837 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3838 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3839
3840 // Since we need to extend alloca with additional memory to locate
3841 // redzones, and OldSize is number of allocated blocks with
3842 // ElementSize size, get allocated memory size in bytes by
3843 // OldSize * ElementSize.
3844 Value *OldSize = IRB.CreateAllocationSize(IntptrTy, AI);
3845
3846 // PartialSize = OldSize % 32
3847 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3848
3849 // Misalign = kAllocaRzSize - PartialSize;
3850 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3851
3852 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3853 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3854 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3855
3856 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3857 // Alignment is added to locate left redzone, PartialPadding for possible
3858 // partial redzone and kAllocaRzSize for right redzone respectively.
3859 Value *AdditionalChunkSize = IRB.CreateAdd(
3860 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3861 PartialPadding);
3862
3863 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3864
3865 // Insert new alloca with new NewSize and Alignment params.
3866 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3867 NewAlloca->setAlignment(Alignment);
3868
3869 // NewAddress = Address + Alignment
3870 Value *NewAddress =
3871 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3872 ConstantInt::get(IntptrTy, Alignment.value()));
3873
3874 // Insert __asan_alloca_poison call for new created alloca.
3875 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3876
3877 // Store the last alloca's address to DynamicAllocaLayout. We'll need this
3878 // for unpoisoning stuff.
3879 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3880
3881 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3882
3883 // Remove lifetime markers now that this is no longer an alloca.
3884 for (User *U : make_early_inc_range(AI->users())) {
3885 auto *I = cast<Instruction>(U);
3886 if (I->isLifetimeStartOrEnd())
3887 I->eraseFromParent();
3888 }
3889
3890 // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
3891 AI->replaceAllUsesWith(NewAddressPtr);
3892
3893 // We are done. Erase old alloca from parent.
3894 AI->eraseFromParent();
3895}
3896
3897// isSafeAccess returns true if Addr is always inbounds with respect to its
3898// base object. For example, it is a field access or an array access with
3899// constant inbounds index.
3900bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3901 Value *Addr, TypeSize TypeStoreSize) const {
3902 if (TypeStoreSize.isScalable())
3903 // TODO: We can use vscale_range to convert a scalable value to an
3904 // upper bound on the access size.
3905 return false;
3906
3907 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3908 if (!SizeOffset.bothKnown())
3909 return false;
3910
3911 uint64_t Size = SizeOffset.Size.getZExtValue();
3912 int64_t Offset = SizeOffset.Offset.getSExtValue();
3913
3914 // Three checks are required to ensure safety:
3915 // . Offset >= 0 (since the offset is given from the base ptr)
3916 // . Size >= Offset (unsigned)
3917 // . Size - Offset >= NeededSize (unsigned)
3918 return Offset >= 0 && Size >= uint64_t(Offset) &&
3919 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3920}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< int > ClShadowAddrSpace("asan-shadow-addr-space", cl::desc("Address space for pointers to the shadow map"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::list< unsigned > ClAddrSpaces("asan-instrument-address-spaces", cl::desc("Only instrument variables in the specified address spaces."), cl::Hidden, cl::CommaSeparated, cl::callback([](const unsigned &AddrSpace) { SrcAddrSpaces.insert(AddrSpace);}))
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static bool isSupportedAddrspace(const Triple &TargetTriple, Value *Addr)
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static SmallSet< unsigned, 8 > SrcAddrSpaces
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) such that `BinOp V, RHS` can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCannotMerge()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition Comdat.h:38
Conditional Branch instruction.
static CondBrInst * Create(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse, InsertPosition InsertBefore=nullptr)
ConstantArray - Constant Array Declarations.
Definition Constants.h:576
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition Function.h:860
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition Function.cpp:378
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:615
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:217
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:278
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
Definition Globals.cpp:570
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1860
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:564
LLVM_ABI Value * CreateAllocationSize(Type *DestTy, AllocaInst *AI)
Get allocation size of an alloca as a runtime Value* (handles both static and dynamic allocas and vsc...
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1894
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:710
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2246
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2359
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2194
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1539
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:579
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2048
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:584
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2335
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1967
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2496
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1835
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2331
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1446
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended from a 64-bit value.
Definition IRBuilder.h:532
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1877
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1577
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1890
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1429
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2189
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition IRBuilder.h:2664
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2510
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2272
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:617
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1913
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1599
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:569
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2204
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1463
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
A wrapper class for inspecting calls to intrinsic functions.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition MDBuilder.cpp:48
Metadata node.
Definition Metadata.h:1080
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
Tuple of metadata.
Definition Metadata.h:1500
This is the common base class for memset/memcpy/memmove.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:143
static MemoryEffectsBase otherMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:159
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:483
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
EltTy front() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition Triple.h:959
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition Triple.h:630
bool isBPF() const
Tests whether the target is eBPF.
Definition Triple.h:1205
bool isOSNetBSD() const
Definition Triple.h:667
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:866
bool isABIN32() const
Definition Triple.h:1193
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition Triple.h:1089
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:427
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition Triple.h:1078
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition Triple.h:1084
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:716
@ UnknownObjectFormat
Definition Triple.h:327
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition Triple.h:964
bool isOSLinux() const
Tests whether the OS is Linux.
Definition Triple.h:765
bool isAMDGPU() const
Definition Triple.h:956
bool isMacOSX() const
Is this a Mac OS X triple.
Definition Triple.h:596
bool isOSFreeBSD() const
Definition Triple.h:675
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition Triple.h:786
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition Triple.h:615
bool isiOS() const
Is this an iOS triple.
Definition Triple.h:605
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition Triple.h:863
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition Triple.h:1177
bool isOSFuchsia() const
Definition Triple.h:679
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition Triple.h:706
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:427
LLVM_ABI bool isSwiftError() const
Return true if this value is a swifterror value.
Definition Value.cpp:1129
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
cb< typename detail::callback_traits< F >::result_type, typename detail::callback_traits< F >::arg_type > callback(F CB)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
Op::Description Desc
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
FunctionAddr VTableAddr uintptr_t uintptr_t Version
Definition InstrProf.h:334
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classifie...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ ArgMem
Access to memory via argument pointers.
Definition ModRef.h:62
@ Other
Any other memory.
Definition ModRef.h:68
@ InaccessibleMem
Memory that is inaccessible via LLVM IR.
Definition ModRef.h:64
TargetTransformInfo TTI
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isModAndRefSet(const ModRefInfo MRI)
Definition ModRef.h:46
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition Local.cpp:3889
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition Demangle.cpp:20
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1957
#define N
LLVM_ABI ASanAccessInfo(int32_t Packed)
const uint8_t AccessSizeIndex
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.