Loads.cpp
//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}

static bool isDereferenceableAndAlignedPointerViaAssumption(
    const Value *Ptr, Align Alignment,
    function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
    const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
    const DominatorTree *DT) {
  if (!CtxI)
    return false;
  /// Look through assumes to see if both dereferenceability and alignment can
  /// be proven by an assume if needed.
  RetainedKnowledge AlignRK;
  RetainedKnowledge DerefRK;
  bool PtrCanBeFreed = Ptr->canBeFreed();
  bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
  return getKnowledgeForValue(
      Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
      [&](RetainedKnowledge RK, Instruction *Assume, auto) {
        if (!isValidAssumeForContext(Assume, CtxI, DT))
          return false;
        if (RK.AttrKind == Attribute::Alignment)
          AlignRK = std::max(AlignRK, RK);

        // Dereferenceable information from assumptions is only valid if the
        // value cannot be freed between the assumption and use.
        if ((!PtrCanBeFreed || willNotFreeBetween(Assume, CtxI)) &&
            RK.AttrKind == Attribute::Dereferenceable)
          DerefRK = std::max(DerefRK, RK);
        IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
        if (IsAligned && DerefRK && CheckSize(DerefRK))
          return true; // We have found what we needed so we stop looking.
        return false;  // Other assumes may have better information, so
                       // keep looking.
      });
}
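
// Illustrative IR (a sketch, not taken from the LLVM test suite): this helper
// can prove a 16-byte-aligned, 64-byte-dereferenceable access from operand
// bundles on an assume that is valid at the context instruction.
//
// \code
//   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16),
//                                    "dereferenceable"(ptr %p, i64 64)]
//   %v = load i32, ptr %p, align 16
// \endcode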

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas here, as they aren't speculatable
    // anyway.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  /// TODO: Refactor this function to be able to search independently for
  /// dereferenceability and alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (const Value *RP = getArgumentAliasingToReturnedPointer(
            Call, /*MustPreserveOffset=*/true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  return isDereferenceableAndAlignedPointerViaAssumption(
      V, Alignment,
      [Size](const RetainedKnowledge &RK) {
        return RK.ArgValue >= Size.getZExtValue();
      },
      DL, CtxI, AC, DT);
}
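
// Worked example for the recursive walk above (hypothetical IR): with %base
// known dereferenceable for 64 bytes and aligned to 8, an 8-byte load at
// constant offset 16 is accepted because 16 + 8 <= 64 and 16 is a multiple
// of the required alignment.
//
// \code
//   define i64 @f(ptr align 8 dereferenceable(64) %base) {
//     %p = getelementptr inbounds i8, ptr %base, i64 16
//     %v = load i64, ptr %p, align 8
//     ret i64 %v
//   }
// \endcode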

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
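
// Caller-side sketch (names hypothetical): ask whether an 8-byte access
// through Ptr can be speculated at CtxI, given whatever analyses are at hand.
//
// \code
//   const DataLayout &DL = F.getDataLayout();
//   APInt Size(DL.getIndexTypeSizeInBits(Ptr->getType()), 8);
//   bool Safe = isDereferenceableAndAlignedPointer(Ptr, Align(8), Size, DL,
//                                                  CtxI, &AC, &DT, &TLI);
// \endcode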

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  const SCEV *PtrSCEV = SE.getSCEV(Ptr);
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, LI->getAlign(), EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(),
        AC, &DT);

  const SCEV *EltSizeSCEV = SE.getConstant(EltSize);
  return isDereferenceableAndAlignedInLoop(PtrSCEV, LI->getAlign(), EltSizeSCEV,
                                           L, SE, DT, AC, Predicates);
}
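
// Example of the loop-invariant case (sketch): %p is defined outside the
// loop, so the query reduces to plain dereferenceability and alignment of %p
// at the loop header.
//
// \code
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
//     %v = load i32, ptr %p, align 4   ; %p loop-invariant
//     %i.next = add nuw i64 %i, 1
//     ...
// \endcode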

bool llvm::isDereferenceableAndAlignedInLoop(
    const SCEV *PtrSCEV, Align Alignment, const SCEV *EltSizeSCEV, Loop *L,
    ScalarEvolution &SE, DominatorTree &DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrSCEV);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  const APInt &EltSize = cast<SCEVConstant>(EltSizeSCEV)->getAPInt();
  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: Generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount =
      Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
                 : SE.getSymbolicMaxBackedgeTakenCount(L);
  const SCEV *BECount = Predicates
                            ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
                            : SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;
  std::optional<ScalarEvolution::LoopGuards> LoopGuards;

  auto &DL = L->getHeader()->getDataLayout();
  const auto &[AccessStart, AccessEnd] =
      getStartAndEndForAccess(L, PtrSCEV, EltSizeSCEV, BECount, MaxBECount, &SE,
                              nullptr, &DT, AC, LoopGuards);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  if (isa<SCEVCouldNotCompute>(PtrDiff))
    return false;

  if (!LoopGuards)
    LoopGuards.emplace(
        ScalarEvolution::LoopGuards::collect(AddRec->getLoop(), SE));

  APInt MaxPtrDiff =
      SE.getUnsignedRangeMax(SE.applyLoopGuards(PtrDiff, *LoopGuards));

  Value *Base = nullptr;
  APInt AccessSize;
  const SCEV *AccessSizeSCEV = nullptr;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = std::move(MaxPtrDiff);
    AccessSizeSCEV = PtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The code below assumes the offset is unsigned, but GEP
    // offsets are treated as signed so we can end up with a signed value
    // here too. For example, suppose the initial PHI value is (i8 255),
    // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: Generalize if a case is found which warrants it.
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    bool Overflow = false;
    AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
    if (Overflow)
      return false;
    AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
    Base = NewBase->getValue();
  } else
    return false;

  Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
  if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
    if (isa<BranchInst>(LoopPred->getTerminator()))
      CtxI = LoopPred->getTerminator();
  }
  return isDereferenceableAndAlignedPointerViaAssumption(
             Base, Alignment,
             [&SE, AccessSizeSCEV, &LoopGuards](const RetainedKnowledge &RK) {
               return SE.isKnownPredicate(
                   CmpInst::ICMP_ULE,
                   SE.applyLoopGuards(AccessSizeSCEV, *LoopGuards),
                   SE.applyLoopGuards(SE.getSCEV(RK.IRArgValue), *LoopGuards));
             },
             DL, CtxI, AC, &DT) ||
         isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            CtxI, AC, &DT);
}
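
// Example of the affine case (sketch): the pointer SCEV is {%a,+,4} over the
// loop and the element size equals the step, so the accesses are contiguous
// and the range [%a, %a + 4 * (BECount + 1)) must be dereferenceable.
//
// \code
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
//     %gep = getelementptr inbounds i32, ptr %a, i64 %i
//     %v = load i32, ptr %gep, align 4
//     %i.next = add nuw i64 %i, 1
//     ...
// \endcode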

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}
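
// For illustration: in a function compiled with, e.g., -fsanitize=thread the
// IR carries a sanitize_thread attribute, and hoisting a load the source
// never executed could make TSan report a race that does not exist:
//
// \code
//   define void @f(ptr %p) sanitize_thread { ... }
// \endcode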

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for an
    // unconditional load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}
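
// Block-scan example (sketch): even with no dereferenceability facts on %p,
// the earlier store proves that a same-sized load immediately after it
// cannot trap, so it is safe to emit the load unconditionally.
//
// \code
//   store i32 0, ptr %p, align 4
//   %v = load i32, ptr %p, align 4   ; cannot trap: the store already did not
// \endcode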

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));
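
// The limit is a hidden cl::opt, so it can be tuned from the command line,
// e.g. (illustrative invocation):
//
// \code
//   opt -passes=jump-threading -available-load-scan-limit=12 in.ll -S
// \endcode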

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  if (LoadOffset.getBitWidth() != StoreOffset.getBitWidth())
    return false;
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
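
// Worked example (sketch): an i32 load at %base+8 against an i64 store at
// %base+0 gives access ranges [8, 12) and [0, 8). Their intersection is
// empty, so the store can be stepped over during the availability scan.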

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // Handle offsets.
    int64_t StoreOffset = 0, LoadOffset = 0;
    const Value *StoreBase =
        GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
    const Value *LoadBase =
        GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
    if (StoreBase != LoadBase || LoadOffset < StoreOffset)
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}
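
// Forwarding examples (sketch) for the cases handled above:
//
// \code
//   store i32 %x, ptr %p
//   %v1 = load i32, ptr %p    ; forwarded: %v1 == %x
//
//   call void @llvm.memset.p0.i64(ptr %p, i8 1, i64 16, i1 false)
//   %v2 = load i32, ptr %p    ; forwarded: splat constant 0x01010101
// \endcode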

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
static bool isPointerUseReplacable(const Use &U, bool HasNonAddressBits) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst>(User))
      continue;
    // FIXME: The PtrToIntInst case here is not strictly correct, as it
    // changes which provenance is exposed.
    if (!HasNonAddressBits && isa<PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}
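
// Example of a replaceable use (sketch): the pointer value only reaches an
// icmp, possibly via a select, so only its address (not its provenance) is
// observed and equality-based replacement is safe.
//
// \code
//   %s = select i1 %c, ptr %p, ptr %q
//   %eq = icmp eq ptr %s, %r
// \endcode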

static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  // Conversely, replacing null in the default address space with the
  // destination pointer is always valid.
  if (isa<ConstantPointerNull>(From) &&
      From->getType()->getPointerAddressSpace() == 0)
    return true;
  if (isa<Constant>(To) && To->getType()->isPointerTy() &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  Type *Ty = To->getType();
  assert(U->getType() == Ty && "values must have matching types");
  // Not a pointer, just return true.
  if (!Ty->isPtrOrPtrVectorTy())
    return true;

  // Do not perform replacements in lifetime intrinsic arguments.
  if (isa<LifetimeIntrinsic>(U.getUser()))
    return false;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;

  bool HasNonAddressBits =
      DL.getAddressSizeInBits(Ty) != DL.getPointerTypeSizeInBits(Ty);
  return isPointerUseReplacable(U, HasNonAddressBits);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPtrOrPtrVectorTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}

bool llvm::isReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<LoadInst *> &NonDereferenceableAndAlignedLoads,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          NonDereferenceableAndAlignedLoads.push_back(LI);
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() ||
                 I.mayThrow()) {
        return false;
      }
    }
  }
  return true;
}

LinearExpression llvm::decomposeLinearExpression(const DataLayout &DL,
                                                 Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Must be called with pointer arg");

  unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
  LinearExpression Expr(Ptr, BitWidth);

  while (true) {
    auto *GEP = dyn_cast<GEPOperator>(Expr.BasePtr);
    if (!GEP || GEP->getSourceElementType()->isScalableTy())
      return Expr;

    Value *VarIndex = nullptr;
    for (Value *Index : GEP->indices()) {
      if (isa<ConstantInt>(Index))
        continue;
      // Only allow a single variable index. We do not bother to handle the
      // case of the same variable index appearing multiple times.
      if (Expr.Index || VarIndex)
        return Expr;
      VarIndex = Index;
    }

    // Don't return non-canonical indexes.
    if (VarIndex && !VarIndex->getType()->isIntegerTy(BitWidth))
      return Expr;

    // We have verified that we can fully handle this GEP, so we can update
    // Expr members past this point.
    Expr.BasePtr = GEP->getPointerOperand();
    Expr.Flags = Expr.Flags.intersectForOffsetAdd(GEP->getNoWrapFlags());
    for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
         GTI != GTE; ++GTI) {
      Value *Index = GTI.getOperand();
      if (auto *ConstOffset = dyn_cast<ConstantInt>(Index)) {
        if (ConstOffset->isZero())
          continue;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = ConstOffset->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          Expr.Offset += SL->getElementOffset(ElementIdx);
          continue;
        }
        // Truncate if type size exceeds index space.
        APInt IndexedSize(BitWidth, GTI.getSequentialElementStride(DL),
                          /*isSigned=*/false,
                          /*implicitTrunc=*/true);
        Expr.Offset += ConstOffset->getValue() * IndexedSize;
        continue;
      }

      // FIXME: Also look through a mul/shl in the index.
      assert(Expr.Index == nullptr && "Shouldn't have index yet");
      Expr.Index = Index;
      // Truncate if type size exceeds index space.
      Expr.Scale = APInt(BitWidth, GTI.getSequentialElementStride(DL),
                         /*isSigned=*/false, /*implicitTrunc=*/true);
    }
  }

  return Expr;
}
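
// Worked example (sketch): for
// \code
//   %g = getelementptr inbounds [4 x i32], ptr %base, i64 %i, i64 1
// \endcode
// the decomposition is BasePtr = %base, Index = %i, Scale = 16 (the byte
// stride of one [4 x i32] element) and Offset = 4, i.e.
// %g == %base + 16 * %i + 4. (Assumes %i already has the index type width,
// as required by the canonical-index check above.)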