LLVM 22.0.0git
SCCPSolver.cpp
Go to the documentation of this file.
1//===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// \file
10// This file implements the Sparse Conditional Constant Propagation (SCCP)
11// utility.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/SetVector.h"
24#include "llvm/IR/IRBuilder.h"
25#include "llvm/IR/InstVisitor.h"
27#include "llvm/IR/NoFolder.h"
30#include "llvm/Support/Debug.h"
34#include <cassert>
35#include <utility>
36#include <vector>
37
38using namespace llvm;
39using namespace PatternMatch;
40
41#define DEBUG_TYPE "sccp"
42
// The maximum number of range extensions allowed for operations requiring
// widening.
static const unsigned MaxNumRangeExtensions = 10;
46
47/// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
52
53namespace llvm {
54
56 return LV.isConstant() ||
58}
59
63
65 Constant *Const = getConstantOrNull(V);
66 if (!Const)
67 return false;
68 // Replacing `musttail` instructions with constant breaks `musttail` invariant
69 // unless the call itself can be removed.
70 // Calls with "clang.arc.attachedcall" implicitly use the return value and
71 // those uses cannot be updated with a constant.
73 if (CB && ((CB->isMustTailCall() && !wouldInstructionBeTriviallyDead(CB)) ||
76
77 // Don't zap returns of the callee
78 if (F)
80
81 LLVM_DEBUG(dbgs() << " Can\'t treat the result of call " << *CB
82 << " as a constant\n");
83 return false;
84 }
85
86 LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
87
88 // Replaces all of the uses of a variable with uses of the constant.
89 V->replaceAllUsesWith(Const);
90 return true;
91}
92
93/// Helper for getting ranges from \p Solver. Instructions inserted during
94/// simplification are unavailable in the solver, so we return a full range for
95/// them.
97 const SmallPtrSetImpl<Value *> &InsertedValues) {
98 if (auto *Const = dyn_cast<Constant>(Op))
99 return Const->toConstantRange();
100 if (InsertedValues.contains(Op)) {
101 unsigned Bitwidth = Op->getType()->getScalarSizeInBits();
102 return ConstantRange::getFull(Bitwidth);
103 }
104 return Solver.getLatticeValueFor(Op).asConstantRange(Op->getType(),
105 /*UndefAllowed=*/false);
106}
107
108/// Try to use \p Inst's value range from \p Solver to infer the NUW flag.
109static bool refineInstruction(SCCPSolver &Solver,
110 const SmallPtrSetImpl<Value *> &InsertedValues,
111 Instruction &Inst) {
112 bool Changed = false;
113 auto GetRange = [&Solver, &InsertedValues](Value *Op) {
114 return getRange(Op, Solver, InsertedValues);
115 };
116
118 if (Inst.hasNoSignedWrap() && Inst.hasNoUnsignedWrap())
119 return false;
120
121 auto RangeA = GetRange(Inst.getOperand(0));
122 auto RangeB = GetRange(Inst.getOperand(1));
123 if (!Inst.hasNoUnsignedWrap()) {
125 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
127 if (NUWRange.contains(RangeA)) {
129 Changed = true;
130 }
131 }
132 if (!Inst.hasNoSignedWrap()) {
134 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
136 if (NSWRange.contains(RangeA)) {
137 Inst.setHasNoSignedWrap();
138 Changed = true;
139 }
140 }
141 } else if (isa<PossiblyNonNegInst>(Inst) && !Inst.hasNonNeg()) {
142 auto Range = GetRange(Inst.getOperand(0));
143 if (Range.isAllNonNegative()) {
144 Inst.setNonNeg();
145 Changed = true;
146 }
147 } else if (TruncInst *TI = dyn_cast<TruncInst>(&Inst)) {
148 if (TI->hasNoSignedWrap() && TI->hasNoUnsignedWrap())
149 return false;
150
151 auto Range = GetRange(Inst.getOperand(0));
152 uint64_t DestWidth = TI->getDestTy()->getScalarSizeInBits();
153 if (!TI->hasNoUnsignedWrap()) {
154 if (Range.getActiveBits() <= DestWidth) {
155 TI->setHasNoUnsignedWrap(true);
156 Changed = true;
157 }
158 }
159 if (!TI->hasNoSignedWrap()) {
160 if (Range.getMinSignedBits() <= DestWidth) {
161 TI->setHasNoSignedWrap(true);
162 Changed = true;
163 }
164 }
165 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&Inst)) {
166 if (GEP->hasNoUnsignedWrap() || !GEP->hasNoUnsignedSignedWrap())
167 return false;
168
169 if (all_of(GEP->indices(),
170 [&](Value *V) { return GetRange(V).isAllNonNegative(); })) {
171 GEP->setNoWrapFlags(GEP->getNoWrapFlags() |
173 Changed = true;
174 }
175 }
176
177 return Changed;
178}
179
180/// Try to replace signed instructions with their unsigned equivalent.
181static bool replaceSignedInst(SCCPSolver &Solver,
182 SmallPtrSetImpl<Value *> &InsertedValues,
183 Instruction &Inst) {
184 // Determine if a signed value is known to be >= 0.
185 auto isNonNegative = [&Solver, &InsertedValues](Value *V) {
186 return getRange(V, Solver, InsertedValues).isAllNonNegative();
187 };
188
189 Instruction *NewInst = nullptr;
190 switch (Inst.getOpcode()) {
191 case Instruction::SIToFP:
192 case Instruction::SExt: {
193 // If the source value is not negative, this is a zext/uitofp.
194 Value *Op0 = Inst.getOperand(0);
195 if (!isNonNegative(Op0))
196 return false;
197 NewInst = CastInst::Create(Inst.getOpcode() == Instruction::SExt
198 ? Instruction::ZExt
199 : Instruction::UIToFP,
200 Op0, Inst.getType(), "", Inst.getIterator());
201 NewInst->setNonNeg();
202 break;
203 }
204 case Instruction::AShr: {
205 // If the shifted value is not negative, this is a logical shift right.
206 Value *Op0 = Inst.getOperand(0);
207 if (!isNonNegative(Op0))
208 return false;
209 NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", Inst.getIterator());
210 NewInst->setIsExact(Inst.isExact());
211 break;
212 }
213 case Instruction::SDiv:
214 case Instruction::SRem: {
215 // If both operands are not negative, this is the same as udiv/urem.
216 Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
217 if (!isNonNegative(Op0) || !isNonNegative(Op1))
218 return false;
219 auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
220 : Instruction::URem;
221 NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "", Inst.getIterator());
222 if (Inst.getOpcode() == Instruction::SDiv)
223 NewInst->setIsExact(Inst.isExact());
224 break;
225 }
226 default:
227 return false;
228 }
229
230 // Wire up the new instruction and update state.
231 assert(NewInst && "Expected replacement instruction");
232 NewInst->takeName(&Inst);
233 InsertedValues.insert(NewInst);
234 Inst.replaceAllUsesWith(NewInst);
235 NewInst->setDebugLoc(Inst.getDebugLoc());
236 Solver.removeLatticeValueFor(&Inst);
237 Inst.eraseFromParent();
238 return true;
239}
240
241/// Try to use \p Inst's value range from \p Solver to simplify it.
243 SmallPtrSetImpl<Value *> &InsertedValues,
244 Instruction &Inst) {
245 auto GetRange = [&Solver, &InsertedValues](Value *Op) {
246 return getRange(Op, Solver, InsertedValues);
247 };
248
249 Value *X;
250 const APInt *RHSC;
251 // Remove masking operations.
252 if (match(&Inst, m_And(m_Value(X), m_LowBitMask(RHSC)))) {
253 ConstantRange LRange = GetRange(X);
254 if (LRange.getUnsignedMax().ule(*RHSC))
255 return X;
256 }
257
258 // Check if we can simplify [us]cmp(X, Y) to X - Y.
259 if (auto *Cmp = dyn_cast<CmpIntrinsic>(&Inst)) {
260 Value *LHS = Cmp->getOperand(0);
261 Value *RHS = Cmp->getOperand(1);
262 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
263 // Bail out on 1-bit comparisons.
264 if (BitWidth == 1)
265 return nullptr;
266 ConstantRange LRange = GetRange(LHS);
267 if (LRange.isSizeLargerThan(3))
268 return nullptr;
269 ConstantRange RRange = GetRange(RHS);
270 if (RRange.isSizeLargerThan(3))
271 return nullptr;
272 ConstantRange RHSLower = RRange.sub(APInt(BitWidth, 1));
273 ConstantRange RHSUpper = RRange.add(APInt(BitWidth, 1));
275 Cmp->isSigned() ? CmpInst::ICMP_SLE : CmpInst::ICMP_ULE;
276 if (!RHSLower.icmp(Pred, LRange) || !LRange.icmp(Pred, RHSUpper))
277 return nullptr;
278
279 IRBuilder<NoFolder> Builder(&Inst);
280 Value *Sub = Builder.CreateSub(LHS, RHS, Inst.getName(), /*HasNUW=*/false,
281 /*HasNSW=*/Cmp->isSigned());
282 InsertedValues.insert(Sub);
283 if (Sub->getType() != Inst.getType()) {
284 Sub = Builder.CreateSExtOrTrunc(Sub, Inst.getType());
285 InsertedValues.insert(Sub);
286 }
287 return Sub;
288 }
289
290 // Relax range checks.
291 if (auto *ICmp = dyn_cast<ICmpInst>(&Inst)) {
292 Value *X;
293 auto MatchTwoInstructionExactRangeCheck =
294 [&]() -> std::optional<ConstantRange> {
295 const APInt *RHSC;
296 if (!match(ICmp->getOperand(1), m_APInt(RHSC)))
297 return std::nullopt;
298
299 Value *LHS = ICmp->getOperand(0);
300 ICmpInst::Predicate Pred = ICmp->getPredicate();
301 const APInt *Offset;
303 return ConstantRange::makeExactICmpRegion(Pred, *RHSC).sub(*Offset);
304 // Match icmp eq/ne X & NegPow2, C
305 if (ICmp->isEquality()) {
306 const APInt *Mask;
307 if (match(LHS, m_OneUse(m_And(m_Value(X), m_NegatedPower2(Mask)))) &&
308 RHSC->countr_zero() >= Mask->countr_zero()) {
309 ConstantRange CR(*RHSC, *RHSC - *Mask);
310 return Pred == ICmpInst::ICMP_EQ ? CR : CR.inverse();
311 }
312 }
313 return std::nullopt;
314 };
315
316 if (auto CR = MatchTwoInstructionExactRangeCheck()) {
317 ConstantRange LRange = GetRange(X);
318 // Early exit if we know nothing about X.
319 if (LRange.isFullSet())
320 return nullptr;
321 auto ConvertCRToICmp =
322 [&](const std::optional<ConstantRange> &NewCR) -> Value * {
324 APInt RHS;
325 // Check if we can represent NewCR as an icmp predicate.
326 if (NewCR && NewCR->getEquivalentICmp(Pred, RHS)) {
327 IRBuilder<NoFolder> Builder(&Inst);
328 Value *NewICmp =
329 Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS));
330 InsertedValues.insert(NewICmp);
331 return NewICmp;
332 }
333 return nullptr;
334 };
335 // We are allowed to refine the comparison to either true or false for out
336 // of range inputs.
337 // Here we refine the comparison to false, and check if we can narrow the
338 // range check to a simpler test.
339 if (auto *V = ConvertCRToICmp(CR->exactIntersectWith(LRange)))
340 return V;
341 // Here we refine the comparison to true, i.e. we relax the range check.
342 if (auto *V = ConvertCRToICmp(CR->exactUnionWith(LRange.inverse())))
343 return V;
344 }
345 }
346
347 return nullptr;
348}
349
351 SmallPtrSetImpl<Value *> &InsertedValues,
352 Statistic &InstRemovedStat,
353 Statistic &InstReplacedStat) {
354 bool MadeChanges = false;
355 for (Instruction &Inst : make_early_inc_range(BB)) {
356 if (Inst.getType()->isVoidTy())
357 continue;
358 if (tryToReplaceWithConstant(&Inst)) {
360 Inst.eraseFromParent();
361
362 MadeChanges = true;
363 ++InstRemovedStat;
364 } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
365 MadeChanges = true;
366 ++InstReplacedStat;
367 } else if (refineInstruction(*this, InsertedValues, Inst)) {
368 MadeChanges = true;
369 } else if (auto *V = simplifyInstruction(*this, InsertedValues, Inst)) {
370 Inst.replaceAllUsesWith(V);
371 Inst.eraseFromParent();
372 ++InstRemovedStat;
373 MadeChanges = true;
374 }
375 }
376 return MadeChanges;
377}
378
380 BasicBlock *&NewUnreachableBB) const {
381 SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
382 bool HasNonFeasibleEdges = false;
383 for (BasicBlock *Succ : successors(BB)) {
384 if (isEdgeFeasible(BB, Succ))
385 FeasibleSuccessors.insert(Succ);
386 else
387 HasNonFeasibleEdges = true;
388 }
389
390 // All edges feasible, nothing to do.
391 if (!HasNonFeasibleEdges)
392 return false;
393
394 // SCCP can only determine non-feasible edges for br, switch and indirectbr.
395 Instruction *TI = BB->getTerminator();
397 isa<IndirectBrInst>(TI)) &&
398 "Terminator must be a br, switch or indirectbr");
399
400 if (FeasibleSuccessors.size() == 0) {
401 // Branch on undef/poison, replace with unreachable.
404 for (BasicBlock *Succ : successors(BB)) {
405 Succ->removePredecessor(BB);
406 if (SeenSuccs.insert(Succ).second)
407 Updates.push_back({DominatorTree::Delete, BB, Succ});
408 }
409 TI->eraseFromParent();
410 new UnreachableInst(BB->getContext(), BB);
411 DTU.applyUpdatesPermissive(Updates);
412 } else if (FeasibleSuccessors.size() == 1) {
413 // Replace with an unconditional branch to the only feasible successor.
414 BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
416 bool HaveSeenOnlyFeasibleSuccessor = false;
417 for (BasicBlock *Succ : successors(BB)) {
418 if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
419 // Don't remove the edge to the only feasible successor the first time
420 // we see it. We still do need to remove any multi-edges to it though.
421 HaveSeenOnlyFeasibleSuccessor = true;
422 continue;
423 }
424
425 Succ->removePredecessor(BB);
426 Updates.push_back({DominatorTree::Delete, BB, Succ});
427 }
428
429 Instruction *BI = BranchInst::Create(OnlyFeasibleSuccessor, BB);
430 BI->setDebugLoc(TI->getDebugLoc());
431 TI->eraseFromParent();
432 DTU.applyUpdatesPermissive(Updates);
433 } else if (FeasibleSuccessors.size() > 1) {
436
437 // If the default destination is unfeasible it will never be taken. Replace
438 // it with a new block with a single Unreachable instruction.
439 BasicBlock *DefaultDest = SI->getDefaultDest();
440 if (!FeasibleSuccessors.contains(DefaultDest)) {
441 if (!NewUnreachableBB) {
442 NewUnreachableBB =
443 BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
444 DefaultDest->getParent(), DefaultDest);
445 auto *UI =
446 new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
447 UI->setDebugLoc(DebugLoc::getTemporary());
448 }
449
450 DefaultDest->removePredecessor(BB);
451 SI->setDefaultDest(NewUnreachableBB);
452 Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
453 Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
454 }
455
456 for (auto CI = SI->case_begin(); CI != SI->case_end();) {
457 if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
458 ++CI;
459 continue;
460 }
461
462 BasicBlock *Succ = CI->getCaseSuccessor();
463 Succ->removePredecessor(BB);
464 Updates.push_back({DominatorTree::Delete, BB, Succ});
465 SI.removeCase(CI);
466 // Don't increment CI, as we removed a case.
467 }
468
469 DTU.applyUpdatesPermissive(Updates);
470 } else {
471 llvm_unreachable("Must have at least one feasible successor");
472 }
473 return true;
474}
475
476static void inferAttribute(Function *F, unsigned AttrIndex,
477 const ValueLatticeElement &Val) {
478 // If there is a known constant range for the value, add range attribute.
479 if (Val.isConstantRange() && !Val.getConstantRange().isSingleElement()) {
480 // Do not add range attribute if the value may include undef.
482 return;
483
484 // Take the intersection of the existing attribute and the inferred range.
485 Attribute OldAttr = F->getAttributeAtIndex(AttrIndex, Attribute::Range);
487 if (OldAttr.isValid())
488 CR = CR.intersectWith(OldAttr.getRange());
489 F->addAttributeAtIndex(
490 AttrIndex, Attribute::get(F->getContext(), Attribute::Range, CR));
491 return;
492 }
493 // Infer nonnull attribute.
494 if (Val.isNotConstant() && Val.getNotConstant()->getType()->isPointerTy() &&
495 Val.getNotConstant()->isNullValue() &&
496 !F->hasAttributeAtIndex(AttrIndex, Attribute::NonNull)) {
497 F->addAttributeAtIndex(AttrIndex,
498 Attribute::get(F->getContext(), Attribute::NonNull));
499 }
500}
501
503 for (const auto &[F, ReturnValue] : getTrackedRetVals())
504 inferAttribute(F, AttributeList::ReturnIndex, ReturnValue);
505}
506
509 if (!isBlockExecutable(&F->front()))
510 continue;
511 for (Argument &A : F->args())
512 if (!A.getType()->isStructTy())
513 inferAttribute(F, AttributeList::FirstArgIndex + A.getArgNo(),
515 }
516}
517
518/// Helper class for SCCPSolver. This implements the instruction visitor and
519/// holds all the state.
520class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
521 const DataLayout &DL;
522 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
523 /// Basic blocks that are executable (but may not have been visited yet).
524 SmallPtrSet<BasicBlock *, 8> BBExecutable;
525 /// Basic blocks that are executable and have been visited at least once.
528 ValueState; // The state each value is in.
529
530 /// StructValueState - This maintains ValueState for values that have
531 /// StructType, for example for formal arguments, calls, insertelement, etc.
533
534 /// GlobalValue - If we are tracking any values for the contents of a global
535 /// variable, we keep a mapping from the constant accessor to the element of
536 /// the global, to the currently known value. If the value becomes
537 /// overdefined, it's entry is simply removed from this map.
539
540 /// TrackedRetVals - If we are tracking arguments into and the return
541 /// value out of a function, it will have an entry in this map, indicating
542 /// what the known return value for the function is.
544
545 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
546 /// that return multiple values.
548 TrackedMultipleRetVals;
549
550 /// The set of values whose lattice has been invalidated.
551 /// Populated by resetLatticeValueFor(), cleared after resolving undefs.
552 DenseSet<Value *> Invalidated;
553
554 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
555 /// represented here for efficient lookup.
556 SmallPtrSet<Function *, 16> MRVFunctionsTracked;
557
558 /// A list of functions whose return cannot be modified.
559 SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
560
561 /// TrackingIncomingArguments - This is the set of functions for whose
562 /// arguments we make optimistic assumptions about and try to prove as
563 /// constants.
564 SmallPtrSet<Function *, 16> TrackingIncomingArguments;
565
566 /// Worklist of instructions to re-visit. This only includes instructions
567 /// in blocks that have already been visited at least once.
569
570 /// Current instruction while visiting a block for the first time, used to
571 /// avoid unnecessary instruction worklist insertions. Null if an instruction
572 /// is visited outside a whole-block visitation.
573 Instruction *CurI = nullptr;
574
575 // The BasicBlock work list
577
578 /// KnownFeasibleEdges - Entries in this set are edges which have already had
579 /// PHI nodes retriggered.
580 using Edge = std::pair<BasicBlock *, BasicBlock *>;
581 DenseSet<Edge> KnownFeasibleEdges;
582
584
586
587 LLVMContext &Ctx;
588
589 BumpPtrAllocator PredicateInfoAllocator;
590
591private:
592 ConstantInt *getConstantInt(const ValueLatticeElement &IV, Type *Ty) const {
594 }
595
596 /// Push instruction \p I to the worklist.
597 void pushToWorkList(Instruction *I);
598
599 /// Push users of value \p V to the worklist.
600 void pushUsersToWorkList(Value *V);
601
602 /// Like pushUsersToWorkList(), but also prints a debug message with the
603 /// updated value.
604 void pushUsersToWorkListMsg(ValueLatticeElement &IV, Value *V);
605
606 // markConstant - Make a value be marked as "constant". If the value
607 // is not already a constant, add it to the instruction work list so that
608 // the users of the instruction are updated later.
609 bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
610 bool MayIncludeUndef = false);
611
612 bool markConstant(Value *V, Constant *C) {
613 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
614 return markConstant(ValueState[V], V, C);
615 }
616
617 bool markNotConstant(ValueLatticeElement &IV, Value *V, Constant *C);
618
619 bool markNotNull(ValueLatticeElement &IV, Value *V) {
620 return markNotConstant(IV, V, Constant::getNullValue(V->getType()));
621 }
622
623 /// markConstantRange - Mark the object as constant range with \p CR. If the
624 /// object is not a constant range with the range \p CR, add it to the
625 /// instruction work list so that the users of the instruction are updated
626 /// later.
627 bool markConstantRange(ValueLatticeElement &IV, Value *V,
628 const ConstantRange &CR);
629
630 // markOverdefined - Make a value be marked as "overdefined". If the
631 // value is not already overdefined, add it to the overdefined instruction
632 // work list so that the users of the instruction are updated later.
633 bool markOverdefined(ValueLatticeElement &IV, Value *V);
634
635 /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
636 /// changes.
637 bool mergeInValue(ValueLatticeElement &IV, Value *V,
638 const ValueLatticeElement &MergeWithV,
640 /*MayIncludeUndef=*/false, /*CheckWiden=*/false});
641
642 /// getValueState - Return the ValueLatticeElement object that corresponds to
643 /// the value. This function handles the case when the value hasn't been seen
644 /// yet by properly seeding constants etc.
645 ValueLatticeElement &getValueState(Value *V) {
646 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
647
648 auto I = ValueState.try_emplace(V);
649 ValueLatticeElement &LV = I.first->second;
650
651 if (!I.second)
652 return LV; // Common case, already in the map.
653
654 if (auto *C = dyn_cast<Constant>(V))
655 LV.markConstant(C); // Constants are constant
656
657 // All others are unknown by default.
658 return LV;
659 }
660
661 /// getStructValueState - Return the ValueLatticeElement object that
662 /// corresponds to the value/field pair. This function handles the case when
663 /// the value hasn't been seen yet by properly seeding constants etc.
664 ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
665 assert(V->getType()->isStructTy() && "Should use getValueState");
666 assert(i < cast<StructType>(V->getType())->getNumElements() &&
667 "Invalid element #");
668
669 auto I = StructValueState.insert(
670 std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
671 ValueLatticeElement &LV = I.first->second;
672
673 if (!I.second)
674 return LV; // Common case, already in the map.
675
676 if (auto *C = dyn_cast<Constant>(V)) {
677 Constant *Elt = C->getAggregateElement(i);
678
679 if (!Elt)
680 LV.markOverdefined(); // Unknown sort of constant.
681 else
682 LV.markConstant(Elt); // Constants are constant.
683 }
684
685 // All others are underdefined by default.
686 return LV;
687 }
688
689 /// Traverse the use-def chain of \p Call, marking itself and its users as
690 /// "unknown" on the way.
691 void invalidate(CallBase *Call) {
693 ToInvalidate.push_back(Call);
694
695 while (!ToInvalidate.empty()) {
696 Instruction *Inst = ToInvalidate.pop_back_val();
697
698 if (!Invalidated.insert(Inst).second)
699 continue;
700
701 if (!BBExecutable.count(Inst->getParent()))
702 continue;
703
704 Value *V = nullptr;
705 // For return instructions we need to invalidate the tracked returns map.
706 // Anything else has its lattice in the value map.
707 if (auto *RetInst = dyn_cast<ReturnInst>(Inst)) {
708 Function *F = RetInst->getParent()->getParent();
709 if (auto It = TrackedRetVals.find(F); It != TrackedRetVals.end()) {
710 It->second = ValueLatticeElement();
711 V = F;
712 } else if (MRVFunctionsTracked.count(F)) {
713 auto *STy = cast<StructType>(F->getReturnType());
714 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
715 TrackedMultipleRetVals[{F, I}] = ValueLatticeElement();
716 V = F;
717 }
718 } else if (auto *STy = dyn_cast<StructType>(Inst->getType())) {
719 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
720 if (auto It = StructValueState.find({Inst, I});
721 It != StructValueState.end()) {
722 It->second = ValueLatticeElement();
723 V = Inst;
724 }
725 }
726 } else if (auto It = ValueState.find(Inst); It != ValueState.end()) {
727 It->second = ValueLatticeElement();
728 V = Inst;
729 }
730
731 if (V) {
732 LLVM_DEBUG(dbgs() << "Invalidated lattice for " << *V << "\n");
733
734 for (User *U : V->users())
735 if (auto *UI = dyn_cast<Instruction>(U))
736 ToInvalidate.push_back(UI);
737
738 auto It = AdditionalUsers.find(V);
739 if (It != AdditionalUsers.end())
740 for (User *U : It->second)
741 if (auto *UI = dyn_cast<Instruction>(U))
742 ToInvalidate.push_back(UI);
743 }
744 }
745 }
746
747 /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
748 /// work list if it is not already executable.
749 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
750
751 // getFeasibleSuccessors - Return a vector of booleans to indicate which
752 // successors are reachable from a given terminator instruction.
753 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
754
755 // Add U as additional user of V.
756 void addAdditionalUser(Value *V, User *U) { AdditionalUsers[V].insert(U); }
757
758 void handlePredicate(Instruction *I, Value *CopyOf, const PredicateBase *PI);
759 void handleCallOverdefined(CallBase &CB);
760 void handleCallResult(CallBase &CB);
761 void handleCallArguments(CallBase &CB);
762 void handleExtractOfWithOverflow(ExtractValueInst &EVI,
763 const WithOverflowInst *WO, unsigned Idx);
764 bool isInstFullyOverDefined(Instruction &Inst);
765
766private:
767 friend class InstVisitor<SCCPInstVisitor>;
768
769 // visit implementations - Something changed in this instruction. Either an
770 // operand made a transition, or the instruction is newly executable. Change
771 // the value type of I to reflect these changes if appropriate.
772 void visitPHINode(PHINode &I);
773
774 // Terminators
775
776 void visitReturnInst(ReturnInst &I);
777 void visitTerminator(Instruction &TI);
778
779 void visitCastInst(CastInst &I);
780 void visitSelectInst(SelectInst &I);
781 void visitUnaryOperator(Instruction &I);
782 void visitFreezeInst(FreezeInst &I);
783 void visitBinaryOperator(Instruction &I);
784 void visitCmpInst(CmpInst &I);
785 void visitExtractValueInst(ExtractValueInst &EVI);
786 void visitInsertValueInst(InsertValueInst &IVI);
787
788 void visitCatchSwitchInst(CatchSwitchInst &CPI) {
789 markOverdefined(&CPI);
790 visitTerminator(CPI);
791 }
792
793 // Instructions that cannot be folded away.
794
795 void visitStoreInst(StoreInst &I);
796 void visitLoadInst(LoadInst &I);
797 void visitGetElementPtrInst(GetElementPtrInst &I);
798 void visitAllocaInst(AllocaInst &AI);
799
800 void visitInvokeInst(InvokeInst &II) {
801 visitCallBase(II);
802 visitTerminator(II);
803 }
804
805 void visitCallBrInst(CallBrInst &CBI) {
806 visitCallBase(CBI);
807 visitTerminator(CBI);
808 }
809
810 void visitCallBase(CallBase &CB);
811 void visitResumeInst(ResumeInst &I) { /*returns void*/
812 }
813 void visitUnreachableInst(UnreachableInst &I) { /*returns void*/
814 }
815 void visitFenceInst(FenceInst &I) { /*returns void*/
816 }
817
818 void visitInstruction(Instruction &I);
819
820public:
822 FnPredicateInfo.insert({&F, std::make_unique<PredicateInfo>(
823 F, DT, AC, PredicateInfoAllocator)});
824 }
825
827 auto It = FnPredicateInfo.find(&F);
828 if (It == FnPredicateInfo.end())
829 return;
830
831 for (BasicBlock &BB : F) {
832 for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
833 if (auto *BC = dyn_cast<BitCastInst>(&Inst)) {
834 if (BC->getType() == BC->getOperand(0)->getType()) {
835 if (It->second->getPredicateInfoFor(&Inst)) {
836 Value *Op = BC->getOperand(0);
837 Inst.replaceAllUsesWith(Op);
838 Inst.eraseFromParent();
839 }
840 }
841 }
842 }
843 }
844 }
845
846 void visitCallInst(CallInst &I) { visitCallBase(I); }
847
849
851 auto It = FnPredicateInfo.find(I->getParent()->getParent());
852 if (It == FnPredicateInfo.end())
853 return nullptr;
854 return It->second->getPredicateInfoFor(I);
855 }
856
858 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
859 LLVMContext &Ctx)
860 : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}
861
863 // We only track the contents of scalar globals.
864 if (GV->getValueType()->isSingleValueType()) {
865 ValueLatticeElement &IV = TrackedGlobals[GV];
866 IV.markConstant(GV->getInitializer());
867 }
868 }
869
871 // Add an entry, F -> undef.
872 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
873 MRVFunctionsTracked.insert(F);
874 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
875 TrackedMultipleRetVals.try_emplace(std::make_pair(F, i));
876 } else if (!F->getReturnType()->isVoidTy())
877 TrackedRetVals.try_emplace(F);
878 }
879
881 MustPreserveReturnsInFunctions.insert(F);
882 }
883
885 return MustPreserveReturnsInFunctions.count(F);
886 }
887
889 TrackingIncomingArguments.insert(F);
890 }
891
893 return TrackingIncomingArguments.count(F);
894 }
895
897 return TrackingIncomingArguments;
898 }
899
900 void solve();
901
903
905
907 return BBExecutable.count(BB);
908 }
909
910 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;
911
912 std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
913 std::vector<ValueLatticeElement> StructValues;
914 auto *STy = dyn_cast<StructType>(V->getType());
915 assert(STy && "getStructLatticeValueFor() can be called only on structs");
916 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
917 auto I = StructValueState.find(std::make_pair(V, i));
918 assert(I != StructValueState.end() && "Value not in valuemap!");
919 StructValues.push_back(I->second);
920 }
921 return StructValues;
922 }
923
924 void removeLatticeValueFor(Value *V) { ValueState.erase(V); }
925
926 /// Invalidate the Lattice Value of \p Call and its users after specializing
927 /// the call. Then recompute it.
929 // Calls to void returning functions do not need invalidation.
930 Function *F = Call->getCalledFunction();
931 (void)F;
932 assert(!F->getReturnType()->isVoidTy() &&
933 (TrackedRetVals.count(F) || MRVFunctionsTracked.count(F)) &&
934 "All non void specializations should be tracked");
935 invalidate(Call);
936 handleCallResult(*Call);
937 }
938
940 assert(!V->getType()->isStructTy() &&
941 "Should use getStructLatticeValueFor");
943 ValueState.find(V);
944 assert(I != ValueState.end() &&
945 "V not found in ValueState nor Paramstate map!");
946 return I->second;
947 }
948
950 return TrackedRetVals;
951 }
952
955 return TrackedGlobals;
956 }
957
959 return MRVFunctionsTracked;
960 }
961
963 if (auto *STy = dyn_cast<StructType>(V->getType()))
964 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
965 markOverdefined(getStructValueState(V, i), V);
966 else
967 markOverdefined(ValueState[V], V);
968 }
969
971 if (A->getType()->isIntOrIntVectorTy()) {
972 if (std::optional<ConstantRange> Range = A->getRange())
974 }
975 if (A->hasNonNullAttr())
977 // Assume nothing about the incoming arguments without attributes.
979 }
980
982 if (A->getType()->isStructTy())
983 return (void)markOverdefined(A);
984 mergeInValue(ValueState[A], A, getArgAttributeVL(A));
985 }
986
988
989 Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;
990
992
994 const SmallVectorImpl<ArgInfo> &Args);
995
997 for (auto &BB : *F)
998 BBExecutable.erase(&BB);
999 }
1000
1002 bool ResolvedUndefs = true;
1003 while (ResolvedUndefs) {
1004 solve();
1005 ResolvedUndefs = false;
1006 for (Function &F : M)
1007 ResolvedUndefs |= resolvedUndefsIn(F);
1008 }
1009 }
1010
1012 bool ResolvedUndefs = true;
1013 while (ResolvedUndefs) {
1014 solve();
1015 ResolvedUndefs = false;
1016 for (Function *F : WorkList)
1017 ResolvedUndefs |= resolvedUndefsIn(*F);
1018 }
1019 }
1020
1022 bool ResolvedUndefs = true;
1023 while (ResolvedUndefs) {
1024 solve();
1025 ResolvedUndefs = false;
1026 for (Value *V : Invalidated)
1027 if (auto *I = dyn_cast<Instruction>(V))
1028 ResolvedUndefs |= resolvedUndef(*I);
1029 }
1030 Invalidated.clear();
1031 }
1032};
1033
1034} // namespace llvm
1035
1037 if (!BBExecutable.insert(BB).second)
1038 return false;
1039 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
1040 BBWorkList.push_back(BB); // Add the block to the work list!
1041 return true;
1042}
1043
1044void SCCPInstVisitor::pushToWorkList(Instruction *I) {
1045 // If we're currently visiting a block, do not push any instructions in the
1046 // same blocks that are after the current one, as they will be visited
1047 // anyway. We do have to push updates to earlier instructions (e.g. phi
1048 // nodes or loads of tracked globals).
1049 if (CurI && I->getParent() == CurI->getParent() && !I->comesBefore(CurI))
1050 return;
1051 // Only push instructions in already visited blocks. Otherwise we'll handle
1052 // it when we visit the block for the first time.
1053 if (BBVisited.contains(I->getParent()))
1054 InstWorkList.insert(I);
1055}
1056
void SCCPInstVisitor::pushUsersToWorkList(Value *V) {
  // V's lattice value changed: every instruction consuming V may need its
  // own value recomputed, so queue all direct IR users.
  for (User *U : V->users())
    if (auto *UI = dyn_cast<Instruction>(U))
      pushToWorkList(UI);

  // Also queue "additional users" -- instructions registered as depending
  // on V without being direct IR users.
  auto Iter = AdditionalUsers.find(V);
  if (Iter != AdditionalUsers.end()) {
    // Copy additional users before notifying them of changes, because new
    // users may be added, potentially invalidating the iterator.
    // NOTE(review): the declaration of 'ToNotify' (presumably a local
    // SmallVector of Instruction*) is not visible in this view of the file;
    // confirm against the original source.
    for (User *U : Iter->second)
      if (auto *UI = dyn_cast<Instruction>(U))
        ToNotify.push_back(UI);
    for (Instruction *UI : ToNotify)
      pushToWorkList(UI);
  }
}
1074
// Debug-logging wrapper around pushUsersToWorkList: traces the new lattice
// value IV of V before queueing V's users for revisiting.
void SCCPInstVisitor::pushUsersToWorkListMsg(ValueLatticeElement &IV,
                                             Value *V) {
  LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
  pushUsersToWorkList(V);
}
1080
1081bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
1082 Constant *C, bool MayIncludeUndef) {
1083 if (!IV.markConstant(C, MayIncludeUndef))
1084 return false;
1085 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
1086 pushUsersToWorkList(V);
1087 return true;
1088}
1089
1090bool SCCPInstVisitor::markNotConstant(ValueLatticeElement &IV, Value *V,
1091 Constant *C) {
1092 if (!IV.markNotConstant(C))
1093 return false;
1094 LLVM_DEBUG(dbgs() << "markNotConstant: " << *C << ": " << *V << '\n');
1095 pushUsersToWorkList(V);
1096 return true;
1097}
1098
1099bool SCCPInstVisitor::markConstantRange(ValueLatticeElement &IV, Value *V,
1100 const ConstantRange &CR) {
1101 if (!IV.markConstantRange(CR))
1102 return false;
1103 LLVM_DEBUG(dbgs() << "markConstantRange: " << CR << ": " << *V << '\n');
1104 pushUsersToWorkList(V);
1105 return true;
1106}
1107
1108bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
1109 if (!IV.markOverdefined())
1110 return false;
1111
1112 LLVM_DEBUG(dbgs() << "markOverdefined: ";
1113 if (auto *F = dyn_cast<Function>(V)) dbgs()
1114 << "Function '" << F->getName() << "'\n";
1115 else dbgs() << *V << '\n');
1116 // Only instructions go on the work list
1117 pushUsersToWorkList(V);
1118 return true;
1119}
1120
  // True iff every tracked element of F's multi-value (struct) return is a
  // constant lattice value.
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
    assert(It != TrackedMultipleRetVals.end());
    if (!SCCPSolver::isConstant(It->second))
      return false;
  }
  return true;
}
1130
                                       Type *Ty) const {
  // Fold the lattice value LV to a concrete Constant of type Ty, or return
  // nullptr if LV does not describe exactly one value.
  if (LV.isConstant()) {
    Constant *C = LV.getConstant();
    assert(C->getType() == Ty && "Type mismatch");
    return C;
  }

  // A constant range that has collapsed to a single element is a constant.
  if (LV.isConstantRange()) {
    const auto &CR = LV.getConstantRange();
    if (CR.getSingleElement())
      return ConstantInt::get(Ty, *CR.getSingleElement());
  }
  return nullptr;
}
1146
  // Fold V's lattice state to a single Constant, or return nullptr when
  // that is not possible. Unresolved elements are represented as UndefValue.
  Constant *Const = nullptr;
  if (V->getType()->isStructTy()) {
    // Struct values are folded element-by-element into a ConstantStruct.
    std::vector<ValueLatticeElement> LVs = getStructLatticeValueFor(V);
    // NOTE(review): the overdefined-check line guarding this early return is
    // not visible in this view of the file.
      return nullptr;
    std::vector<Constant *> ConstVals;
    auto *ST = cast<StructType>(V->getType());
    for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
      const ValueLatticeElement &LV = LVs[I];
      ConstVals.push_back(SCCPSolver::isConstant(LV)
                              ? getConstant(LV, ST->getElementType(I))
                              : UndefValue::get(ST->getElementType(I)));
    }
    Const = ConstantStruct::get(ST, ConstVals);
  } else {
    // NOTE(review): the lines fetching V's lattice value 'LV' and checking
    // it for overdefined are not visible in this view of the file.
      return nullptr;
    Const = SCCPSolver::isConstant(LV) ? getConstant(LV, V->getType())
                                       : UndefValue::get(V->getType());
  }
  assert(Const && "Constant is nullptr here!");
  return Const;
}
1172
                                       const SmallVectorImpl<ArgInfo> &Args) {
  // Transfer lattice state onto the arguments of specialization F: formals
  // that were specialized on a constant actual are marked constant; all
  // other formals copy the lattice state of the corresponding argument of
  // the original function.
  assert(!Args.empty() && "Specialization without arguments");
  assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
         "Functions should have the same number of arguments");

  // Iter walks the (ordered) specialized-argument list in lockstep with the
  // parallel old/new argument iterators.
  auto Iter = Args.begin();
  Function::arg_iterator NewArg = F->arg_begin();
  Function::arg_iterator OldArg = Args[0].Formal->getParent()->arg_begin();
  for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {

    LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
                      << NewArg->getNameOrAsOperand() << "\n");

    // Mark the argument constants in the new function
    // or copy the lattice state over from the old function.
    if (Iter != Args.end() && Iter->Formal == &*OldArg) {
      // Specialized formal: struct actuals are split per element.
      if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
          NewValue.markConstant(Iter->Actual->getAggregateElement(I));
        }
      } else {
        ValueState[&*NewArg].markConstant(Iter->Actual);
      }
      ++Iter;
    } else {
      // Not specialized: copy the old argument's lattice state verbatim.
      if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
          NewValue = StructValueState[{&*OldArg, I}];
        }
      } else {
        ValueLatticeElement &NewValue = ValueState[&*NewArg];
        NewValue = ValueState[&*OldArg];
      }
    }
  }
}
1212
// Fallback visitor for any instruction kind without a dedicated handler.
void SCCPInstVisitor::visitInstruction(Instruction &I) {
  // All the instructions we don't do any special handling for just
  // go to overdefined.
  LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
  markOverdefined(&I);
}
1219
// Merge MergeWithV into V's lattice element IV; on any change, V's users
// are queued for revisiting. Returns true iff IV changed.
bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
                                   const ValueLatticeElement &MergeWithV,
                                   // NOTE(review): the final parameter line
                                   // (merge options) is not visible in this
                                   // view of the file.
  if (IV.mergeIn(MergeWithV, Opts)) {
    pushUsersToWorkList(V);
    LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
                      << IV << "\n");
    return true;
  }
  return false;
}
1231
1232bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
1233 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
1234 return false; // This edge is already known to be executable!
1235
1236 if (!markBlockExecutable(Dest)) {
1237 // If the destination is already executable, we just made an *edge*
1238 // feasible that wasn't before. Revisit the PHI nodes in the block
1239 // because they have potentially new operands.
1240 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
1241 << " -> " << Dest->getName() << '\n');
1242
1243 for (PHINode &PN : Dest->phis())
1244 pushToWorkList(&PN);
1245 }
1246 return true;
1247}
1248
// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
                                            SmallVectorImpl<bool> &Succs) {
  Succs.resize(TI.getNumSuccessors());
  if (auto *BI = dyn_cast<BranchInst>(&TI)) {
    // An unconditional branch always takes its single successor.
    if (BI->isUnconditional()) {
      Succs[0] = true;
      return;
    }

    const ValueLatticeElement &BCValue = getValueState(BI->getCondition());
    ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
    if (!CI) {
      // Overdefined condition variables, and branches on unfoldable constant
      // conditions, mean the branch could go either way.
      // An unknown/undef condition keeps both successors infeasible until
      // it resolves.
      if (!BCValue.isUnknownOrUndef())
        Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    Succs[CI->isZero()] = true;
    return;
  }

  // We cannot analyze special terminators, so consider all successors
  // executable.
  if (TI.isSpecialTerminator()) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
    // With no cases, only the default destination exists.
    if (!SI->getNumCases()) {
      Succs[0] = true;
      return;
    }
    const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
    if (ConstantInt *CI =
            getConstantInt(SCValue, SI->getCondition()->getType())) {
      // A known constant selects exactly one case (or the default).
      Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
      return;
    }

    // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
    // is ready.
    if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
      // A range condition makes exactly those cases whose value lies inside
      // the range feasible.
      const ConstantRange &Range = SCValue.getConstantRange();
      unsigned ReachableCaseCount = 0;
      for (const auto &Case : SI->cases()) {
        const APInt &CaseValue = Case.getCaseValue()->getValue();
        if (Range.contains(CaseValue)) {
          Succs[Case.getSuccessorIndex()] = true;
          ++ReachableCaseCount;
        }
      }

      // The default is feasible only if the range can hold values beyond
      // the reachable cases.
      Succs[SI->case_default()->getSuccessorIndex()] =
          Range.isSizeLargerThan(ReachableCaseCount);
      return;
    }

    // Overdefined or unknown condition? All destinations are executable!
    if (!SCValue.isUnknownOrUndef())
      Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  // In case of indirect branch and its address is a blockaddress, we mark
  // the target as executable.
  if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
    // Casts are folded by visitCastInst.
    const ValueLatticeElement &IBRValue = getValueState(IBR->getAddress());
    // NOTE(review): the line declaring 'Addr' (presumably a
    // dyn_cast_or_null<BlockAddress> of the folded constant below) is not
    // visible in this view of the file.
        getConstant(IBRValue, IBR->getAddress()->getType()));
    if (!Addr) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!IBRValue.isUnknownOrUndef())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    BasicBlock *T = Addr->getBasicBlock();
    assert(Addr->getFunction() == T->getParent() &&
           "Block address of a different function ?");
    for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
      // This is the target.
      if (IBR->getDestination(i) == T) {
        Succs[i] = true;
        return;
      }
    }

    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior. Its ok to assume no successor is executable.
    return;
  }

  LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
  llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
1351
1352// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
1353// block to the 'To' basic block is currently feasible.
1355 // Check if we've called markEdgeExecutable on the edge yet. (We could
1356 // be more aggressive and try to consider edges which haven't been marked
1357 // yet, but there isn't any need.)
1358 return KnownFeasibleEdges.count(Edge(From, To));
1359}
1360
1361// visit Implementations - Something changed in this instruction, either an
1362// operand made a transition, or the instruction is newly executable. Change
1363// the value type of I to reflect these changes if appropriate. This method
1364// makes sure to do the following actions:
1365//
1366// 1. If a phi node merges two constants in, and has conflicting value coming
1367// from different branches, or if the PHI node merges in an overdefined
1368// value, then the PHI node becomes overdefined.
1369// 2. If a phi node merges only constants in, and they all agree on value, the
1370// PHI node becomes a constant value equal to that.
1371// 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
1372// 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
1373// 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
1374// 6. If a conditional branch has a value that is constant, make the selected
1375// destination executable
1376// 7. If a conditional branch has a value that is overdefined, make all
1377// successors executable.
void SCCPInstVisitor::visitPHINode(PHINode &PN) {
  // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
  // and slow us down a lot. Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64)
    return (void)markOverdefined(&PN);

  // Nothing left to learn if every lattice element is already overdefined.
  if (isInstFullyOverDefined(PN))
    return;
  // Collect indices of incoming values whose CFG edge is currently
  // feasible; infeasible edges contribute nothing to the merge.
  SmallVector<unsigned> FeasibleIncomingIndices;
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
      continue;
    FeasibleIncomingIndices.push_back(i);
  }

  // Look at all of the executable operands of the PHI node. If any of them
  // are overdefined, the PHI becomes overdefined as well. If they are all
  // constant, and they agree with each other, the PHI becomes the identical
  // constant. If they are constant and don't agree, the PHI is a constant
  // range. If there are no executable operands, the PHI remains unknown.
  if (StructType *STy = dyn_cast<StructType>(PN.getType())) {
    // Struct PHIs are merged one element at a time.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Merge into a local copy first; stop early once overdefined.
      ValueLatticeElement PhiState = getStructValueState(&PN, i);
      if (PhiState.isOverdefined())
        continue;
      for (unsigned j : FeasibleIncomingIndices) {
        const ValueLatticeElement &IV =
            getStructValueState(PN.getIncomingValue(j), i);
        PhiState.mergeIn(IV);
        if (PhiState.isOverdefined())
          break;
      }
      // Reference re-fetched here; the state-map lookups above may have
      // grown the map and invalidated earlier references.
      ValueLatticeElement &PhiStateRef = getStructValueState(&PN, i);
      mergeInValue(PhiStateRef, &PN, PhiState,
                   ValueLatticeElement::MergeOptions().setMaxWidenSteps(
                       FeasibleIncomingIndices.size() + 1));
      PhiStateRef.setNumRangeExtensions(
          std::max((unsigned)FeasibleIncomingIndices.size(),
                   PhiStateRef.getNumRangeExtensions()));
    }
  } else {
    ValueLatticeElement PhiState = getValueState(&PN);
    for (unsigned i : FeasibleIncomingIndices) {
      const ValueLatticeElement &IV = getValueState(PN.getIncomingValue(i));
      PhiState.mergeIn(IV);
      if (PhiState.isOverdefined())
        break;
    }
    // We allow up to 1 range extension per active incoming value and one
    // additional extension. Note that we manually adjust the number of range
    // extensions to match the number of active incoming values. This helps to
    // limit multiple extensions caused by the same incoming value, if other
    // incoming values are equal.
    ValueLatticeElement &PhiStateRef = ValueState[&PN];
    mergeInValue(PhiStateRef, &PN, PhiState,
                 ValueLatticeElement::MergeOptions().setMaxWidenSteps(
                     FeasibleIncomingIndices.size() + 1));
    PhiStateRef.setNumRangeExtensions(
        std::max((unsigned)FeasibleIncomingIndices.size(),
                 PhiStateRef.getNumRangeExtensions()));
  }
}
1440
1441void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
1442 if (I.getNumOperands() == 0)
1443 return; // ret void
1444
1445 Function *F = I.getParent()->getParent();
1446 Value *ResultOp = I.getOperand(0);
1447
1448 // If we are tracking the return value of this function, merge it in.
1449 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
1450 auto TFRVI = TrackedRetVals.find(F);
1451 if (TFRVI != TrackedRetVals.end()) {
1452 mergeInValue(TFRVI->second, F, getValueState(ResultOp));
1453 return;
1454 }
1455 }
1456
1457 // Handle functions that return multiple values.
1458 if (!TrackedMultipleRetVals.empty()) {
1459 if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
1460 if (MRVFunctionsTracked.count(F))
1461 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1462 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
1463 getStructValueState(ResultOp, i));
1464 }
1465}
1466
1467void SCCPInstVisitor::visitTerminator(Instruction &TI) {
1468 SmallVector<bool, 16> SuccFeasible;
1469 getFeasibleSuccessors(TI, SuccFeasible);
1470
1471 BasicBlock *BB = TI.getParent();
1472
1473 // Mark all feasible successors executable.
1474 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
1475 if (SuccFeasible[i])
1476 markEdgeExecutable(BB, TI.getSuccessor(i));
1477}
1478
void SCCPInstVisitor::visitCastInst(CastInst &I) {
  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return;

  // A same-type bitcast may exist purely to carry predicate information
  // (PredicateInfo); handle that case specially.
  if (auto *BC = dyn_cast<BitCastInst>(&I)) {
    if (BC->getType() == BC->getOperand(0)->getType()) {
      if (const PredicateBase *PI = getPredicateInfoFor(&I)) {
        handlePredicate(&I, I.getOperand(0), PI);
        return;
      }
    }
  }

  const ValueLatticeElement &OpSt = getValueState(I.getOperand(0));
  if (OpSt.isUnknownOrUndef())
    return; // Wait for the operand to resolve first.

  if (Constant *OpC = getConstant(OpSt, I.getOperand(0)->getType())) {
    // Fold the constant as we build.
    if (Constant *C =
            ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL)) {
      auto &LV = ValueState[&I];
      mergeInValue(LV, &I, ValueLatticeElement::get(C));
      return;
    }
  }

  // Ignore bitcasts, as they may change the number of vector elements.
  if (I.getDestTy()->isIntOrIntVectorTy() &&
      I.getSrcTy()->isIntOrIntVectorTy() &&
      I.getOpcode() != Instruction::BitCast) {
    // Note: OpRange is materialized from OpSt *before* the getValueState
    // call below, which may invalidate the OpSt reference.
    ConstantRange OpRange =
        OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);
    auto &LV = getValueState(&I);

    Type *DestTy = I.getDestTy();
    // Truncation is handled separately so the trunc's no-wrap flags can
    // tighten the resulting range.
    ConstantRange Res = ConstantRange::getEmpty(DestTy->getScalarSizeInBits());
    if (auto *Trunc = dyn_cast<TruncInst>(&I))
      Res = OpRange.truncate(DestTy->getScalarSizeInBits(),
                             Trunc->getNoWrapKind());
    else
      Res = OpRange.castOp(I.getOpcode(), DestTy->getScalarSizeInBits());
    mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
  } else
    markOverdefined(&I);
}
1527
1528void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
1529 const WithOverflowInst *WO,
1530 unsigned Idx) {
1531 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
1532 Type *Ty = LHS->getType();
1533
1534 addAdditionalUser(LHS, &EVI);
1535 addAdditionalUser(RHS, &EVI);
1536
1537 const ValueLatticeElement &L = getValueState(LHS);
1538 if (L.isUnknownOrUndef())
1539 return; // Wait to resolve.
1540 ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
1541
1542 const ValueLatticeElement &R = getValueState(RHS);
1543 if (R.isUnknownOrUndef())
1544 return; // Wait to resolve.
1545
1546 ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
1547 if (Idx == 0) {
1548 ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
1549 mergeInValue(ValueState[&EVI], &EVI, ValueLatticeElement::getRange(Res));
1550 } else {
1551 assert(Idx == 1 && "Index can only be 0 or 1");
1552 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
1553 WO->getBinaryOp(), RR, WO->getNoWrapKind());
1554 if (NWRegion.contains(LR))
1555 return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
1556 markOverdefined(&EVI);
1557 }
1558}
1559
1560void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
1561 // If this returns a struct, mark all elements over defined, we don't track
1562 // structs in structs.
1563 if (EVI.getType()->isStructTy())
1564 return (void)markOverdefined(&EVI);
1565
1566 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1567 // discover a concrete value later.
1568 if (ValueState[&EVI].isOverdefined())
1569 return (void)markOverdefined(&EVI);
1570
1571 // If this is extracting from more than one level of struct, we don't know.
1572 if (EVI.getNumIndices() != 1)
1573 return (void)markOverdefined(&EVI);
1574
1575 Value *AggVal = EVI.getAggregateOperand();
1576 if (AggVal->getType()->isStructTy()) {
1577 unsigned i = *EVI.idx_begin();
1578 if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
1579 return handleExtractOfWithOverflow(EVI, WO, i);
1580 ValueLatticeElement EltVal = getStructValueState(AggVal, i);
1581 mergeInValue(ValueState[&EVI], &EVI, EltVal);
1582 } else {
1583 // Otherwise, must be extracting from an array.
1584 return (void)markOverdefined(&EVI);
1585 }
1586}
1587
1588void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
1589 auto *STy = dyn_cast<StructType>(IVI.getType());
1590 if (!STy)
1591 return (void)markOverdefined(&IVI);
1592
1593 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1594 // discover a concrete value later.
1595 if (ValueState[&IVI].isOverdefined())
1596 return (void)markOverdefined(&IVI);
1597
1598 // If this has more than one index, we can't handle it, drive all results to
1599 // undef.
1600 if (IVI.getNumIndices() != 1)
1601 return (void)markOverdefined(&IVI);
1602
1603 Value *Aggr = IVI.getAggregateOperand();
1604 unsigned Idx = *IVI.idx_begin();
1605
1606 // Compute the result based on what we're inserting.
1607 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1608 // This passes through all values that aren't the inserted element.
1609 if (i != Idx) {
1610 ValueLatticeElement EltVal = getStructValueState(Aggr, i);
1611 mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
1612 continue;
1613 }
1614
1615 Value *Val = IVI.getInsertedValueOperand();
1616 if (Val->getType()->isStructTy())
1617 // We don't track structs in structs.
1618 markOverdefined(getStructValueState(&IVI, i), &IVI);
1619 else {
1620 ValueLatticeElement InVal = getValueState(Val);
1621 mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
1622 }
1623 }
1624}
1625
1626void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
1627 // If this select returns a struct, just mark the result overdefined.
1628 // TODO: We could do a lot better than this if code actually uses this.
1629 if (I.getType()->isStructTy())
1630 return (void)markOverdefined(&I);
1631
1632 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1633 // discover a concrete value later.
1634 if (ValueState[&I].isOverdefined())
1635 return (void)markOverdefined(&I);
1636
1637 const ValueLatticeElement &CondValue = getValueState(I.getCondition());
1638 if (CondValue.isUnknownOrUndef())
1639 return;
1640
1641 if (ConstantInt *CondCB =
1642 getConstantInt(CondValue, I.getCondition()->getType())) {
1643 Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
1644 const ValueLatticeElement &OpValState = getValueState(OpVal);
1645 // Safety: ValueState[&I] doesn't invalidate OpValState since it is already
1646 // in the map.
1647 assert(ValueState.contains(&I) && "&I is not in ValueState map.");
1648 mergeInValue(ValueState[&I], &I, OpValState);
1649 return;
1650 }
1651
1652 // Otherwise, the condition is overdefined or a constant we can't evaluate.
1653 // See if we can produce something better than overdefined based on the T/F
1654 // value.
1655 ValueLatticeElement TVal = getValueState(I.getTrueValue());
1656 ValueLatticeElement FVal = getValueState(I.getFalseValue());
1657
1658 ValueLatticeElement &State = ValueState[&I];
1659 bool Changed = State.mergeIn(TVal);
1660 Changed |= State.mergeIn(FVal);
1661 if (Changed)
1662 pushUsersToWorkListMsg(State, &I);
1663}
1664
1665// Handle Unary Operators.
1666void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
1667 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1668
1669 ValueLatticeElement &IV = ValueState[&I];
1670 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1671 // discover a concrete value later.
1672 if (IV.isOverdefined())
1673 return (void)markOverdefined(&I);
1674
1675 // If something is unknown/undef, wait for it to resolve.
1676 if (V0State.isUnknownOrUndef())
1677 return;
1678
1679 if (SCCPSolver::isConstant(V0State))
1680 if (Constant *C = ConstantFoldUnaryOpOperand(
1681 I.getOpcode(), getConstant(V0State, I.getType()), DL))
1682 return (void)markConstant(IV, &I, C);
1683
1684 markOverdefined(&I);
1685}
1686
1687void SCCPInstVisitor::visitFreezeInst(FreezeInst &I) {
1688 // If this freeze returns a struct, just mark the result overdefined.
1689 // TODO: We could do a lot better than this.
1690 if (I.getType()->isStructTy())
1691 return (void)markOverdefined(&I);
1692
1693 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1694 ValueLatticeElement &IV = ValueState[&I];
1695 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1696 // discover a concrete value later.
1697 if (IV.isOverdefined())
1698 return (void)markOverdefined(&I);
1699
1700 // If something is unknown/undef, wait for it to resolve.
1701 if (V0State.isUnknownOrUndef())
1702 return;
1703
1704 if (SCCPSolver::isConstant(V0State) &&
1705 isGuaranteedNotToBeUndefOrPoison(getConstant(V0State, I.getType())))
1706 return (void)markConstant(IV, &I, getConstant(V0State, I.getType()));
1707
1708 markOverdefined(&I);
1709}
1710
// Handle Binary Operators.
void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
  ValueLatticeElement V1State = getValueState(I.getOperand(0));
  ValueLatticeElement V2State = getValueState(I.getOperand(1));

  ValueLatticeElement &IV = ValueState[&I];
  // Already overdefined: nothing more can be learned.
  if (IV.isOverdefined())
    return;

  // If something is undef, wait for it to resolve.
  if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
    return;

  // Both operands overdefined: no information left to combine.
  if (V1State.isOverdefined() && V2State.isOverdefined())
    return (void)markOverdefined(&I);

  // If either of the operands is a constant, try to fold it to a constant.
  // TODO: Use information from notconstant better.
  if ((V1State.isConstant() || V2State.isConstant())) {
    Value *V1 = SCCPSolver::isConstant(V1State)
                    ? getConstant(V1State, I.getOperand(0)->getType())
                    : I.getOperand(0);
    Value *V2 = SCCPSolver::isConstant(V2State)
                    ? getConstant(V2State, I.getOperand(1)->getType())
                    : I.getOperand(1);
    Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL, &I));
    auto *C = dyn_cast_or_null<Constant>(R);
    if (C) {
      // Conservatively assume that the result may be based on operands that may
      // be undef. Note that we use mergeInValue to combine the constant with
      // the existing lattice value for I, as different constants might be found
      // after one of the operands go to overdefined, e.g. due to one operand
      // being a special floating value.
      ValueLatticeElement NewV;
      NewV.markConstant(C, /*MayIncludeUndef=*/true);
      return (void)mergeInValue(ValueState[&I], &I, NewV);
    }
  }

  // Only use ranges for binary operators on integers.
  if (!I.getType()->isIntOrIntVectorTy())
    return markOverdefined(&I);

  // Try to simplify to a constant range.
  ConstantRange A =
      V1State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
  ConstantRange B =
      V2State.asConstantRange(I.getType(), /*UndefAllowed=*/false);

  auto *BO = cast<BinaryOperator>(&I);
  ConstantRange R = ConstantRange::getEmpty(I.getType()->getScalarSizeInBits());
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO))
    // Use the no-wrap flags to tighten the resulting range.
    R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
  else
    R = A.binaryOp(BO->getOpcode(), B);
  mergeInValue(ValueState[&I], &I, ValueLatticeElement::getRange(R));

  // TODO: Currently we do not exploit special values that produce something
  // better than overdefined with an overdefined operand for vector or floating
  // point types, like and <4 x i32> overdefined, zeroinitializer.
}
1772
1773// Handle ICmpInst instruction.
1774void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
1775 // Do not cache this lookup, getValueState calls later in the function might
1776 // invalidate the reference.
1777 if (ValueState[&I].isOverdefined())
1778 return (void)markOverdefined(&I);
1779
1780 Value *Op1 = I.getOperand(0);
1781 Value *Op2 = I.getOperand(1);
1782
1783 // For parameters, use ParamState which includes constant range info if
1784 // available.
1785 auto V1State = getValueState(Op1);
1786 auto V2State = getValueState(Op2);
1787
1788 Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
1789 if (C) {
1790 ValueLatticeElement CV;
1791 CV.markConstant(C);
1792 mergeInValue(ValueState[&I], &I, CV);
1793 return;
1794 }
1795
1796 // If operands are still unknown, wait for it to resolve.
1797 if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
1798 !SCCPSolver::isConstant(ValueState[&I]))
1799 return;
1800
1801 markOverdefined(&I);
1802}
1803
1804// Handle getelementptr instructions. If all operands are constants then we
1805// can turn this into a getelementptr ConstantExpr.
void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
  // resolvedUndefsIn might have marked I overdefined already; bail out.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  const ValueLatticeElement &PtrState = getValueState(I.getPointerOperand());
  if (PtrState.isUnknownOrUndef())
    return;

  // gep inbounds/nuw of non-null is non-null.
  if (PtrState.isNotConstant() && PtrState.getNotConstant()->isNullValue()) {
    if (I.hasNoUnsignedWrap() ||
        (I.isInBounds() &&
         !NullPointerIsDefined(I.getFunction(), I.getAddressSpace())))
      return (void)markNotNull(ValueState[&I], &I);
    return (void)markOverdefined(&I);
  }

  // Gather a constant for every operand; if any operand is not (yet) a
  // single constant, the GEP cannot be folded.
  // NOTE(review): the declaration of 'Operands' (presumably a
  // SmallVector<Constant *>) is not visible in this view of the file.
  Operands.reserve(I.getNumOperands());

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    const ValueLatticeElement &State = getValueState(I.getOperand(i));
    if (State.isUnknownOrUndef())
      return; // Operands are not resolved yet.

    if (Constant *C = getConstant(State, I.getOperand(i)->getType())) {
      Operands.push_back(C);
      continue;
    }

    return (void)markOverdefined(&I);
  }

  if (Constant *C = ConstantFoldInstOperands(&I, Operands, DL))
    markConstant(&I, C);
  else
    markOverdefined(&I);
}
1844
1845void SCCPInstVisitor::visitAllocaInst(AllocaInst &I) {
1846 if (!NullPointerIsDefined(I.getFunction(), I.getAddressSpace()))
1847 return (void)markNotNull(ValueState[&I], &I);
1848
1849 markOverdefined(&I);
1850}
1851
1852void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
1853 // If this store is of a struct, ignore it.
1854 if (SI.getOperand(0)->getType()->isStructTy())
1855 return;
1856
1857 if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1858 return;
1859
1860 GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1861 auto I = TrackedGlobals.find(GV);
1862 if (I == TrackedGlobals.end())
1863 return;
1864
1865 // Get the value we are storing into the global, then merge it.
1866 mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
1867 ValueLatticeElement::MergeOptions().setCheckWiden(false));
1868 if (I->second.isOverdefined())
1869 TrackedGlobals.erase(I); // No need to keep tracking this!
1870}
1871
  // NOTE(review): the enclosing signature and the lines that produce return
  // values are not visible in this view of the file; only the visible
  // structure is annotated. The visible checks derive an initial lattice
  // value from call-site attributes (range / returned-nonnull) and from
  // !range / !nonnull metadata on the instruction.
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->getType()->isIntOrIntVectorTy())
      if (std::optional<ConstantRange> Range = CB->getRange())
    if (CB->getType()->isPointerTy() && CB->isReturnNonNull())
  }

  if (I->getType()->isIntOrIntVectorTy())
    if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
  if (I->hasMetadata(LLVMContext::MD_nonnull))

}
1892
1893 // Handle load instructions. If the operand is a constant pointer to a constant
1894 // global, we can replace the load with the loaded constant value!
// Transfer function for loads: tries, in order, (1) tracked-global state,
// (2) constant folding through a constant pointer, and (3) falls back to
// the value implied by the load's metadata.
1895 void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
1896 // If this load is of a struct or the load is volatile, just mark the result
1897 // as overdefined.
1898 if (I.getType()->isStructTy() || I.isVolatile())
1899 return (void)markOverdefined(&I);
1900
1901 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1902 // discover a concrete value later.
1903 if (ValueState[&I].isOverdefined())
1904 return (void)markOverdefined(&I);
1905
1906 const ValueLatticeElement &PtrVal = getValueState(I.getOperand(0));
1907 if (PtrVal.isUnknownOrUndef())
1908 return; // The pointer is not resolved yet!
1909
1910 if (SCCPSolver::isConstant(PtrVal)) {
1911 Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
1912 ValueLatticeElement &IV = ValueState[&I];
1913
1914 // load null is undefined.
// If null is a defined address in this address space the loaded value is
// simply unknown to us (overdefined); otherwise the load is UB and we may
// leave the state as-is.
1915 if (isa<ConstantPointerNull>(Ptr)) {
1916 if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
1917 return (void)markOverdefined(IV, &I);
1918 else
1919 return;
1920 }
1921
1922 // Transform load (constant global) into the value loaded.
1923 if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
1924 if (!TrackedGlobals.empty()) {
1925 // If we are tracking this global, merge in the known value for it.
1926 auto It = TrackedGlobals.find(GV);
1927 if (It != TrackedGlobals.end()) {
1928 mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
1929 return;
1930 }
1931 }
1932 }
1933
1934 // Transform load from a constant into a constant if possible.
1935 if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
1936 return (void)markConstant(IV, &I, C);
1937 }
1938
// No fold possible: use whatever !range / !nonnull metadata implies.
1939 // Fall back to metadata.
1940 mergeInValue(ValueState[&I], &I, getValueFromMetadata(&I));
1941}
1942
// Transfer function for all call-like instructions (calls, invokes, callbr):
// first compute the lattice state of the call's result, then propagate the
// actual arguments into the callee's formal arguments (when tracked).
1943 void SCCPInstVisitor::visitCallBase(CallBase &CB) {
1944 handleCallResult(CB);
1945 handleCallArguments(CB);
1946}
1947
// Handle a call whose callee we are NOT tracking interprocedurally: try to
// constant-fold it from the lattice states of its arguments; otherwise fall
// back to the value implied by the call's metadata/attributes.
// NOTE(review): the Doxygen extraction dropped lines 1949 and 1962 -- they
// presumably declared `F` (the called function, used below) and the
// `Operands` vector; verify against upstream SCCPSolver.cpp.
1948 void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
1950
1951 // Void return and not tracking callee, just bail.
1952 if (CB.getType()->isVoidTy())
1953 return;
1954
1955 // Always mark struct return as overdefined.
1956 if (CB.getType()->isStructTy())
1957 return (void)markOverdefined(&CB);
1958
1959 // Otherwise, if we have a single return value case, and if the function is
1960 // a declaration, maybe we can constant fold it.
1961 if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
// Collect a Constant for every argument; bail if any is not yet a
// single known constant.
1963 for (const Use &A : CB.args()) {
1964 if (A.get()->getType()->isStructTy())
1965 return markOverdefined(&CB); // Can't handle struct args.
1966 if (A.get()->getType()->isMetadataTy())
1967 continue; // Carried in CB, not allowed in Operands.
1968 const ValueLatticeElement &State = getValueState(A);
1969
1970 if (State.isUnknownOrUndef())
1971 return; // Operands are not resolved yet.
1972 if (SCCPSolver::isOverdefined(State))
1973 return (void)markOverdefined(&CB);
1974 assert(SCCPSolver::isConstant(State) && "Unknown state!");
1975 Operands.push_back(getConstant(State, A->getType()));
1976 }
1977
// If the call's own state already hit overdefined, folding can't help.
1978 if (SCCPSolver::isOverdefined(getValueState(&CB)))
1979 return (void)markOverdefined(&CB);
1980
1981 // If we can constant fold this, mark the result of the call as a
1982 // constant.
1983 if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
1984 return (void)markConstant(&CB, C);
1985 }
1986
1987 // Fall back to metadata.
1988 mergeInValue(ValueState[&CB], &CB, getValueFromMetadata(&CB));
1989}
1990
// Propagate the lattice states of a call's actual arguments into the formal
// arguments of the callee, when the callee's incoming arguments are tracked.
// NOTE(review): the Doxygen extraction dropped lines 1992 and 2014 -- 1992
// presumably declared `F` (the called function, used below) and 2014
// presumably carried the merge-options argument of the struct-member
// mergeInValue call; verify against upstream SCCPSolver.cpp.
1991 void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
1993 // If this is a local function that doesn't have its address taken, mark its
1994 // entry block executable and merge in the actual arguments to the call into
1995 // the formal arguments of the function.
1996 if (TrackingIncomingArguments.count(F)) {
1997 markBlockExecutable(&F->front());
1998
1999 // Propagate information from this call site into the callee.
2000 auto CAI = CB.arg_begin();
2001 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2002 ++AI, ++CAI) {
2003 // If this argument is byval, and if the function is not readonly, there
2004 // will be an implicit copy formed of the input aggregate.
2005 if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
2006 markOverdefined(&*AI);
2007 continue;
2008 }
2009
// Struct-typed formals are tracked element-wise.
2010 if (auto *STy = dyn_cast<StructType>(AI->getType())) {
2011 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2012 ValueLatticeElement CallArg = getStructValueState(*CAI, i);
2013 mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
2015 }
// Scalar formals: intersect the caller's state with whatever the
// parameter's attributes (e.g. range/nonnull) already guarantee.
2016 } else {
2017 ValueLatticeElement CallArg =
2018 getValueState(*CAI).intersect(getArgAttributeVL(&*AI));
2019 mergeInValue(ValueState[&*AI], &*AI, CallArg, getMaxWidenStepsOpts());
2020 }
2021 }
2022 }
2023}
2024
// Refine the lattice state of an ssa.copy (PredicateInfo) instruction I,
// which copies CopyOf under the branch/assume condition described by PI:
// intersect CopyOf's state with the constraint `CopyOf Pred OtherOp`.
// NOTE(review): the Doxygen extraction dropped line 2051, presumably the
// assignment `ImposedCR = ConstantRange::makeAllowedICmpRegion(` that the
// continuation on line 2052 completes; verify against upstream
// SCCPSolver.cpp.
2025 void SCCPInstVisitor::handlePredicate(Instruction *I, Value *CopyOf,
2026 const PredicateBase *PI) {
2027 ValueLatticeElement CopyOfVal = getValueState(CopyOf);
2028 const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
// No usable constraint: I behaves exactly like a plain copy of CopyOf.
2029 if (!Constraint) {
2030 mergeInValue(ValueState[I], I, CopyOfVal);
2031 return;
2032 }
2033
2034 CmpInst::Predicate Pred = Constraint->Predicate;
2035 Value *OtherOp = Constraint->OtherOp;
2036
2037 // Wait until OtherOp is resolved.
2038 if (getValueState(OtherOp).isUnknown()) {
2039 addAdditionalUser(OtherOp, I);
2040 return;
2041 }
2042
2043 ValueLatticeElement CondVal = getValueState(OtherOp);
2044 ValueLatticeElement &IV = ValueState[I];
// Integer-range case: narrow CopyOf's range by the region the predicate
// allows against OtherOp's range.
2045 if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
2046 auto ImposedCR =
2047 ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
2048
2049 // Get the range imposed by the condition.
2050 if (CondVal.isConstantRange())
2052 Pred, CondVal.getConstantRange());
2053
2054 // Combine range info for the original value with the new range from the
2055 // condition.
2056 auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
2057 /*UndefAllowed=*/true);
2058 // Treat an unresolved input like a full range.
2059 if (CopyOfCR.isEmptySet())
2060 CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
2061 auto NewCR = ImposedCR.intersectWith(CopyOfCR);
2062 // If the existing information is != x, do not use the information from
2063 // a chained predicate, as the != x information is more likely to be
2064 // helpful in practice.
2065 if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
2066 NewCR = CopyOfCR;
2067
2068 // The new range is based on a branch condition. That guarantees that
2069 // neither of the compare operands can be undef in the branch targets,
2070 // unless we have conditions that are always true/false (e.g. icmp ule
2071 // i32, %a, i32_max). For the latter overdefined/empty range will be
2072 // inferred, but the branch will get folded accordingly anyways.
2073 addAdditionalUser(OtherOp, I);
2074 mergeInValue(
2075 IV, I, ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
2076 return;
2077 } else if (Pred == CmpInst::ICMP_EQ &&
2078 (CondVal.isConstant() || CondVal.isNotConstant())) {
2079 // For non-integer values or integer constant expressions, only
2080 // propagate equal constants or not-constants.
2081 addAdditionalUser(OtherOp, I);
2082 mergeInValue(IV, I, CondVal);
2083 return;
2084 } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
2085 // Propagate inequalities.
2086 addAdditionalUser(OtherOp, I);
2087 mergeInValue(IV, I, ValueLatticeElement::getNot(CondVal.getConstant()));
2088 return;
2089 }
2090
// The predicate tells us nothing extra: I is just a copy of CopyOf.
2091 return (void)mergeInValue(IV, I, CopyOfVal);
2092}
2093
// Compute the lattice state of a call's result: special-case a few
// intrinsics whose result range is computable (vscale,
// experimental.get.vector.length, and anything ConstantRange::intrinsic
// supports), then consult the interprocedural tracking tables, and finally
// defer to handleCallOverdefined for untracked callees.
// NOTE(review): the Doxygen extraction dropped several lines -- 2095
// (presumably declaring `F`, the called function used below), 2141
// (presumably declaring `OpRanges`), 2125 (presumably the lower bound of
// `Result`), and 2102/2134/2153/2173 (presumably
// `ValueLatticeElement::getRange(...)` / merge-option arguments of the
// mergeInValue calls they complete); verify against upstream
// SCCPSolver.cpp.
2094 void SCCPInstVisitor::handleCallResult(CallBase &CB) {
2096
2097 if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
// llvm.vscale: its range is fixed by the function's vscale_range attr.
2098 if (II->getIntrinsicID() == Intrinsic::vscale) {
2099 unsigned BitWidth = CB.getType()->getScalarSizeInBits();
2100 const ConstantRange Result = getVScaleRange(II->getFunction(), BitWidth);
2101 return (void)mergeInValue(ValueState[II], II,
2103 }
2104 if (II->getIntrinsicID() == Intrinsic::experimental_get_vector_length) {
2105 Value *CountArg = II->getArgOperand(0);
2106 Value *VF = II->getArgOperand(1);
2107 bool Scalable = cast<ConstantInt>(II->getArgOperand(2))->isOne();
2108
2109 // Computation happens in the larger type.
2110 unsigned BitWidth = std::max(CountArg->getType()->getScalarSizeInBits(),
2111 VF->getType()->getScalarSizeInBits());
2112
2113 ConstantRange Count = getValueState(CountArg)
2114 .asConstantRange(CountArg->getType(), false)
2115 .zeroExtend(BitWidth);
2116 ConstantRange MaxLanes = getValueState(VF)
2117 .asConstantRange(VF->getType(), false)
2118 .zeroExtend(BitWidth);
// For scalable vectors the per-iteration lane bound is VF * vscale.
2119 if (Scalable)
2120 MaxLanes =
2121 MaxLanes.multiply(getVScaleRange(II->getFunction(), BitWidth));
2122
2123 // The result is always less than both Count and MaxLanes.
2124 ConstantRange Result(
2126 APIntOps::umin(Count.getUpper(), MaxLanes.getUpper()));
2127
2128 // If Count <= MaxLanes, getvectorlength(Count, MaxLanes) = Count
2129 if (Count.icmp(CmpInst::ICMP_ULE, MaxLanes))
2130 Result = Count;
2131
2132 Result = Result.truncate(II->getType()->getScalarSizeInBits());
2133 return (void)mergeInValue(ValueState[II], II,
2135 }
2136
2137 if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
2138 // Compute result range for intrinsics supported by ConstantRange.
2139 // Do this even if we don't know a range for all operands, as we may
2140 // still know something about the result range, e.g. of abs(x).
2142 for (Value *Op : II->args()) {
2143 const ValueLatticeElement &State = getValueState(Op);
2144 if (State.isUnknownOrUndef())
2145 return;
2146 OpRanges.push_back(
2147 State.asConstantRange(Op->getType(), /*UndefAllowed=*/false));
2148 }
2149
2150 ConstantRange Result =
2151 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
2152 return (void)mergeInValue(ValueState[II], II,
2154 }
2155 }
2156
2157 // The common case is that we aren't tracking the callee, either because we
2158 // are not doing interprocedural analysis or the callee is indirect, or is
2159 // external. Handle these cases first.
2160 if (!F || F->isDeclaration())
2161 return handleCallOverdefined(CB);
2162
2163 // If this is a single/zero retval case, see if we're tracking the function.
2164 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
2165 if (!MRVFunctionsTracked.count(F))
2166 return handleCallOverdefined(CB); // Not tracking this callee.
2167
2168 // If we are tracking this callee, propagate the result of the function
2169 // into this call site.
2170 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2171 mergeInValue(getStructValueState(&CB, i), &CB,
2172 TrackedMultipleRetVals[std::make_pair(F, i)],
2174 } else {
2175 auto TFRVI = TrackedRetVals.find(F);
2176 if (TFRVI == TrackedRetVals.end())
2177 return handleCallOverdefined(CB); // Not tracking this callee.
2178
2179 // If so, propagate the return value of the callee into this call result.
2180 mergeInValue(ValueState[&CB], &CB, TFRVI->second, getMaxWidenStepsOpts());
2181 }
2182}
2183
// Returns true iff Inst's lattice state can never improve again: for a
// struct-typed instruction every element must already be overdefined; for
// anything else its single lattice value must be overdefined.
2184 bool SCCPInstVisitor::isInstFullyOverDefined(Instruction &Inst) {
2185 // For structure Type, we handle each member separately.
2186 // A structure object won't be considered as overdefined when
2187 // there is at least one member that is not overdefined.
2188 if (StructType *STy = dyn_cast<StructType>(Inst.getType())) {
2189 for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i) {
2190 if (!getStructValueState(&Inst, i).isOverdefined())
2191 return false;
2192 }
2193 return true;
2194 }
2195
2196 return getValueState(&Inst).isOverdefined();
2197}
2198
// Body of the SCCP fixed-point driver. Alternates between draining the
// instruction worklist (re-visiting instructions whose operand states
// changed) and the basic-block worklist (visiting every instruction of a
// newly-executable block) until both are empty, i.e. a fixed point.
// NOTE(review): the Doxygen extraction dropped the enclosing signature on
// line 2199 -- presumably `void SCCPInstVisitor::solve() {`; verify
// against upstream SCCPSolver.cpp.
2200 // Process the work lists until they are empty!
2201 while (!BBWorkList.empty() || !InstWorkList.empty()) {
2202 // Process the instruction work list.
2203 while (!InstWorkList.empty()) {
2204 Instruction *I = InstWorkList.pop_back_val();
// An instruction popped from the worklist is no longer pending, so it
// is removed from the Invalidated set.
2205 Invalidated.erase(I);
2206
2207 LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
2208
2209 visit(I);
2210 }
2211
2212 // Process the basic block work list.
2213 while (!BBWorkList.empty()) {
2214 BasicBlock *BB = BBWorkList.pop_back_val();
2215 BBVisited.insert(BB);
2216
2217 LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
// CurI tracks the instruction currently being visited while sweeping
// a whole block; cleared once the block is done.
2218 for (Instruction &I : *BB) {
2219 CurI = &I;
2220 visit(I);
2221 }
2222 CurI = nullptr;
2223 }
2224 }
2225}
2226
// Body of resolvedUndef (the symbol index shows the signature as
// `bool SCCPInstVisitor::resolvedUndef(Instruction &I)`): after solving,
// force any instruction whose lattice state is still unknown to
// overdefined, except the cases where leaving it unknown/undef is
// deliberate (tracked calls, loads, extract/insertvalue). Returns true if
// a state was changed.
// NOTE(review): the Doxygen extraction dropped the signature line 2227 and
// line 2243, which presumably held the
// `isa<ExtractValueInst>/isa<InsertValueInst>` check that the comments on
// 2241-2242 describe; verify against upstream SCCPSolver.cpp.
2228 // Look for instructions which produce undef values.
2229 if (I.getType()->isVoidTy())
2230 return false;
2231
2232 if (auto *STy = dyn_cast<StructType>(I.getType())) {
2233 // Only a few things that can be structs matter for undef.
2234
2235 // Tracked calls must never be marked overdefined in resolvedUndefsIn.
2236 if (auto *CB = dyn_cast<CallBase>(&I))
2237 if (Function *F = CB->getCalledFunction())
2238 if (MRVFunctionsTracked.count(F))
2239 return false;
2240
2241 // extractvalue and insertvalue don't need to be marked; they are
2242 // tracked as precisely as their operands.
2244 return false;
2245 // Send the results of everything else to overdefined. We could be
2246 // more precise than this but it isn't worth bothering.
2247 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2248 ValueLatticeElement &LV = getStructValueState(&I, i);
2249 if (LV.isUnknown()) {
2250 markOverdefined(LV, &I);
2251 return true;
2252 }
2253 }
2254 return false;
2255 }
2256
2257 ValueLatticeElement &LV = getValueState(&I);
2258 if (!LV.isUnknown())
2259 return false;
2260
2261 // There are two reasons a call can have an undef result
2262 // 1. It could be tracked.
2263 // 2. It could be constant-foldable.
2264 // Because of the way we solve return values, tracked calls must
2265 // never be marked overdefined in resolvedUndefsIn.
2266 if (auto *CB = dyn_cast<CallBase>(&I))
2267 if (Function *F = CB->getCalledFunction())
2268 if (TrackedRetVals.count(F))
2269 return false;
2270
2271 if (isa<LoadInst>(I)) {
2272 // A load here means one of two things: a load of undef from a global,
2273 // a load from an unknown pointer. Either way, having it return undef
2274 // is okay.
2275 return false;
2276 }
2277
// Anything else still unknown at this point is forced to overdefined.
2278 markOverdefined(&I);
2279 return true;
2280}
2281
2282 /// While solving the dataflow for a function, we don't compute a result for
2283 /// operations with an undef operand, to allow undef to be lowered to a
2284 /// constant later. For example, constant folding of "zext i8 undef to i16"
2285 /// would result in "i16 0", and if undef is later lowered to "i8 1", then the
2286 /// zext result would become "i16 1" and would result into an overdefined
2287 /// lattice value once merged with the previous result. Not computing the
2288 /// result of the zext (treating undef the same as unknown) allows us to handle
2289 /// a later undef->constant lowering more optimally.
2290 ///
2291 /// However, if the operand remains undef when the solver returns, we do need
2292 /// to assign some result to the instruction (otherwise we would treat it as
2293 /// unreachable). For simplicity, we mark any instructions that are still
2294 /// unknown as overdefined.
// NOTE(review): the Doxygen extraction dropped the signature line 2295 --
// the symbol index shows it as
// `bool SCCPInstVisitor::resolvedUndefsIn(Function &F)`; verify against
// upstream SCCPSolver.cpp. Applies resolvedUndef to every instruction of
// every executable block of F; returns true if any state changed.
2296 bool MadeChange = false;
2297 for (BasicBlock &BB : F) {
// Skip blocks the solver proved unreachable.
2298 if (!BBExecutable.count(&BB))
2299 continue;
2300
2301 for (Instruction &I : BB)
2302 MadeChange |= resolvedUndef(I);
2303 }
2304
2305 LLVM_DEBUG(if (MadeChange) dbgs()
2306 << "\nResolved undefs in " << F.getName() << '\n');
2307
2308 return MadeChange;
2309}
2310
2311//===----------------------------------------------------------------------===//
2312//
2313// SCCPSolver implementations
2314//
2316 const DataLayout &DL,
2317 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
2318 LLVMContext &Ctx)
2319 : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}
2320
// Out-of-line defaulted destructor: required here so the unique-owner of
// the pImpl SCCPInstVisitor is destroyed where the type is complete.
2321 SCCPSolver::~SCCPSolver() = default;
2322
2324 AssumptionCache &AC) {
2325 Visitor->addPredicateInfo(F, DT, AC);
2326}
2327
2329 Visitor->removeSSACopies(F);
2330}
2331
2333 return Visitor->markBlockExecutable(BB);
2334}
2335
2337 return Visitor->getPredicateInfoFor(I);
2338}
2339
2341 Visitor->trackValueOfGlobalVariable(GV);
2342}
2343
2345 Visitor->addTrackedFunction(F);
2346}
2347
2349 Visitor->addToMustPreserveReturnsInFunctions(F);
2350}
2351
2353 return Visitor->mustPreserveReturn(F);
2354}
2355
2357 Visitor->addArgumentTrackedFunction(F);
2358}
2359
2361 return Visitor->isArgumentTrackedFunction(F);
2362}
2363
2366 return Visitor->getArgumentTrackedFunctions();
2367}
2368
// Thin pImpl forwarder: run the solver to a fixed point.
2369 void SCCPSolver::solve() { Visitor->solve(); }
2370
2372 return Visitor->resolvedUndefsIn(F);
2373}
2374
2376 Visitor->solveWhileResolvedUndefsIn(M);
2377}
2378
2379void
2381 Visitor->solveWhileResolvedUndefsIn(WorkList);
2382}
2383
2385 Visitor->solveWhileResolvedUndefs();
2386}
2387
2389 return Visitor->isBlockExecutable(BB);
2390}
2391
2393 return Visitor->isEdgeFeasible(From, To);
2394}
2395
2396std::vector<ValueLatticeElement>
2398 return Visitor->getStructLatticeValueFor(V);
2399}
2400
2402 return Visitor->removeLatticeValueFor(V);
2403}
2404
2406 Visitor->resetLatticeValueFor(Call);
2407}
2408
2410 return Visitor->getLatticeValueFor(V);
2411}
2412
2415 return Visitor->getTrackedRetVals();
2416}
2417
2420 return Visitor->getTrackedGlobals();
2421}
2422
2424 return Visitor->getMRVFunctionsTracked();
2425}
2426
// Thin pImpl forwarder: force V's lattice state to overdefined.
2427 void SCCPSolver::markOverdefined(Value *V) { Visitor->markOverdefined(V); }
2428
2430 Visitor->trackValueOfArgument(V);
2431}
2432
2434 return Visitor->isStructLatticeConstant(F, STy);
2435}
2436
2438 Type *Ty) const {
2439 return Visitor->getConstant(LV, Ty);
2440}
2441
2443 return Visitor->getConstantOrNull(V);
2444}
2445
2447 const SmallVectorImpl<ArgInfo> &Args) {
2448 Visitor->setLatticeValueForSpecializationArguments(F, Args);
2449}
2450
2452 Visitor->markFunctionUnreachable(F);
2453}
2454
// Thin pImpl forwarder: apply the transfer function for a single instruction.
2455 void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }
2456
// Thin pImpl forwarder: apply the transfer function for a call instruction.
2457 void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Hexagon Common GEP
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts()
Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
static const unsigned MaxNumRangeExtensions
static ValueLatticeElement getValueFromMetadata(const Instruction *I)
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
static ConstantInt * getConstantInt(Value *V, const DataLayout &DL)
Extract ConstantInt from value, looking through IntToPtr and PointerNullValue.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
Class for arbitrary precision integers.
Definition APInt.h:78
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Function * getFunction() const
Definition Constants.h:943
BasicBlock * getBasicBlock() const
Definition Constants.h:942
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:222
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange castOp(Instruction::CastOps CastOp, uint32_t BitWidth) const
Return a new range representing the possible values resulting from an application of the specified ca...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
static LLVM_ABI ConstantRange intrinsic(Intrinsic::ID IntrinsicID, ArrayRef< ConstantRange > Ops)
Compute range of intrinsic result for the given operand ranges.
LLVM_ABI bool isSizeLargerThan(uint64_t MaxSize) const
Compare set size of this range with Value.
static LLVM_ABI bool isIntrinsicSupported(Intrinsic::ID IntrinsicID)
Returns true if ConstantRange calculations are supported for intrinsic with IntrinsicID.
bool isSingleElement() const
Return true if this set contains exactly one member.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
const APInt & getUpper() const
Return the upper value for this range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI ConstantRange binaryOp(Instruction::BinaryOps BinOp, const ConstantRange &Other) const
Return a new range representing the possible values resulting from an application of the specified bi...
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static DebugLoc getTemporary()
Definition DebugLoc.h:160
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a struct member or array element value from an aggregate value.
unsigned getNumIndices() const
idx_iterator idx_begin() const
This class represents a freeze function that returns random concrete value if an operand is either a ...
Argument * arg_iterator
Definition Function.h:72
static GEPNoWrapFlags noUnsignedWrap()
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
This instruction inserts a struct field of array element value into an aggregate value.
Value * getInsertedValueOperand()
unsigned getNumIndices() const
idx_iterator idx_begin() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
LLVM_ABI void setNonNeg(bool b=true)
Set or clear the nneg flag on this instruction, which must be a zext instruction.
LLVM_ABI bool hasNonNeg() const LLVM_READONLY
Determine whether the the nneg flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isSpecialTerminator() const
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Metadata node.
Definition Metadata.h:1078
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
LLVM_ABI std::optional< PredicateConstraint > getConstraint() const
Fetch condition in the form of PredicateConstraint, if possible.
Return a value (possibly void), from a function.
Helper class for SCCPSolver.
const MapVector< Function *, ValueLatticeElement > & getTrackedRetVals() const
const PredicateBase * getPredicateInfoFor(Instruction *I)
std::vector< ValueLatticeElement > getStructLatticeValueFor(Value *V) const
bool resolvedUndef(Instruction &I)
void markFunctionUnreachable(Function *F)
bool markBlockExecutable(BasicBlock *BB)
bool resolvedUndefsIn(Function &F)
While solving the dataflow for a function, we don't compute a result for operations with an undef ope...
Constant * getConstant(const ValueLatticeElement &LV, Type *Ty) const
SCCPInstVisitor(const DataLayout &DL, std::function< const TargetLibraryInfo &(Function &)> GetTLI, LLVMContext &Ctx)
const DenseMap< GlobalVariable *, ValueLatticeElement > & getTrackedGlobals() const
const ValueLatticeElement & getLatticeValueFor(Value *V) const
void removeLatticeValueFor(Value *V)
void trackValueOfArgument(Argument *A)
void visitCallInst(CallInst &I)
void markOverdefined(Value *V)
bool isArgumentTrackedFunction(Function *F)
void addTrackedFunction(Function *F)
void solveWhileResolvedUndefsIn(Module &M)
void trackValueOfGlobalVariable(GlobalVariable *GV)
Constant * getConstantOrNull(Value *V) const
void removeSSACopies(Function &F)
const SmallPtrSet< Function *, 16 > & getMRVFunctionsTracked() const
const SmallPtrSetImpl< Function * > & getArgumentTrackedFunctions() const
void resetLatticeValueFor(CallBase *Call)
Invalidate the Lattice Value of Call and its users after specializing the call.
ValueLatticeElement getArgAttributeVL(Argument *A)
void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC)
void addToMustPreserveReturnsInFunctions(Function *F)
void addArgumentTrackedFunction(Function *F)
bool isStructLatticeConstant(Function *F, StructType *STy)
void solveWhileResolvedUndefsIn(SmallVectorImpl< Function * > &WorkList)
bool isBlockExecutable(BasicBlock *BB) const
bool mustPreserveReturn(Function *F)
void setLatticeValueForSpecializationArguments(Function *F, const SmallVectorImpl< ArgInfo > &Args)
bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const
SCCPSolver - This interface class is a general purpose solver for Sparse Conditional Constant Propaga...
Definition SCCPSolver.h:66
LLVM_ABI void visitCall(CallInst &I)
LLVM_ABI ~SCCPSolver()
LLVM_ABI void resetLatticeValueFor(CallBase *Call)
Invalidate the Lattice Value of Call and its users after specializing the call.
LLVM_ABI void trackValueOfGlobalVariable(GlobalVariable *GV)
trackValueOfGlobalVariable - Clients can use this method to inform the SCCPSolver that it should trac...
LLVM_ABI bool tryToReplaceWithConstant(Value *V)
LLVM_ABI void inferArgAttributes() const
LLVM_ABI bool isStructLatticeConstant(Function *F, StructType *STy)
LLVM_ABI void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC)
LLVM_ABI void solve()
Solve - Solve for constants and executable blocks.
LLVM_ABI void visit(Instruction *I)
LLVM_ABI void trackValueOfArgument(Argument *V)
trackValueOfArgument - Mark the specified argument overdefined unless it has a range attribute.
LLVM_ABI const DenseMap< GlobalVariable *, ValueLatticeElement > & getTrackedGlobals() const
getTrackedGlobals - Get and return the set of inferred initializers for global variables.
LLVM_ABI void addTrackedFunction(Function *F)
addTrackedFunction - If the SCCP solver is supposed to track calls into and out of the specified func...
LLVM_ABI void solveWhileResolvedUndefsIn(Module &M)
LLVM_ABI const PredicateBase * getPredicateInfoFor(Instruction *I)
LLVM_ABI const SmallPtrSetImpl< Function * > & getArgumentTrackedFunctions() const
LLVM_ABI const SmallPtrSet< Function *, 16 > & getMRVFunctionsTracked() const
getMRVFunctionsTracked - Get the set of functions which return multiple values tracked by the pass.
LLVM_ABI bool resolvedUndefsIn(Function &F)
resolvedUndefsIn - While solving the dataflow for a function, we assume that branches on undef values...
LLVM_ABI void addArgumentTrackedFunction(Function *F)
LLVM_ABI void solveWhileResolvedUndefs()
LLVM_ABI void removeLatticeValueFor(Value *V)
LLVM_ABI std::vector< ValueLatticeElement > getStructLatticeValueFor(Value *V) const
LLVM_ABI Constant * getConstantOrNull(Value *V) const
Return either a Constant or nullptr for a given Value.
LLVM_ABI bool simplifyInstsInBlock(BasicBlock &BB, SmallPtrSetImpl< Value * > &InsertedValues, Statistic &InstRemovedStat, Statistic &InstReplacedStat)
LLVM_ABI Constant * getConstant(const ValueLatticeElement &LV, Type *Ty) const
Helper to return a Constant if LV is either a constant or a constant range with a single element.
LLVM_ABI const ValueLatticeElement & getLatticeValueFor(Value *V) const
LLVM_ABI void addToMustPreserveReturnsInFunctions(Function *F)
Add function to the list of functions whose return cannot be modified.
LLVM_ABI bool removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU, BasicBlock *&NewUnreachableBB) const
LLVM_ABI bool isBlockExecutable(BasicBlock *BB) const
LLVM_ABI void inferReturnAttributes() const
LLVM_ABI bool markBlockExecutable(BasicBlock *BB)
markBlockExecutable - This method can be used by clients to mark all of the blocks that are known to ...
LLVM_ABI void setLatticeValueForSpecializationArguments(Function *F, const SmallVectorImpl< ArgInfo > &Args)
Set the Lattice Value for the arguments of a specialization F.
static LLVM_ABI bool isConstant(const ValueLatticeElement &LV)
LLVM_ABI const MapVector< Function *, ValueLatticeElement > & getTrackedRetVals() const
getTrackedRetVals - Get the inferred return value map.
LLVM_ABI bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const
LLVM_ABI bool mustPreserveReturn(Function *F)
Returns true if the return of the given function cannot be modified.
static LLVM_ABI bool isOverdefined(const ValueLatticeElement &LV)
LLVM_ABI void markFunctionUnreachable(Function *F)
Mark all of the blocks in function F non-executable.
LLVM_ABI bool isArgumentTrackedFunction(Function *F)
Returns true if the given function is in the solver's set of argument-tracked functions.
LLVM_ABI SCCPSolver(const DataLayout &DL, std::function< const TargetLibraryInfo &(Function &)> GetTLI, LLVMContext &Ctx)
LLVM_ABI void markOverdefined(Value *V)
markOverdefined - Mark the specified value overdefined.
LLVM_ABI void removeSSACopies(Function &F)
This class represents the LLVM 'select' instruction.
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
iterator begin() const
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
Value * getOperand(unsigned i) const
Definition User.h:233
This class represents lattice values for constants.
static ValueLatticeElement getRange(ConstantRange CR, bool MayIncludeUndef=false)
LLVM_ABI Constant * getCompare(CmpInst::Predicate Pred, Type *Ty, const ValueLatticeElement &Other, const DataLayout &DL) const
true, false or undef constants, or nullptr if the comparison cannot be evaluated.
bool isConstantRangeIncludingUndef() const
static ValueLatticeElement getNot(Constant *C)
ConstantRange asConstantRange(unsigned BW, bool UndefAllowed=false) const
void setNumRangeExtensions(unsigned N)
const ConstantRange & getConstantRange(bool UndefAllowed=true) const
Returns the constant range for this value.
bool isConstantRange(bool UndefAllowed=true) const
Returns true if this value is a constant range.
static ValueLatticeElement get(Constant *C)
unsigned getNumRangeExtensions() const
Constant * getNotConstant() const
LLVM_ABI ValueLatticeElement intersect(const ValueLatticeElement &Other) const
Combine two sets of facts about the same value into a single set of facts.
Constant * getConstant() const
bool mergeIn(const ValueLatticeElement &RHS, MergeOptions Opts=MergeOptions())
Updates this object to approximate both this object and RHS.
bool markConstant(Constant *V, bool MayIncludeUndef=false)
static ValueLatticeElement getOverdefined()
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI std::string getNameOrAsOperand() const
Definition Value.cpp:464
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Represents an op.with.overflow intrinsic.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2259
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
static bool replaceSignedInst(SCCPSolver &Solver, SmallPtrSetImpl< Value * > &InsertedValues, Instruction &Inst)
Try to replace signed instructions with their unsigned equivalent.
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
NoopStatistic Statistic
Definition Statistic.h:162
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
static bool refineInstruction(SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues, Instruction &Inst)
Try to use Inst's value range from Solver to infer the NUW flag.
static void inferAttribute(Function *F, unsigned AttrIndex, const ValueLatticeElement &Val)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
Struct to control some aspects related to merging constant ranges.
MergeOptions & setMaxWidenSteps(unsigned Steps=1)