LLVM 23.0.0git
ScalarEvolution.cpp
Go to the documentation of this file.
1//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the scalar evolution analysis
10// engine, which is used primarily to analyze expressions involving induction
11// variables in loops.
12//
13// There are several aspects to this library. First is the representation of
14// scalar expressions, which are represented as subclasses of the SCEV class.
15// These classes are used to represent certain types of subexpressions that we
16// can handle. We only create one SCEV of a particular shape, so
17// pointer-comparisons for equality are legal.
18//
19// One important aspect of the SCEV objects is that they are never cyclic, even
20// if there is a cycle in the dataflow for an expression (ie, a PHI node). If
21// the PHI node is one of the idioms that we can represent (e.g., a polynomial
22// recurrence) then we represent it directly as a recurrence node, otherwise we
23// represent it as a SCEVUnknown node.
24//
25// In addition to being able to represent expressions of various types, we also
26// have folders that are used to build the *canonical* representation for a
27// particular expression. These folders are capable of using a variety of
28// rewrite rules to simplify the expressions.
29//
30// Once the folders are defined, we can implement the more interesting
31// higher-level code, such as the code that recognizes PHI nodes of various
32// types, computes the execution count of a loop, etc.
33//
34// TODO: We should use these routines and value representations to implement
35// dependence analysis!
36//
37//===----------------------------------------------------------------------===//
38//
39// There are several good references for the techniques used in this analysis.
40//
41// Chains of recurrences -- a method to expedite the evaluation
42// of closed-form functions
43// Olaf Bachmann, Paul S. Wang, Eugene V. Zima
44//
45// On computational properties of chains of recurrences
46// Eugene V. Zima
47//
48// Symbolic Evaluation of Chains of Recurrences for Loop Optimization
49// Robert A. van Engelen
50//
51// Efficient Symbolic Analysis for Optimizing Compilers
52// Robert A. van Engelen
53//
54// Using the chains of recurrences algebra for data dependence testing and
55// induction variable substitution
56// MS Thesis, Johnie Birch
57//
58//===----------------------------------------------------------------------===//
59
61#include "llvm/ADT/APInt.h"
62#include "llvm/ADT/ArrayRef.h"
63#include "llvm/ADT/DenseMap.h"
65#include "llvm/ADT/FoldingSet.h"
66#include "llvm/ADT/STLExtras.h"
67#include "llvm/ADT/ScopeExit.h"
68#include "llvm/ADT/Sequence.h"
71#include "llvm/ADT/Statistic.h"
73#include "llvm/ADT/StringRef.h"
83#include "llvm/Config/llvm-config.h"
84#include "llvm/IR/Argument.h"
85#include "llvm/IR/BasicBlock.h"
86#include "llvm/IR/CFG.h"
87#include "llvm/IR/Constant.h"
89#include "llvm/IR/Constants.h"
90#include "llvm/IR/DataLayout.h"
92#include "llvm/IR/Dominators.h"
93#include "llvm/IR/Function.h"
94#include "llvm/IR/GlobalAlias.h"
95#include "llvm/IR/GlobalValue.h"
97#include "llvm/IR/InstrTypes.h"
98#include "llvm/IR/Instruction.h"
101#include "llvm/IR/Intrinsics.h"
102#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Operator.h"
104#include "llvm/IR/PatternMatch.h"
105#include "llvm/IR/Type.h"
106#include "llvm/IR/Use.h"
107#include "llvm/IR/User.h"
108#include "llvm/IR/Value.h"
109#include "llvm/IR/Verifier.h"
111#include "llvm/Pass.h"
112#include "llvm/Support/Casting.h"
115#include "llvm/Support/Debug.h"
121#include <algorithm>
122#include <cassert>
123#include <climits>
124#include <cstdint>
125#include <cstdlib>
126#include <map>
127#include <memory>
128#include <numeric>
129#include <optional>
130#include <tuple>
131#include <utility>
132#include <vector>
133
134using namespace llvm;
135using namespace PatternMatch;
136using namespace SCEVPatternMatch;
137
138#define DEBUG_TYPE "scalar-evolution"
139
// Pass statistics, reported under -stats (see llvm/ADT/Statistic.h). They
// track how often SCEV succeeds or fails at computing loop exit counts.
STATISTIC(NumExitCountsComputed,
          "Number of loop exits with predictable exit counts");
STATISTIC(NumExitCountsNotComputed,
          "Number of loop exits without predictable exit counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");
146
// SCEV self-verification is slow, so it defaults on only in builds that opt
// into expensive checks. Either default can still be overridden at run time:
// the -verify-scev option below is bound to this variable via cl::location.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifySCEV = true;
#else
bool llvm::VerifySCEV = false;
#endif
152
154 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
155 cl::desc("Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
157 "derived loop"),
158 cl::init(100));
159
161 "verify-scev", cl::Hidden, cl::location(VerifySCEV),
162 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
164 "verify-scev-strict", cl::Hidden,
165 cl::desc("Enable stricter verification with -verify-scev is passed"));
166
168 "scev-verify-ir", cl::Hidden,
169 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
170 cl::init(false));
171
173 "scev-mulops-inline-threshold", cl::Hidden,
174 cl::desc("Threshold for inlining multiplication operands into a SCEV"),
175 cl::init(32));
176
178 "scev-addops-inline-threshold", cl::Hidden,
179 cl::desc("Threshold for inlining addition operands into a SCEV"),
180 cl::init(500));
181
183 "scalar-evolution-max-scev-compare-depth", cl::Hidden,
184 cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
185 cl::init(32));
186
188 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
189 cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
190 cl::init(2));
191
193 "scalar-evolution-max-value-compare-depth", cl::Hidden,
194 cl::desc("Maximum depth of recursive value complexity comparisons"),
195 cl::init(2));
196
198 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
199 cl::desc("Maximum depth of recursive arithmetics"),
200 cl::init(32));
201
203 "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
204 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));
205
207 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
208 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
209 cl::init(8));
210
212 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
213 cl::desc("Max coefficients in AddRec during evolving"),
214 cl::init(8));
215
217 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
218 cl::desc("Size of the expression which is considered huge"),
219 cl::init(4096));
220
222 "scev-range-iter-threshold", cl::Hidden,
223 cl::desc("Threshold for switching to iteratively computing SCEV ranges"),
224 cl::init(32));
225
227 "scalar-evolution-max-loop-guard-collection-depth", cl::Hidden,
228 cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1));
229
// When printing the analysis, classify and print a SCEV for every
// instruction in the function (verbose; enabled by default).
static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
                    cl::Hidden, cl::init(true),
                    cl::desc("When printing analysis, include information on every instruction"));
234
236 "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
237 cl::init(false),
238 cl::desc("Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
240
242 "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
243 cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
245 cl::init(8));
246
// Controls whether SCEV analyzes <= and >= exit comparisons in loops that
// are known to be finite (on by default).
static cl::opt<bool>
    EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden,
                            cl::desc("Handle <= and >= in finite loops"),
                            cl::init(true));
251
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
254 cl::desc("Infer nuw/nsw flags using context where suitable"),
255 cl::init(true));
256
257//===----------------------------------------------------------------------===//
258// SCEV class definitions
259//===----------------------------------------------------------------------===//
260
262 // Leaf nodes are always their own canonical.
263 switch (getSCEVType()) {
264 case scConstant:
265 case scVScale:
266 case scUnknown:
267 CanonicalSCEV = this;
268 return;
269 default:
270 break;
271 }
272
273 // For all other expressions, check whether any immediate operand has a
274 // different canonical. Since operands are always created before their parent,
275 // their canonical pointers are already set — no recursion needed.
276 bool Changed = false;
278 for (SCEVUse Op : operands()) {
279 CanonOps.push_back(Op->getCanonical());
280 Changed |= CanonOps.back() != Op.getPointer();
281 }
282
283 if (!Changed) {
284 CanonicalSCEV = this;
285 return;
286 }
287
288 auto *NAry = dyn_cast<SCEVNAryExpr>(this);
289 SCEV::NoWrapFlags Flags = NAry ? NAry->getNoWrapFlags() : SCEV::FlagAnyWrap;
290 switch (getSCEVType()) {
291 case scPtrToAddr:
292 CanonicalSCEV = SE.getPtrToAddrExpr(CanonOps[0]);
293 return;
294 case scPtrToInt:
295 CanonicalSCEV = SE.getPtrToIntExpr(CanonOps[0], getType());
296 return;
297 case scTruncate:
298 CanonicalSCEV = SE.getTruncateExpr(CanonOps[0], getType());
299 return;
300 case scZeroExtend:
301 CanonicalSCEV = SE.getZeroExtendExpr(CanonOps[0], getType());
302 return;
303 case scSignExtend:
304 CanonicalSCEV = SE.getSignExtendExpr(CanonOps[0], getType());
305 return;
306 case scUDivExpr:
307 CanonicalSCEV = SE.getUDivExpr(CanonOps[0], CanonOps[1]);
308 return;
309 case scAddExpr:
310 CanonicalSCEV = SE.getAddExpr(CanonOps, Flags);
311 return;
312 case scMulExpr:
313 CanonicalSCEV = SE.getMulExpr(CanonOps, Flags);
314 return;
315 case scAddRecExpr:
317 CanonOps, cast<SCEVAddRecExpr>(this)->getLoop(), Flags);
318 return;
319 case scSMaxExpr:
320 CanonicalSCEV = SE.getSMaxExpr(CanonOps);
321 return;
322 case scUMaxExpr:
323 CanonicalSCEV = SE.getUMaxExpr(CanonOps);
324 return;
325 case scSMinExpr:
326 CanonicalSCEV = SE.getSMinExpr(CanonOps);
327 return;
328 case scUMinExpr:
329 CanonicalSCEV = SE.getUMinExpr(CanonOps);
330 return;
332 CanonicalSCEV = SE.getUMinExpr(CanonOps, /*Sequential=*/true);
333 return;
334 default:
335 llvm_unreachable("Unknown SCEV type");
336 }
337}
338
339//===----------------------------------------------------------------------===//
340// Implementation of the SCEV class.
341//
342
343#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
345 print(dbgs());
346 dbgs() << '\n';
347}
348#endif
349
350void SCEV::print(raw_ostream &OS) const {
351 switch (getSCEVType()) {
352 case scConstant:
353 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
354 return;
355 case scVScale:
356 OS << "vscale";
357 return;
358 case scPtrToAddr:
359 case scPtrToInt: {
360 const SCEVCastExpr *PtrCast = cast<SCEVCastExpr>(this);
361 const SCEV *Op = PtrCast->getOperand();
362 StringRef OpS = getSCEVType() == scPtrToAddr ? "addr" : "int";
363 OS << "(ptrto" << OpS << " " << *Op->getType() << " " << *Op << " to "
364 << *PtrCast->getType() << ")";
365 return;
366 }
367 case scTruncate: {
368 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
369 const SCEV *Op = Trunc->getOperand();
370 OS << "(trunc " << *Op->getType() << " " << *Op << " to "
371 << *Trunc->getType() << ")";
372 return;
373 }
374 case scZeroExtend: {
376 const SCEV *Op = ZExt->getOperand();
377 OS << "(zext " << *Op->getType() << " " << *Op << " to "
378 << *ZExt->getType() << ")";
379 return;
380 }
381 case scSignExtend: {
383 const SCEV *Op = SExt->getOperand();
384 OS << "(sext " << *Op->getType() << " " << *Op << " to "
385 << *SExt->getType() << ")";
386 return;
387 }
388 case scAddRecExpr: {
389 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
390 OS << "{" << *AR->getOperand(0);
391 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
392 OS << ",+," << *AR->getOperand(i);
393 OS << "}<";
394 if (AR->hasNoUnsignedWrap())
395 OS << "nuw><";
396 if (AR->hasNoSignedWrap())
397 OS << "nsw><";
398 if (AR->hasNoSelfWrap() && !AR->hasNoUnsignedWrap() &&
399 !AR->hasNoSignedWrap())
400 OS << "nw><";
401 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
402 OS << ">";
403 return;
404 }
405 case scAddExpr:
406 case scMulExpr:
407 case scUMaxExpr:
408 case scSMaxExpr:
409 case scUMinExpr:
410 case scSMinExpr:
412 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
413 const char *OpStr = nullptr;
414 switch (NAry->getSCEVType()) {
415 case scAddExpr: OpStr = " + "; break;
416 case scMulExpr: OpStr = " * "; break;
417 case scUMaxExpr: OpStr = " umax "; break;
418 case scSMaxExpr: OpStr = " smax "; break;
419 case scUMinExpr:
420 OpStr = " umin ";
421 break;
422 case scSMinExpr:
423 OpStr = " smin ";
424 break;
426 OpStr = " umin_seq ";
427 break;
428 default:
429 llvm_unreachable("There are no other nary expression types.");
430 }
431 OS << "("
433 << ")";
434 switch (NAry->getSCEVType()) {
435 case scAddExpr:
436 case scMulExpr:
437 if (NAry->hasNoUnsignedWrap())
438 OS << "<nuw>";
439 if (NAry->hasNoSignedWrap())
440 OS << "<nsw>";
441 break;
442 default:
443 // Nothing to print for other nary expressions.
444 break;
445 }
446 return;
447 }
448 case scUDivExpr: {
449 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
450 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
451 return;
452 }
453 case scUnknown:
454 cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);
455 return;
457 OS << "***COULDNOTCOMPUTE***";
458 return;
459 }
460 llvm_unreachable("Unknown SCEV kind!");
461}
462
464 switch (getSCEVType()) {
465 case scConstant:
466 return cast<SCEVConstant>(this)->getType();
467 case scVScale:
468 return cast<SCEVVScale>(this)->getType();
469 case scPtrToAddr:
470 case scPtrToInt:
471 case scTruncate:
472 case scZeroExtend:
473 case scSignExtend:
474 return cast<SCEVCastExpr>(this)->getType();
475 case scAddRecExpr:
476 return cast<SCEVAddRecExpr>(this)->getType();
477 case scMulExpr:
478 return cast<SCEVMulExpr>(this)->getType();
479 case scUMaxExpr:
480 case scSMaxExpr:
481 case scUMinExpr:
482 case scSMinExpr:
483 return cast<SCEVMinMaxExpr>(this)->getType();
485 return cast<SCEVSequentialMinMaxExpr>(this)->getType();
486 case scAddExpr:
487 return cast<SCEVAddExpr>(this)->getType();
488 case scUDivExpr:
489 return cast<SCEVUDivExpr>(this)->getType();
490 case scUnknown:
491 return cast<SCEVUnknown>(this)->getType();
493 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
494 }
495 llvm_unreachable("Unknown SCEV kind!");
496}
497
499 switch (getSCEVType()) {
500 case scConstant:
501 case scVScale:
502 case scUnknown:
503 return {};
504 case scPtrToAddr:
505 case scPtrToInt:
506 case scTruncate:
507 case scZeroExtend:
508 case scSignExtend:
509 return cast<SCEVCastExpr>(this)->operands();
510 case scAddRecExpr:
511 case scAddExpr:
512 case scMulExpr:
513 case scUMaxExpr:
514 case scSMaxExpr:
515 case scUMinExpr:
516 case scSMinExpr:
518 return cast<SCEVNAryExpr>(this)->operands();
519 case scUDivExpr:
520 return cast<SCEVUDivExpr>(this)->operands();
522 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
523 }
524 llvm_unreachable("Unknown SCEV kind!");
525}
526
527bool SCEV::isZero() const { return match(this, m_scev_Zero()); }
528
529bool SCEV::isOne() const { return match(this, m_scev_One()); }
530
531bool SCEV::isAllOnesValue() const { return match(this, m_scev_AllOnes()); }
532
535 if (!Mul) return false;
536
537 // If there is a constant factor, it will be first.
538 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
539 if (!SC) return false;
540
541 // Return true if the value is negative, this matches things like (-42 * V).
542 return SC->getAPInt().isNegative();
543}
544
547
549 return S->getSCEVType() == scCouldNotCompute;
550}
551
554 ID.AddInteger(scConstant);
555 ID.AddPointer(V);
556 void *IP = nullptr;
557 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
558 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
559 UniqueSCEVs.InsertNode(S, IP);
560 S->computeAndSetCanonical(*this);
561 return S;
562}
563
565 return getConstant(ConstantInt::get(getContext(), Val));
566}
567
568const SCEV *
571 // TODO: Avoid implicit trunc?
572 // See https://github.com/llvm/llvm-project/issues/112510.
573 return getConstant(
574 ConstantInt::get(ITy, V, isSigned, /*ImplicitTrunc=*/true));
575}
576
579 ID.AddInteger(scVScale);
580 ID.AddPointer(Ty);
581 void *IP = nullptr;
582 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
583 return S;
584 SCEV *S = new (SCEVAllocator) SCEVVScale(ID.Intern(SCEVAllocator), Ty);
585 UniqueSCEVs.InsertNode(S, IP);
586 S->computeAndSetCanonical(*this);
587 return S;
588}
589
591 SCEV::NoWrapFlags Flags) {
592 const SCEV *Res = getConstant(Ty, EC.getKnownMinValue());
593 if (EC.isScalable())
594 Res = getMulExpr(Res, getVScale(Ty), Flags);
595 return Res;
596}
597
601
// Construct a ptrtoaddr cast node: the operand must be pointer-typed and the
// result type an integer; per the assert message, the cast must not change
// the bit width (it only reinterprets the pointer's address bits).
SCEVPtrToAddrExpr::SCEVPtrToAddrExpr(const FoldingSetNodeIDRef ID,
                                     const SCEV *Op, Type *ITy)
    : SCEVCastExpr(ID, scPtrToAddr, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}
608
// Construct a ptrtoint cast node: the operand must be pointer-typed and the
// result type an integer; per the assert message, the cast must not change
// the bit width.
// NOTE(review): this ctor takes `SCEVUse Op` while the sibling
// SCEVPtrToAddrExpr ctor takes `const SCEV *Op` — confirm the asymmetry is
// intentional.
SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, SCEVUse Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}
615
620
621SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
622 Type *ty)
624 assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
625 "Cannot truncate non-integer value!");
626}
627
628SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
629 Type *ty)
631 assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
632 "Cannot zero extend non-integer value!");
633}
634
635SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, SCEVUse op,
636 Type *ty)
638 assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
639 "Cannot sign extend non-integer value!");
640}
641
643 // Clear this SCEVUnknown from various maps.
644 SE->forgetMemoizedResults({this});
645
646 // Remove this SCEVUnknown from the uniquing map.
647 SE->UniqueSCEVs.RemoveNode(this);
648
649 // Release the value.
650 setValPtr(nullptr);
651}
652
// Invoked after the IR value this node wraps has had all of its uses
// replaced with \p New (cf. the deletion hook above). Cached results keyed
// on this node are dropped, the node is un-uniqued, and it is retargeted at
// the replacement value. The ordering matters: invalidate memoized results
// before the node stops describing its old value.
void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults({this});

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Replace the value pointer in case someone is still using this SCEVUnknown.
  setValPtr(New);
}
663
664//===----------------------------------------------------------------------===//
665// SCEV Utilities
666//===----------------------------------------------------------------------===//
667
668/// Compare the two values \p LV and \p RV in terms of their "complexity" where
669/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
670/// operands in SCEV expressions.
671static int CompareValueComplexity(const LoopInfo *const LI, Value *LV,
672 Value *RV, unsigned Depth) {
674 return 0;
675
676 // Order pointer values after integer values. This helps SCEVExpander form
677 // GEPs.
678 bool LIsPointer = LV->getType()->isPointerTy(),
679 RIsPointer = RV->getType()->isPointerTy();
680 if (LIsPointer != RIsPointer)
681 return (int)LIsPointer - (int)RIsPointer;
682
683 // Compare getValueID values.
684 unsigned LID = LV->getValueID(), RID = RV->getValueID();
685 if (LID != RID)
686 return (int)LID - (int)RID;
687
688 // Sort arguments by their position.
689 if (const auto *LA = dyn_cast<Argument>(LV)) {
690 const auto *RA = cast<Argument>(RV);
691 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
692 return (int)LArgNo - (int)RArgNo;
693 }
694
695 if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
696 const auto *RGV = cast<GlobalValue>(RV);
697
698 if (auto L = LGV->getLinkage() - RGV->getLinkage())
699 return L;
700
701 const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
702 auto LT = GV->getLinkage();
703 return !(GlobalValue::isPrivateLinkage(LT) ||
705 };
706
707 // Use the names to distinguish the two values, but only if the
708 // names are semantically important.
709 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
710 return LGV->getName().compare(RGV->getName());
711 }
712
713 // For instructions, compare their loop depth, and their operand count. This
714 // is pretty loose.
715 if (const auto *LInst = dyn_cast<Instruction>(LV)) {
716 const auto *RInst = cast<Instruction>(RV);
717
718 // Compare loop depths.
719 const BasicBlock *LParent = LInst->getParent(),
720 *RParent = RInst->getParent();
721 if (LParent != RParent) {
722 unsigned LDepth = LI->getLoopDepth(LParent),
723 RDepth = LI->getLoopDepth(RParent);
724 if (LDepth != RDepth)
725 return (int)LDepth - (int)RDepth;
726 }
727
728 // Compare the number of operands.
729 unsigned LNumOps = LInst->getNumOperands(),
730 RNumOps = RInst->getNumOperands();
731 if (LNumOps != RNumOps)
732 return (int)LNumOps - (int)RNumOps;
733
734 for (unsigned Idx : seq(LNumOps)) {
735 int Result = CompareValueComplexity(LI, LInst->getOperand(Idx),
736 RInst->getOperand(Idx), Depth + 1);
737 if (Result != 0)
738 return Result;
739 }
740 }
741
742 return 0;
743}
744
745// Return negative, zero, or positive, if LHS is less than, equal to, or greater
746// than RHS, respectively. A three-way result allows recursive comparisons to be
747// more efficient.
748// If the max analysis depth was reached, return std::nullopt, assuming we do
749// not know if they are equivalent for sure.
750static std::optional<int>
751CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS,
752 const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
753 // Fast-path: SCEVs are uniqued so we can do a quick equality check.
754 if (LHS == RHS)
755 return 0;
756
757 // Primarily, sort the SCEVs by their getSCEVType().
758 SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
759 if (LType != RType)
760 return (int)LType - (int)RType;
761
763 return std::nullopt;
764
765 // Aside from the getSCEVType() ordering, the particular ordering
766 // isn't very important except that it's beneficial to be consistent,
767 // so that (a + b) and (b + a) don't end up as different expressions.
768 switch (LType) {
769 case scUnknown: {
770 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
771 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
772
773 int X =
774 CompareValueComplexity(LI, LU->getValue(), RU->getValue(), Depth + 1);
775 return X;
776 }
777
778 case scConstant: {
781
782 // Compare constant values.
783 const APInt &LA = LC->getAPInt();
784 const APInt &RA = RC->getAPInt();
785 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
786 if (LBitWidth != RBitWidth)
787 return (int)LBitWidth - (int)RBitWidth;
788 return LA.ult(RA) ? -1 : 1;
789 }
790
791 case scVScale: {
792 const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
793 const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
794 return LTy->getBitWidth() - RTy->getBitWidth();
795 }
796
797 case scAddRecExpr: {
800
801 // There is always a dominance between two recs that are used by one SCEV,
802 // so we can safely sort recs by loop header dominance. We require such
803 // order in getAddExpr.
804 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
805 if (LLoop != RLoop) {
806 const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
807 assert(LHead != RHead && "Two loops share the same header?");
808 if (DT.dominates(LHead, RHead))
809 return 1;
810 assert(DT.dominates(RHead, LHead) &&
811 "No dominance between recurrences used by one SCEV?");
812 return -1;
813 }
814
815 [[fallthrough]];
816 }
817
818 case scTruncate:
819 case scZeroExtend:
820 case scSignExtend:
821 case scPtrToAddr:
822 case scPtrToInt:
823 case scAddExpr:
824 case scMulExpr:
825 case scUDivExpr:
826 case scSMaxExpr:
827 case scUMaxExpr:
828 case scSMinExpr:
829 case scUMinExpr:
831 ArrayRef<SCEVUse> LOps = LHS->operands();
832 ArrayRef<SCEVUse> ROps = RHS->operands();
833
834 // Lexicographically compare n-ary-like expressions.
835 unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
836 if (LNumOps != RNumOps)
837 return (int)LNumOps - (int)RNumOps;
838
839 for (unsigned i = 0; i != LNumOps; ++i) {
840 auto X = CompareSCEVComplexity(LI, LOps[i].getPointer(),
841 ROps[i].getPointer(), DT, Depth + 1);
842 if (X != 0)
843 return X;
844 }
845 return 0;
846 }
847
849 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
850 }
851 llvm_unreachable("Unknown SCEV kind!");
852}
853
854/// Given a list of SCEV objects, order them by their complexity, and group
855/// objects of the same complexity together by value. When this routine is
856/// finished, we know that any duplicates in the vector are consecutive and that
857/// complexity is monotonically increasing.
858///
859/// Note that we go take special precautions to ensure that we get deterministic
860/// results from this routine. In other words, we don't want the results of
861/// this to depend on where the addresses of various SCEV objects happened to
862/// land in memory.
864 DominatorTree &DT) {
865 if (Ops.size() < 2) return; // Noop
866
867 // Whether LHS has provably less complexity than RHS.
868 auto IsLessComplex = [&](SCEVUse LHS, SCEVUse RHS) {
869 auto Complexity = CompareSCEVComplexity(LI, LHS, RHS, DT);
870 return Complexity && *Complexity < 0;
871 };
872 if (Ops.size() == 2) {
873 // This is the common case, which also happens to be trivially simple.
874 // Special case it.
875 SCEVUse &LHS = Ops[0], &RHS = Ops[1];
876 if (IsLessComplex(RHS, LHS))
877 std::swap(LHS, RHS);
878 return;
879 }
880
881 // Do the rough sort by complexity.
883 Ops, [&](SCEVUse LHS, SCEVUse RHS) { return IsLessComplex(LHS, RHS); });
884
885 // Now that we are sorted by complexity, group elements of the same
886 // complexity. Note that this is, at worst, N^2, but the vector is likely to
887 // be extremely short in practice. Note that we take this approach because we
888 // do not want to depend on the addresses of the objects we are grouping.
889 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
890 const SCEV *S = Ops[i];
891 unsigned Complexity = S->getSCEVType();
892
893 // If there are any objects of the same complexity and same value as this
894 // one, group them.
895 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
896 if (Ops[j] == S) { // Found a duplicate.
897 // Move it to immediately after i'th element.
898 std::swap(Ops[i+1], Ops[j]);
899 ++i; // no need to rescan it.
900 if (i == e-2) return; // Done!
901 }
902 }
903 }
904}
905
906/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
907/// least HugeExprThreshold nodes).
909 return any_of(Ops, [](const SCEV *S) {
911 });
912}
913
914/// Performs a number of common optimizations on the passed \p Ops. If the
915/// whole expression reduces down to a single operand, it will be returned.
916///
917/// The following optimizations are performed:
918/// * Fold constants using the \p Fold function.
919/// * Remove identity constants satisfying \p IsIdentity.
920/// * If a constant satisfies \p IsAbsorber, return it.
921/// * Sort operands by complexity.
922template <typename FoldT, typename IsIdentityT, typename IsAbsorberT>
923static const SCEV *
925 SmallVectorImpl<SCEVUse> &Ops, FoldT Fold,
926 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
927 const SCEVConstant *Folded = nullptr;
928 for (unsigned Idx = 0; Idx < Ops.size();) {
929 const SCEV *Op = Ops[Idx];
930 if (const auto *C = dyn_cast<SCEVConstant>(Op)) {
931 if (!Folded)
932 Folded = C;
933 else
934 Folded = cast<SCEVConstant>(
935 SE.getConstant(Fold(Folded->getAPInt(), C->getAPInt())));
936 Ops.erase(Ops.begin() + Idx);
937 continue;
938 }
939 ++Idx;
940 }
941
942 if (Ops.empty()) {
943 assert(Folded && "Must have folded value");
944 return Folded;
945 }
946
947 if (Folded && IsAbsorber(Folded->getAPInt()))
948 return Folded;
949
950 GroupByComplexity(Ops, &LI, DT);
951 if (Folded && !IsIdentity(Folded->getAPInt()))
952 Ops.insert(Ops.begin(), Folded);
953
954 return Ops.size() == 1 ? Ops[0] : nullptr;
955}
956
957//===----------------------------------------------------------------------===//
958// Simple SCEV method implementations
959//===----------------------------------------------------------------------===//
960
961/// Compute BC(It, K). The result has width W. Assume, K > 0.
962static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
963 ScalarEvolution &SE,
964 Type *ResultTy) {
965 // Handle the simplest case efficiently.
966 if (K == 1)
967 return SE.getTruncateOrZeroExtend(It, ResultTy);
968
969 // We are using the following formula for BC(It, K):
970 //
971 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
972 //
973 // Suppose, W is the bitwidth of the return value. We must be prepared for
974 // overflow. Hence, we must assure that the result of our computation is
975 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
976 // safe in modular arithmetic.
977 //
978 // However, this code doesn't use exactly that formula; the formula it uses
979 // is something like the following, where T is the number of factors of 2 in
980 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
981 // exponentiation:
982 //
983 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
984 //
985 // This formula is trivially equivalent to the previous formula. However,
986 // this formula can be implemented much more efficiently. The trick is that
987 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
988 // arithmetic. To do exact division in modular arithmetic, all we have
989 // to do is multiply by the inverse. Therefore, this step can be done at
990 // width W.
991 //
992 // The next issue is how to safely do the division by 2^T. The way this
993 // is done is by doing the multiplication step at a width of at least W + T
994 // bits. This way, the bottom W+T bits of the product are accurate. Then,
995 // when we perform the division by 2^T (which is equivalent to a right shift
996 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
997 // truncated out after the division by 2^T.
998 //
999 // In comparison to just directly using the first formula, this technique
1000 // is much more efficient; using the first formula requires W * K bits,
1001 // but this formula less than W + K bits. Also, the first formula requires
1002 // a division step, whereas this formula only requires multiplies and shifts.
1003 //
1004 // It doesn't matter whether the subtraction step is done in the calculation
1005 // width or the input iteration count's width; if the subtraction overflows,
1006 // the result must be zero anyway. We prefer here to do it in the width of
1007 // the induction variable because it helps a lot for certain cases; CodeGen
1008 // isn't smart enough to ignore the overflow, which leads to much less
1009 // efficient code if the width of the subtraction is wider than the native
1010 // register width.
1011 //
1012 // (It's possible to not widen at all by pulling out factors of 2 before
1013 // the multiplication; for example, K=2 can be calculated as
1014 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
1015 // extra arithmetic, so it's not an obvious win, and it gets
1016 // much more complicated for K > 3.)
1017
1018 // Protection from insane SCEVs; this bound is conservative,
1019 // but it probably doesn't matter.
1020 if (K > 1000)
1021 return SE.getCouldNotCompute();
1022
1023 unsigned W = SE.getTypeSizeInBits(ResultTy);
1024
1025 // Calculate K! / 2^T and T; we divide out the factors of two before
1026 // multiplying for calculating K! / 2^T to avoid overflow.
1027 // Other overflow doesn't matter because we only care about the bottom
1028 // W bits of the result.
1029 APInt OddFactorial(W, 1);
1030 unsigned T = 1;
1031 for (unsigned i = 3; i <= K; ++i) {
1032 unsigned TwoFactors = countr_zero(i);
1033 T += TwoFactors;
1034 OddFactorial *= (i >> TwoFactors);
1035 }
1036
1037 // We need at least W + T bits for the multiplication step
1038 unsigned CalculationBits = W + T;
1039
1040 // Calculate 2^T, at width T+W.
1041 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
1042
1043 // Calculate the multiplicative inverse of K! / 2^T;
1044 // this multiplication factor will perform the exact division by
1045 // K! / 2^T.
1046 APInt MultiplyFactor = OddFactorial.multiplicativeInverse();
1047
1048 // Calculate the product, at width T+W
1049 IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
1050 CalculationBits);
1051 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
1052 for (unsigned i = 1; i != K; ++i) {
1053 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
1054 Dividend = SE.getMulExpr(Dividend,
1055 SE.getTruncateOrZeroExtend(S, CalculationTy));
1056 }
1057
1058 // Divide by 2^T
1059 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
1060
1061 // Truncate the result, and divide by K! / 2^T.
1062
1063 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
1064 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
1065}
1066
1067/// Return the value of this chain of recurrences at the specified iteration
1068/// number. We can evaluate this recurrence by multiplying each element in the
1069/// chain by the binomial coefficient corresponding to it. In other words, we
1070/// can evaluate {A,+,B,+,C,+,D} as:
1071///
1072/// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
1073///
1074/// where BC(It, k) stands for binomial coefficient.
// NOTE(review): the opening line of this member's signature (original line
// 1075) is not visible in this excerpt. The body simply forwards to the
// static overload below, passing this AddRec's own operand list.
1076 ScalarEvolution &SE) const {
1077 return evaluateAtIteration(operands(), It, SE);
1078}
1079
// Static overload: evaluate a chain of recurrences, given explicitly as an
// operand list, at iteration `It`. (The opening line of the signature,
// original line 1080, is not visible in this excerpt.)
// Returns SCEVCouldNotCompute if any binomial coefficient is unanalyzable.
1081 const SCEV *It,
1082 ScalarEvolution &SE) {
1083 assert(Operands.size() > 0);
// Accumulate sum_{i} Operands[i] * BC(It, i), starting from the 0th operand.
1084 const SCEV *Result = Operands[0].getPointer();
1085 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1086 // The computation is correct in the face of overflow provided that the
1087 // multiplication is performed _after_ the evaluation of the binomial
1088 // coefficient.
1089 const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
// Propagate failure: if one coefficient can't be computed, neither can the sum.
1090 if (isa<SCEVCouldNotCompute>(Coeff))
1091 return Coeff;
1092
1093 Result =
1094 SE.getAddExpr(Result, SE.getMulExpr(Operands[i].getPointer(), Coeff));
1095 }
1096 return Result;
1097}
1098
1099//===----------------------------------------------------------------------===//
1100// SCEV Expression folder implementations
1101//===----------------------------------------------------------------------===//
1102
1103/// The SCEVCastSinkingRewriter takes a scalar evolution expression,
1104/// which computes a pointer-typed value, and rewrites the whole expression
1105/// tree so that *all* the computations are done on integers, and the only
1106/// pointer-typed operands in the expression are SCEVUnknown.
1107/// The CreatePtrCast callback is invoked to create the actual conversion
1108/// (ptrtoint or ptrtoaddr) at the SCEVUnknown leaves.
// NOTE(review): the class declaration line (original 1109) and the
// constructor's first signature line (original 1117) are not visible in this
// excerpt.
1110 : public SCEVRewriteVisitor<SCEVCastSinkingRewriter> {
// Callback type used at SCEVUnknown leaves to build the actual cast node.
1112 using ConversionFn = function_ref<const SCEV *(const SCEVUnknown *)>;
1113 Type *TargetTy;
1114 ConversionFn CreatePtrCast;
1115
1116public:
1118 ConversionFn CreatePtrCast)
1119 : Base(SE), TargetTy(TargetTy), CreatePtrCast(std::move(CreatePtrCast)) {}
1120
// Convenience entry point: construct a rewriter and visit the expression.
1121 static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
1122 Type *TargetTy, ConversionFn CreatePtrCast) {
1123 SCEVCastSinkingRewriter Rewriter(SE, TargetTy, std::move(CreatePtrCast));
1124 return Rewriter.visit(Scev);
1125 }
1126
1127 const SCEV *visit(const SCEV *S) {
1128 Type *STy = S->getType();
1129 // If the expression is not pointer-typed, just keep it as-is.
1130 if (!STy->isPointerTy())
1131 return S;
1132 // Else, recursively sink the cast down into it.
1133 return Base::visit(S);
1134 }
1135
1136 const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
1137 // Preserve wrap flags on rewritten SCEVAddExpr, which the default
1138 // implementation drops.
1139 SmallVector<SCEVUse, 2> Operands;
1140 bool Changed = false;
1141 for (SCEVUse Op : Expr->operands()) {
1142 Operands.push_back(visit(Op.getPointer()));
1143 Changed |= Op.getPointer() != Operands.back();
1144 }
// Only rebuild the expression when at least one operand actually changed.
1145 return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
1146 }
1147
// Same shape as visitAddExpr: rewrite operands, preserve no-wrap flags.
1148 const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
1149 SmallVector<SCEVUse, 2> Operands;
1150 bool Changed = false;
1151 for (SCEVUse Op : Expr->operands()) {
1152 Operands.push_back(visit(Op.getPointer()));
1153 Changed |= Op.getPointer() != Operands.back();
1154 }
1155 return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
1156 }
1157
1158 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
1159 assert(Expr->getType()->isPointerTy() &&
1160 "Should only reach pointer-typed SCEVUnknown's.");
1161 // Perform some basic constant folding. If the operand of the cast is a
1162 // null pointer, don't create a cast SCEV expression (that will be left
1163 // as-is), but produce a zero constant.
// NOTE(review): the condition line guarding this early return (original line
// 1164, presumably a null-pointer check on Expr's value) is not visible here.
1165 return SE.getZero(TargetTy);
1166 return CreatePtrCast(Expr);
1167 }
1168};
1169
// Build a lossless ptrtoint SCEV for `Op`, sinking the cast to SCEVUnknown
// leaves; returns SCEVCouldNotCompute when the cast cannot be modeled.
// (The signature line, original 1170, is not visible in this excerpt.)
1171 assert(Op->getType()->isPointerTy() && "Op must be a pointer");
1172
1173 // It isn't legal for optimizations to construct new ptrtoint expressions
1174 // for non-integral pointers.
1175 if (getDataLayout().isNonIntegralPointerType(Op->getType()))
1176 return getCouldNotCompute();
1177
1178 Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
1179
1180 // We can only trivially model ptrtoint if SCEV's effective (integer) type
1181 // is sufficiently wide to represent all possible pointer values.
1182 // We could theoretically teach SCEV to truncate wider pointers, but
1183 // that isn't implemented for now.
// NOTE(review): the first line of this width comparison (original 1184) is
// not visible; the visible half compares against IntPtrTy's bit width.
1185 getDataLayout().getTypeSizeInBits(IntPtrTy))
1186 return getCouldNotCompute();
1187
1188 // Use the rewriter to sink the cast down to SCEVUnknown leaves.
// The lambda uniques one SCEVPtrToIntExpr node per SCEVUnknown leaf via the
// FoldingSet (FindNodeOrInsertPos / InsertNode idiom).
1190 Op, *this, IntPtrTy, [this, IntPtrTy](const SCEVUnknown *U) {
1192 ID.AddInteger(scPtrToInt);
1193 ID.AddPointer(U);
1194 void *IP = nullptr;
1195 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
1196 return S;
1197 SCEV *S = new (SCEVAllocator)
1198 SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), U, IntPtrTy);
1199 UniqueSCEVs.InsertNode(S, IP);
1200 S->computeAndSetCanonical(*this);
// Record the dependence so invalidating U also invalidates S.
1201 registerUser(S, U);
1202 return static_cast<const SCEV *>(S);
1203 });
1204 assert(IntOp->getType()->isIntegerTy() &&
1205 "We must have succeeded in sinking the cast, "
1206 "and ending up with an integer-typed expression!");
1207 return IntOp;
1208}
1209
// Build a lossless ptrtoaddr SCEV for `Op`, mirroring getLosslessPtrToIntExpr
// but producing SCEVPtrToAddrExpr nodes at the leaves. (The signature line,
// original 1210, is not visible in this excerpt.)
1211 assert(Op->getType()->isPointerTy() && "Op must be a pointer");
1212
1213 // Treat pointers with unstable representation conservatively, since the
1214 // address bits may change.
1215 if (DL.hasUnstableRepresentation(Op->getType()))
1216 return getCouldNotCompute();
1217
1218 Type *Ty = DL.getAddressType(Op->getType());
1219
1220 // Use the rewriter to sink the cast down to SCEVUnknown leaves.
1221 // The rewriter handles null pointer constant folding.
// The lambda uniques one SCEVPtrToAddrExpr node per SCEVUnknown leaf.
1223 Op, *this, Ty, [this, Ty](const SCEVUnknown *U) {
1225 ID.AddInteger(scPtrToAddr);
1226 ID.AddPointer(U);
1227 ID.AddPointer(Ty);
1228 void *IP = nullptr;
1229 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
1230 return S;
1231 SCEV *S = new (SCEVAllocator)
1232 SCEVPtrToAddrExpr(ID.Intern(SCEVAllocator), U, Ty);
1233 UniqueSCEVs.InsertNode(S, IP);
1234 S->computeAndSetCanonical(*this);
1235 registerUser(S, U);
1236 return static_cast<const SCEV *>(S);
1237 });
1238 assert(IntOp->getType()->isIntegerTy() &&
1239 "We must have succeeded in sinking the cast, "
1240 "and ending up with an integer-typed expression!");
1241 return IntOp;
1242}
1243
// Convert pointer `Op` to integer type `Ty`: first model a lossless
// ptrtoint, then adjust the width with a truncate or zero-extend.
// Propagates SCEVCouldNotCompute from the lossless conversion.
// (The signature line, original 1244, is not visible in this excerpt.)
1245 assert(Ty->isIntegerTy() && "Target type must be an integer type!");
1246
1247 const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
1248 if (isa<SCEVCouldNotCompute>(IntOp))
1249 return IntOp;
1250
1251 return getTruncateOrZeroExtend(IntOp, Ty);
1252}
1253
// Return a SCEV for `Op` truncated to type `Ty`, folding through constants,
// nested casts, commutative expressions and add recurrences where possible.
// (The signature line, original 1254, is not visible in this excerpt.)
1255 unsigned Depth) {
1256 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
1257 "This is not a truncating conversion!");
1258 assert(isSCEVable(Ty) &&
1259 "This is not a conversion to a SCEVable type!");
1260 assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
1261 Ty = getEffectiveSCEVType(Ty);
1262
// Look up (scTruncate, Op, Ty) in the unique-SCEV FoldingSet first.
// NOTE(review): the FoldingSetNodeID declaration line (original 1263) is not
// visible in this excerpt.
1264 ID.AddInteger(scTruncate);
1265 ID.AddPointer(Op);
1266 ID.AddPointer(Ty);
1267 void *IP = nullptr;
1268 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1269
1270 // Fold if the operand is constant.
1271 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1272 return getConstant(
1273 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
1274
1275 // trunc(trunc(x)) --> trunc(x)
// NOTE(review): the dyn_cast guard lines for these three cast folds
// (original 1276, 1280, 1284) are not visible in this excerpt.
1277 return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);
1278
1279 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
1281 return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);
1282
1283 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
1285 return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);
1286
// Recursion cutoff: past MaxCastDepth, stop folding and emit a raw node.
1287 if (Depth > MaxCastDepth) {
1288 SCEV *S =
1289 new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
1290 UniqueSCEVs.InsertNode(S, IP);
1291 S->computeAndSetCanonical(*this);
1292 registerUser(S, Op);
1293 return S;
1294 }
1295
1296 // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
1297 // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
1298 // if after transforming we have at most one truncate, not counting truncates
1299 // that replace other casts.
1301 auto *CommOp = cast<SCEVCommutativeExpr>(Op);
1302 SmallVector<SCEVUse, 4> Operands;
1303 unsigned numTruncs = 0;
// Bail out of the scan early (numTruncs < 2) once a second truncate appears.
1304 for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1305 ++i) {
1306 const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
1307 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1309 numTruncs++;
1310 Operands.push_back(S);
1311 }
1312 if (numTruncs < 2) {
1313 if (isa<SCEVAddExpr>(Op))
1314 return getAddExpr(Operands);
1315 if (isa<SCEVMulExpr>(Op))
1316 return getMulExpr(Operands);
1317 llvm_unreachable("Unexpected SCEV type for Op.");
1318 }
1319 // Although we checked in the beginning that ID is not in the cache, it is
1320 // possible that during recursion and different modification ID was inserted
1321 // into the cache. So if we find it, just return it.
1322 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
1323 return S;
1324 }
1325
1326 // If the input value is a chrec scev, truncate the chrec's operands.
1327 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1328 SmallVector<SCEVUse, 4> Operands;
1329 for (const SCEV *Op : AddRec->operands())
1330 Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
// Truncation can invalidate wrap flags, so rebuild with FlagAnyWrap.
1331 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
1332 }
1333
1334 // Return zero if truncating to known zeros.
1335 uint32_t MinTrailingZeros = getMinTrailingZeros(Op);
1336 if (MinTrailingZeros >= getTypeSizeInBits(Ty))
1337 return getZero(Ty);
1338
1339 // The cast wasn't folded; create an explicit cast node. We can reuse
1340 // the existing insert position since if we get here, we won't have
1341 // made any changes which would invalidate it.
1342 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
1343 Op, Ty);
1344 UniqueSCEVs.InsertNode(S, IP);
1345 S->computeAndSetCanonical(*this);
1346 registerUser(S, Op);
1347 return S;
1348}
1349
1350// Get the limit of a recurrence such that incrementing by Step cannot cause
1351// signed overflow as long as the value of the recurrence within the
1352// loop does not exceed this limit before incrementing.
// Sets *Pred to the comparison that must hold against the returned limit;
// returns nullptr when the sign of Step is unknown.
1353static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
1354 ICmpInst::Predicate *Pred,
1355 ScalarEvolution *SE) {
1356 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1357 if (SE->isKnownPositive(Step)) {
1358 *Pred = ICmpInst::ICMP_SLT;
// NOTE(review): the first lines of these two return expressions (original
// 1359 and 1364) are not visible in this excerpt; only the second halves,
// which subtract the step's signed range extremum, are shown.
1360 SE->getSignedRangeMax(Step));
1361 }
1362 if (SE->isKnownNegative(Step)) {
1363 *Pred = ICmpInst::ICMP_SGT;
1365 SE->getSignedRangeMin(Step));
1366 }
1367 return nullptr;
1368}
1369
1370// Get the limit of a recurrence such that incrementing by Step cannot cause
1371// unsigned overflow as long as the value of the recurrence within the loop does
1372// not exceed this limit before incrementing.
// Unlike the signed variant, the predicate is always ICMP_ULT. (The signature
// line, original 1373, is not visible in this excerpt.)
1374 ICmpInst::Predicate *Pred,
1375 ScalarEvolution *SE) {
1376 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1377 *Pred = ICmpInst::ICMP_ULT;
1378
// NOTE(review): the first line of the return expression (original 1379) is
// not visible; the visible half subtracts the step's unsigned range maximum.
1380 SE->getUnsignedRangeMax(Step));
1381}
1382
1383namespace {
1384
// Base class carrying the member-function-pointer type used to select
// between getSignExtendExpr and getZeroExtendExpr generically.
1385struct ExtendOpTraitsBase {
1386 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
1387 unsigned);
1388};
1389
1390// Used to make code generic over signed and unsigned overflow.
1391template <typename ExtendOp> struct ExtendOpTraits {
1392 // Members present:
1393 //
1394 // static const SCEV::NoWrapFlags WrapType;
1395 //
1396 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
1397 //
1398 // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1399 // ICmpInst::Predicate *Pred,
1400 // ScalarEvolution *SE);
1401};
1402
// Signed specialization: sign-extension pairs with NSW and the signed limit.
1403template <>
1404struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
1405 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
1406
1407 static const GetExtendExprTy GetExtendExpr;
1408
1409 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1410 ICmpInst::Predicate *Pred,
1411 ScalarEvolution *SE) {
1412 return getSignedOverflowLimitForStep(Step, Pred, SE);
1413 }
1414};
1415
// NOTE(review): the second lines of these two out-of-class GetExtendExpr
// definitions (original 1417 and 1433) are not visible in this excerpt.
1416const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1418
// Unsigned specialization: zero-extension pairs with NUW and the unsigned
// limit.
1419template <>
1420struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
1421 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
1422
1423 static const GetExtendExprTy GetExtendExpr;
1424
1425 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1426 ICmpInst::Predicate *Pred,
1427 ScalarEvolution *SE) {
1428 return getUnsignedOverflowLimitForStep(Step, Pred, SE);
1429 }
1430};
1431
1432const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1434
1435} // end anonymous namespace
1436
1437// The recurrence AR has been shown to have no signed/unsigned wrap or something
1438// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
1439// easily prove NSW/NUW for its preincrement or postincrement sibling. This
1440// allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step +
1441// Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the
1442// expression "Step + sext/zext(PreIncAR)" is congruent with
1443// "sext/zext(PostIncAR)"
// Returns the "pre-start" value (Start - Step) when no-wrap can be shown for
// PreStart + Step, or nullptr if no such proof succeeds.
1444template <typename ExtendOpTy>
1445static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1446 ScalarEvolution *SE, unsigned Depth) {
1447 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1448 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1449
1450 const Loop *L = AR->getLoop();
1451 const SCEV *Start = AR->getStart();
1452 const SCEV *Step = AR->getStepRecurrence(*SE);
1453
1454 // Check for a simple looking step prior to loop entry.
1455 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1456 if (!SA)
1457 return nullptr;
1458
1459 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1460 // subtraction is expensive. For this purpose, perform a quick and dirty
1461 // difference, by checking for Step in the operand list. Note, that
1462 // SA might have repeated ops, like %a + %a + ..., so only remove one.
1463 SmallVector<SCEVUse, 4> DiffOps(SA->operands());
1464 for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)
1465 if (*It == Step) {
1466 DiffOps.erase(It);
1467 break;
1468 }
1469
// If Step wasn't found among the operands, there is nothing to peel off.
1470 if (DiffOps.size() == SA->getNumOperands())
1471 return nullptr;
1472
1473 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
1474 // `Step`:
1475
1476 // 1. NSW/NUW flags on the step increment.
// NOTE(review): the continuation lines computing PreStartFlags and binding
// PreAR (original 1478 and 1480) are not visible in this excerpt.
1477 auto PreStartFlags =
1479 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
1481 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1482
1483 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1484 // "S+X does not sign/unsign-overflow".
1485 //
1486
1487 const SCEV *BECount = SE->getBackedgeTakenCount(L);
1488 if (PreAR && any(PreAR->getNoWrapFlags(WrapType)) &&
1489 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1490 return PreStart;
1491
1492 // 2. Direct overflow check on the step operation's expression.
1493 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1494 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1495 const SCEV *OperandExtendedStart =
1496 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
1497 (SE->*GetExtendExpr)(Step, WideTy, Depth));
1498 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
1499 if (PreAR && any(AR->getNoWrapFlags(WrapType))) {
1500 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1501 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1502 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
1503 SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
1504 }
1505 return PreStart;
1506 }
1507
1508 // 3. Loop precondition.
// NOTE(review): the ICmpInst::Predicate declaration (original line 1509) is
// not visible in this excerpt.
1510 const SCEV *OverflowLimit =
1511 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1512
1513 if (OverflowLimit &&
1514 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
1515 return PreStart;
1516
1517 return nullptr;
1518}
1519
1520// Get the normalized zero or sign extended expression for this AddRec's Start.
1521template <typename ExtendOpTy>
1522static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1523 ScalarEvolution *SE,
1524 unsigned Depth) {
1525 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1526
1527 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
1528 if (!PreStart)
1529 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
1530
1531 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
1532 Depth),
1533 (SE->*GetExtendExpr)(PreStart, Ty, Depth));
1534}
1535
1536// Try to prove away overflow by looking at "nearby" add recurrences. A
1537// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1538// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1539//
1540// Formally:
1541//
1542// {S,+,X} == {S-T,+,X} + T
1543// => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1544//
1545// If ({S-T,+,X} + T) does not overflow ... (1)
1546//
1547// RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1548//
1549// If {S-T,+,X} does not overflow ... (2)
1550//
1551// RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1552// == {Ext(S-T)+Ext(T),+,Ext(X)}
1553//
1554// If (S-T)+T does not overflow ... (3)
1555//
1556// RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1557// == {Ext(S),+,Ext(X)} == LHS
1558//
1559// Thus, if (1), (2) and (3) are true for some T, then
1560// Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1561//
1562// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1563// does not overflow" restricted to the 0th iteration. Therefore we only need
1564// to check for (1) and (2).
1565//
1566// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1567// is `Delta` (defined below).
1568template <typename ExtendOpTy>
1569bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1570 const SCEV *Step,
1571 const Loop *L) {
1572 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1573
1574 // We restrict `Start` to a constant to prevent SCEV from spending too much
1575 // time here. It is correct (but more expensive) to continue with a
1576 // non-constant `Start` and do a general SCEV subtraction to compute
1577 // `PreStart` below.
1578 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1579 if (!StartC)
1580 return false;
1581
1582 APInt StartAI = StartC->getAPInt();
1583
// Probe a few small offsets T (the `Delta` values) around Start. The
// negative literals deliberately wrap to large unsigned values; the APInt
// subtraction below is modular either way.
1584 for (unsigned Delta : {-2, -1, 1, 2}) {
1585 const SCEV *PreStart = getConstant(StartAI - Delta);
1586
// Hand-computed FoldingSet key for {PreStart,+,Step}<L> — cheaper than
// building the AddRec just to look it up.
1587 FoldingSetNodeID ID;
1588 ID.AddInteger(scAddRecExpr);
1589 ID.AddPointer(PreStart);
1590 ID.AddPointer(Step);
1591 ID.AddPointer(L);
1592 void *IP = nullptr;
1593 const auto *PreAR =
1594 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1595
1596 // Give up if we don't already have the add recurrence we need because
1597 // actually constructing an add recurrence is relatively expensive.
1598 if (PreAR && any(PreAR->getNoWrapFlags(WrapType))) { // proves (2)
1599 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
// NOTE(review): the ICmpInst::Predicate declaration (original line 1600) is
// not visible in this excerpt.
1601 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1602 DeltaS, &Pred, this);
1603 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1604 return true;
1605 }
1606 }
1607
1608 return false;
1609}
1610
1611// Finds an integer D for an expression (C + x + y + ...) such that the top
1612// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
1613// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
1614// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
1615// the (C + x + y + ...) expression is \p WholeAddExpr.
// Returns D, or an all-zero APInt when no bits can be safely extracted.
// (The signature's opening line, original 1616, is not visible here.)
1617 const SCEVConstant *ConstantTerm,
1618 const SCEVAddExpr *WholeAddExpr) {
1619 const APInt &C = ConstantTerm->getAPInt();
1620 const unsigned BitWidth = C.getBitWidth();
1621 // Find number of trailing zeros of (x + y + ...) w/o the C first:
// Operand 0 is the constant C itself, so the scan starts at index 1; TZ in
// the loop condition lets us stop once no trailing zeros remain.
1622 uint32_t TZ = BitWidth;
1623 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
1624 TZ = std::min(TZ, SE.getMinTrailingZeros(WholeAddExpr->getOperand(I)));
1625 if (TZ) {
1626 // Set D to be as many least significant bits of C as possible while still
1627 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
1628 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
1629 }
1630 return APInt(BitWidth, 0);
1631}
1632
1633// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1634// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1635// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1636// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
// AddRec counterpart of the overload above; the trailing-zero bound comes
// solely from Step. (The signature's opening line, original 1637, is not
// visible in this excerpt.)
1638 const APInt &ConstantStart,
1639 const SCEV *Step) {
1640 const unsigned BitWidth = ConstantStart.getBitWidth();
1641 const uint32_t TZ = SE.getMinTrailingZeros(Step);
1642 if (TZ)
1643 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1644 : ConstantStart;
1645 return APInt(BitWidth, 0);
1646}
1647
// Insert (ID -> S) into the fold cache, keeping the reverse FoldCacheUser map
// consistent when an existing entry for ID is being replaced. (The
// signature's opening lines, original 1648/1650/1651, are not visible here.)
1649 const ScalarEvolution::FoldID &ID, const SCEV *S,
1652 &FoldCacheUser) {
1653 auto I = FoldCache.insert({ID, S});
1654 if (!I.second) {
1655 // Remove FoldCacheUser entry for ID when replacing an existing FoldCache
1656 // entry.
1657 auto &UserIDs = FoldCacheUser[I.first->second];
1658 assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
// Swap-with-back then pop: O(1) unordered erase of ID from UserIDs.
1659 for (unsigned I = 0; I != UserIDs.size(); ++I)
1660 if (UserIDs[I] == ID) {
1661 std::swap(UserIDs[I], UserIDs.back());
1662 break;
1663 }
1664 UserIDs.pop_back();
1665 I.first->second = S;
1666 }
// Record that S is now referenced by ID in the reverse map.
1667 FoldCacheUser[S].push_back(ID);
1668}
1669
// Public zext entry point: consult the fold cache, otherwise delegate to
// getZeroExtendExprImpl and cache the result. (The signature's second line,
// original 1671, is not visible in this excerpt.)
1670const SCEV *
1672 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1673 "This is not an extending conversion!");
1674 assert(isSCEVable(Ty) &&
1675 "This is not a conversion to a SCEVable type!");
1676 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1677 Ty = getEffectiveSCEVType(Ty);
1678
1679 FoldID ID(scZeroExtend, Op, Ty);
1680 if (const SCEV *S = FoldCache.lookup(ID))
1681 return S;
1682
1683 const SCEV *S = getZeroExtendExprImpl(Op, Ty, Depth);
// NOTE(review): the condition guarding this cache insertion (original line
// 1684) is not visible in this excerpt.
1685 insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
1686 return S;
1687}
1688
1690 unsigned Depth) {
1691 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1692 "This is not an extending conversion!");
1693 assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
1694 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1695
1696 // Fold if the operand is constant.
1697 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1698 return getConstant(SC->getAPInt().zext(getTypeSizeInBits(Ty)));
1699
1700 // zext(zext(x)) --> zext(x)
1702 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1703
1704 // Before doing any expensive analysis, check to see if we've already
1705 // computed a SCEV for this Op and Ty.
1707 ID.AddInteger(scZeroExtend);
1708 ID.AddPointer(Op);
1709 ID.AddPointer(Ty);
1710 void *IP = nullptr;
1711 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1712 if (Depth > MaxCastDepth) {
1713 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1714 Op, Ty);
1715 UniqueSCEVs.InsertNode(S, IP);
1716 S->computeAndSetCanonical(*this);
1717 registerUser(S, Op);
1718 return S;
1719 }
1720
1721 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1723 // It's possible the bits taken off by the truncate were all zero bits. If
1724 // so, we should be able to simplify this further.
1725 const SCEV *X = ST->getOperand();
1727 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1728 unsigned NewBits = getTypeSizeInBits(Ty);
1729 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1730 CR.zextOrTrunc(NewBits)))
1731 return getTruncateOrZeroExtend(X, Ty, Depth);
1732 }
1733
1734 // If the input value is a chrec scev, and we can prove that the value
1735 // did not overflow the old, smaller, value, we can zero extend all of the
1736 // operands (often constants). This allows analysis of something like
1737 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1739 if (AR->isAffine()) {
1740 const SCEV *Start = AR->getStart();
1741 const SCEV *Step = AR->getStepRecurrence(*this);
1742 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1743 const Loop *L = AR->getLoop();
1744
1745 // If we have special knowledge that this addrec won't overflow,
1746 // we don't need to do any further analysis.
1747 if (AR->hasNoUnsignedWrap()) {
1748 Start =
1750 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1751 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1752 }
1753
1754 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1755 // Note that this serves two purposes: It filters out loops that are
1756 // simply not analyzable, and it covers the case where this code is
1757 // being called from within backedge-taken count analysis, such that
1758 // attempting to ask for the backedge-taken count would likely result
1759 // in infinite recursion. In the later case, the analysis code will
1760 // cope with a conservative value, and it will take care to purge
1761 // that value once it has finished.
1762 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1763 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1764 // Manually compute the final value for AR, checking for overflow.
1765
1766 // Check whether the backedge-taken count can be losslessly casted to
1767 // the addrec's type. The count is always unsigned.
1768 const SCEV *CastedMaxBECount =
1769 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1770 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1771 CastedMaxBECount, MaxBECount->getType(), Depth);
1772 if (MaxBECount == RecastedMaxBECount) {
1773 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1774 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1775 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1777 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1779 Depth + 1),
1780 WideTy, Depth + 1);
1781 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1782 const SCEV *WideMaxBECount =
1783 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1784 const SCEV *OperandExtendedAdd =
1785 getAddExpr(WideStart,
1786 getMulExpr(WideMaxBECount,
1787 getZeroExtendExpr(Step, WideTy, Depth + 1),
1790 if (ZAdd == OperandExtendedAdd) {
1791 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1792 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1793 // Return the expression with the addrec on the outside.
1794 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1795 Depth + 1);
1796 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1797 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1798 }
1799 // Similar to above, only this time treat the step value as signed.
1800 // This covers loops that count down.
1801 OperandExtendedAdd =
1802 getAddExpr(WideStart,
1803 getMulExpr(WideMaxBECount,
1804 getSignExtendExpr(Step, WideTy, Depth + 1),
1807 if (ZAdd == OperandExtendedAdd) {
1808 // Cache knowledge of AR NW, which is propagated to this AddRec.
1809 // Negative step causes unsigned wrap, but it still can't self-wrap.
1810 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1811 // Return the expression with the addrec on the outside.
1812 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1813 Depth + 1);
1814 Step = getSignExtendExpr(Step, Ty, Depth + 1);
1815 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1816 }
1817 }
1818 }
1819
1820 // Normally, in the cases we can prove no-overflow via a
1821 // backedge guarding condition, we can also compute a backedge
1822 // taken count for the loop. The exceptions are assumptions and
1823 // guards present in the loop -- SCEV is not great at exploiting
1824 // these to compute max backedge taken counts, but can still use
1825 // these to prove lack of overflow. Use this fact to avoid
1826 // doing extra work that may not pay off.
1827 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1828 !AC.assumptions().empty()) {
1829
1830 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1831 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1832 if (AR->hasNoUnsignedWrap()) {
1833 // Same as nuw case above - duplicated here to avoid a compile time
1834 // issue. It's not clear that the order of checks does matter, but
1835 // it's one of two issue possible causes for a change which was
1836 // reverted. Be conservative for the moment.
1837 Start =
1839 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1840 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1841 }
1842
1843 // For a negative step, we can extend the operands iff doing so only
1844 // traverses values in the range zext([0,UINT_MAX]).
1845 if (isKnownNegative(Step)) {
1847 getSignedRangeMin(Step));
1850 // Cache knowledge of AR NW, which is propagated to this
1851 // AddRec. Negative step causes unsigned wrap, but it
1852 // still can't self-wrap.
1853 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1854 // Return the expression with the addrec on the outside.
1855 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1856 Depth + 1);
1857 Step = getSignExtendExpr(Step, Ty, Depth + 1);
1858 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1859 }
1860 }
1861 }
1862
1863 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1864 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1865 // where D maximizes the number of trailing zeros of (C - D + Step * n)
1866 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1867 const APInt &C = SC->getAPInt();
1868 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1869 if (D != 0) {
1870 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1871 const SCEV *SResidual =
1872 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1873 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1874 return getAddExpr(SZExtD, SZExtR, SCEV::FlagNSW | SCEV::FlagNUW,
1875 Depth + 1);
1876 }
1877 }
1878
1879 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1880 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1881 Start =
1883 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
1884 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
1885 }
1886 }
1887
1888 // zext(A % B) --> zext(A) % zext(B)
1889 {
1890 const SCEV *LHS;
1891 const SCEV *RHS;
1892 if (match(Op, m_scev_URem(m_SCEV(LHS), m_SCEV(RHS), *this)))
1893 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1894 getZeroExtendExpr(RHS, Ty, Depth + 1));
1895 }
1896
1897 // zext(A / B) --> zext(A) / zext(B).
1898 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1899 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1900 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1901
1902 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1903 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1904 if (SA->hasNoUnsignedWrap()) {
1905 // If the addition does not unsign overflow then we can, by definition,
1906 // commute the zero extension with the addition operation.
1908 for (SCEVUse Op : SA->operands())
1909 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1910 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1911 }
1912
1913 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1914 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1915 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1916 //
1917 // Often address arithmetics contain expressions like
1918 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1919 // This transformation is useful while proving that such expressions are
1920 // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
1921 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1922 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1923 if (D != 0) {
1924 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1925 const SCEV *SResidual =
1927 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1928 return getAddExpr(SZExtD, SZExtR, (SCEV::FlagNSW | SCEV::FlagNUW),
1929 Depth + 1);
1930 }
1931 }
1932 }
1933
1934 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1935 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1936 if (SM->hasNoUnsignedWrap()) {
1937 // If the multiply does not unsign overflow then we can, by definition,
1938 // commute the zero extension with the multiply operation.
1940 for (SCEVUse Op : SM->operands())
1941 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1942 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1943 }
1944
1945 // zext(2^K * (trunc X to iN)) to iM ->
1946 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1947 //
1948 // Proof:
1949 //
1950 // zext(2^K * (trunc X to iN)) to iM
1951 // = zext((trunc X to iN) << K) to iM
1952 // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1953 // (because shl removes the top K bits)
1954 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1955 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1956 //
1957 const APInt *C;
1958 const SCEV *TruncRHS;
1959 if (match(SM,
1960 m_scev_Mul(m_scev_APInt(C), m_scev_Trunc(m_SCEV(TruncRHS)))) &&
1961 C->isPowerOf2()) {
1962 int NewTruncBits =
1963 getTypeSizeInBits(SM->getOperand(1)->getType()) - C->logBase2();
1964 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1965 return getMulExpr(
1966 getZeroExtendExpr(SM->getOperand(0), Ty),
1967 getZeroExtendExpr(getTruncateExpr(TruncRHS, NewTruncTy), Ty),
1968 SCEV::FlagNUW, Depth + 1);
1969 }
1970 }
1971
1972 // zext(umin(x, y)) -> umin(zext(x), zext(y))
1973 // zext(umax(x, y)) -> umax(zext(x), zext(y))
1976 SmallVector<SCEVUse, 4> Operands;
1977 for (SCEVUse Operand : MinMax->operands())
1978 Operands.push_back(getZeroExtendExpr(Operand, Ty));
1980 return getUMinExpr(Operands);
1981 return getUMaxExpr(Operands);
1982 }
1983
1984 // zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y))
1986 assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
1987 SmallVector<SCEVUse, 4> Operands;
1988 for (SCEVUse Operand : MinMax->operands())
1989 Operands.push_back(getZeroExtendExpr(Operand, Ty));
1990 return getUMinExpr(Operands, /*Sequential*/ true);
1991 }
1992
1993 // The cast wasn't folded; create an explicit cast node.
1994 // Recompute the insert position, as it may have been invalidated.
1995 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1996 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1997 Op, Ty);
1998 UniqueSCEVs.InsertNode(S, IP);
1999 S->computeAndSetCanonical(*this);
2000 registerUser(S, Op);
2001 return S;
2002}
2003
2004const SCEV *
2006 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2007 "This is not an extending conversion!");
2008 assert(isSCEVable(Ty) &&
2009 "This is not a conversion to a SCEVable type!");
2010 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
2011 Ty = getEffectiveSCEVType(Ty);
2012
2013 FoldID ID(scSignExtend, Op, Ty);
2014 if (const SCEV *S = FoldCache.lookup(ID))
2015 return S;
2016
2017 const SCEV *S = getSignExtendExprImpl(Op, Ty, Depth);
2019 insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser);
2020 return S;
2021}
2022
2024 unsigned Depth) {
2025 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2026 "This is not an extending conversion!");
2027 assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
2028 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
2029 Ty = getEffectiveSCEVType(Ty);
2030
2031 // Fold if the operand is constant.
2032 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2033 return getConstant(SC->getAPInt().sext(getTypeSizeInBits(Ty)));
2034
2035 // sext(sext(x)) --> sext(x)
2037 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
2038
2039 // sext(zext(x)) --> zext(x)
2041 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
2042
2043 // Before doing any expensive analysis, check to see if we've already
2044 // computed a SCEV for this Op and Ty.
2046 ID.AddInteger(scSignExtend);
2047 ID.AddPointer(Op);
2048 ID.AddPointer(Ty);
2049 void *IP = nullptr;
2050 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2051 // Limit recursion depth.
2052 if (Depth > MaxCastDepth) {
2053 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2054 Op, Ty);
2055 UniqueSCEVs.InsertNode(S, IP);
2056 S->computeAndSetCanonical(*this);
2057 registerUser(S, Op);
2058 return S;
2059 }
2060
2061 // sext(trunc(x)) --> sext(x) or x or trunc(x)
2063 // It's possible the bits taken off by the truncate were all sign bits. If
2064 // so, we should be able to simplify this further.
2065 const SCEV *X = ST->getOperand();
2067 unsigned TruncBits = getTypeSizeInBits(ST->getType());
2068 unsigned NewBits = getTypeSizeInBits(Ty);
2069 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
2070 CR.sextOrTrunc(NewBits)))
2071 return getTruncateOrSignExtend(X, Ty, Depth);
2072 }
2073
2074 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
2075 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
2076 if (SA->hasNoSignedWrap()) {
2077 // If the addition does not sign overflow then we can, by definition,
2078 // commute the sign extension with the addition operation.
2080 for (SCEVUse Op : SA->operands())
2081 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
2082 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
2083 }
2084
2085 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
2086 // if D + (C - D + x + y + ...) could be proven to not signed wrap
2087 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
2088 //
2089 // For instance, this will bring two seemingly different expressions:
2090 // 1 + sext(5 + 20 * %x + 24 * %y) and
2091 // sext(6 + 20 * %x + 24 * %y)
2092 // to the same form:
2093 // 2 + sext(4 + 20 * %x + 24 * %y)
2094 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
2095 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
2096 if (D != 0) {
2097 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2098 const SCEV *SResidual =
2100 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2101 return getAddExpr(SSExtD, SSExtR, (SCEV::FlagNSW | SCEV::FlagNUW),
2102 Depth + 1);
2103 }
2104 }
2105 }
2106 // If the input value is a chrec scev, and we can prove that the value
2107 // did not overflow the old, smaller, value, we can sign extend all of the
2108 // operands (often constants). This allows analysis of something like
2109 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
2111 if (AR->isAffine()) {
2112 const SCEV *Start = AR->getStart();
2113 const SCEV *Step = AR->getStepRecurrence(*this);
2114 unsigned BitWidth = getTypeSizeInBits(AR->getType());
2115 const Loop *L = AR->getLoop();
2116
2117 // If we have special knowledge that this addrec won't overflow,
2118 // we don't need to do any further analysis.
2119 if (AR->hasNoSignedWrap()) {
2120 Start =
2122 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2123 return getAddRecExpr(Start, Step, L, SCEV::FlagNSW);
2124 }
2125
2126 // Check whether the backedge-taken count is SCEVCouldNotCompute.
2127 // Note that this serves two purposes: It filters out loops that are
2128 // simply not analyzable, and it covers the case where this code is
2129 // being called from within backedge-taken count analysis, such that
2130 // attempting to ask for the backedge-taken count would likely result
2131 // in infinite recursion. In the later case, the analysis code will
2132 // cope with a conservative value, and it will take care to purge
2133 // that value once it has finished.
2134 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
2135 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2136 // Manually compute the final value for AR, checking for
2137 // overflow.
2138
2139 // Check whether the backedge-taken count can be losslessly casted to
2140 // the addrec's type. The count is always unsigned.
2141 const SCEV *CastedMaxBECount =
2142 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2143 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2144 CastedMaxBECount, MaxBECount->getType(), Depth);
2145 if (MaxBECount == RecastedMaxBECount) {
2146 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2147 // Check whether Start+Step*MaxBECount has no signed overflow.
2148 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2150 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2152 Depth + 1),
2153 WideTy, Depth + 1);
2154 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2155 const SCEV *WideMaxBECount =
2156 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2157 const SCEV *OperandExtendedAdd =
2158 getAddExpr(WideStart,
2159 getMulExpr(WideMaxBECount,
2160 getSignExtendExpr(Step, WideTy, Depth + 1),
2163 if (SAdd == OperandExtendedAdd) {
2164 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2165 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2166 // Return the expression with the addrec on the outside.
2167 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2168 Depth + 1);
2169 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2170 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2171 }
2172 // Similar to above, only this time treat the step value as unsigned.
2173 // This covers loops that count up with an unsigned step.
2174 OperandExtendedAdd =
2175 getAddExpr(WideStart,
2176 getMulExpr(WideMaxBECount,
2177 getZeroExtendExpr(Step, WideTy, Depth + 1),
2180 if (SAdd == OperandExtendedAdd) {
2181 // If AR wraps around then
2182 //
2183 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2184 // => SAdd != OperandExtendedAdd
2185 //
2186 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2187 // (SAdd == OperandExtendedAdd => AR is NW)
2188
2189 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2190
2191 // Return the expression with the addrec on the outside.
2192 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2193 Depth + 1);
2194 Step = getZeroExtendExpr(Step, Ty, Depth + 1);
2195 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2196 }
2197 }
2198 }
2199
2200 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2201 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2202 if (AR->hasNoSignedWrap()) {
2203 // Same as nsw case above - duplicated here to avoid a compile time
2204 // issue. It's not clear that the order of checks does matter, but
2205 // it's one of two issue possible causes for a change which was
2206 // reverted. Be conservative for the moment.
2207 Start =
2209 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2210 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2211 }
2212
2213 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2214 // if D + (C - D + Step * n) could be proven to not signed wrap
2215 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2216 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2217 const APInt &C = SC->getAPInt();
2218 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2219 if (D != 0) {
2220 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2221 const SCEV *SResidual =
2222 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2223 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2224 return getAddExpr(SSExtD, SSExtR, (SCEV::FlagNSW | SCEV::FlagNUW),
2225 Depth + 1);
2226 }
2227 }
2228
2229 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2230 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2231 Start =
2233 Step = getSignExtendExpr(Step, Ty, Depth + 1);
2234 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags());
2235 }
2236 }
2237
2238 // If the input value is provably positive and we could not simplify
2239 // away the sext build a zext instead.
2241 return getZeroExtendExpr(Op, Ty, Depth + 1);
2242
2243 // sext(smin(x, y)) -> smin(sext(x), sext(y))
2244 // sext(smax(x, y)) -> smax(sext(x), sext(y))
2247 SmallVector<SCEVUse, 4> Operands;
2248 for (SCEVUse Operand : MinMax->operands())
2249 Operands.push_back(getSignExtendExpr(Operand, Ty));
2251 return getSMinExpr(Operands);
2252 return getSMaxExpr(Operands);
2253 }
2254
2255 // The cast wasn't folded; create an explicit cast node.
2256 // Recompute the insert position, as it may have been invalidated.
2257 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2258 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2259 Op, Ty);
2260 UniqueSCEVs.InsertNode(S, IP);
2261 S->computeAndSetCanonical(*this);
2262 registerUser(S, Op);
2263 return S;
2264}
2265
2267 Type *Ty) {
2268 switch (Kind) {
2269 case scTruncate:
2270 return getTruncateExpr(Op, Ty);
2271 case scZeroExtend:
2272 return getZeroExtendExpr(Op, Ty);
2273 case scSignExtend:
2274 return getSignExtendExpr(Op, Ty);
2275 case scPtrToInt:
2276 return getPtrToIntExpr(Op, Ty);
2277 default:
2278 llvm_unreachable("Not a SCEV cast expression!");
2279 }
2280}
2281
2282/// getAnyExtendExpr - Return a SCEV for the given operand extended with
2283/// unspecified bits out to the given type.
2285 Type *Ty) {
2286 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2287 "This is not an extending conversion!");
2288 assert(isSCEVable(Ty) &&
2289 "This is not a conversion to a SCEVable type!");
2290 Ty = getEffectiveSCEVType(Ty);
2291
2292 // Sign-extend negative constants.
2293 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2294 if (SC->getAPInt().isNegative())
2295 return getSignExtendExpr(Op, Ty);
2296
2297 // Peel off a truncate cast.
2299 const SCEV *NewOp = T->getOperand();
2300 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2301 return getAnyExtendExpr(NewOp, Ty);
2302 return getTruncateOrNoop(NewOp, Ty);
2303 }
2304
2305 // Next try a zext cast. If the cast is folded, use it.
2306 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2307 if (!isa<SCEVZeroExtendExpr>(ZExt))
2308 return ZExt;
2309
2310 // Next try a sext cast. If the cast is folded, use it.
2311 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2312 if (!isa<SCEVSignExtendExpr>(SExt))
2313 return SExt;
2314
2315 // Force the cast to be folded into the operands of an addrec.
2316 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2318 for (const SCEV *Op : AR->operands())
2319 Ops.push_back(getAnyExtendExpr(Op, Ty));
2320 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2321 }
2322
2323 // If the expression is obviously signed, use the sext cast value.
2324 if (isa<SCEVSMaxExpr>(Op))
2325 return SExt;
2326
2327 // Absent any other information, use the zext cast value.
2328 return ZExt;
2329}
2330
2331/// Process the given Ops list, which is a list of operands to be added under
2332/// the given scale, update the given map. This is a helper function for
2333/// getAddRecExpr. As an example of what it does, given a sequence of operands
2334/// that would form an add expression like this:
2335///
2336/// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2337///
2338/// where A and B are constants, update the map with these values:
2339///
2340/// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2341///
2342/// and add 13 + A*B*29 to AccumulatedConstant.
2343/// This will allow getAddRecExpr to produce this:
2344///
2345/// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2346///
2347/// This form often exposes folding opportunities that are hidden in
2348/// the original operand list.
2349///
2350/// Return true iff it appears that any interesting folding opportunities
2351/// may be exposed. This helps getAddRecExpr short-circuit extra work in
2352/// the common case where no interesting opportunities are present, and
2353/// is also used as a check to avoid infinite recursion.
2356 APInt &AccumulatedConstant,
2358 const APInt &Scale,
2359 ScalarEvolution &SE) {
2360 bool Interesting = false;
2361
2362 // Iterate over the add operands. They are sorted, with constants first.
2363 unsigned i = 0;
2364 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2365 ++i;
2366 // Pull a buried constant out to the outside.
2367 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2368 Interesting = true;
2369 AccumulatedConstant += Scale * C->getAPInt();
2370 }
2371
2372 // Next comes everything else. We're especially interested in multiplies
2373 // here, but they're in the middle, so just visit the rest with one loop.
2374 for (; i != Ops.size(); ++i) {
2376 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2377 APInt NewScale =
2378 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2379 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2380 // A multiplication of a constant with another add; recurse.
2381 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2382 Interesting |= CollectAddOperandsWithScales(
2383 M, NewOps, AccumulatedConstant, Add->operands(), NewScale, SE);
2384 } else {
2385 // A multiplication of a constant with some other value. Update
2386 // the map.
2387 SmallVector<SCEVUse, 4> MulOps(drop_begin(Mul->operands()));
2388 const SCEV *Key = SE.getMulExpr(MulOps);
2389 auto Pair = M.insert({Key, NewScale});
2390 if (Pair.second) {
2391 NewOps.push_back(Pair.first->first);
2392 } else {
2393 Pair.first->second += NewScale;
2394 // The map already had an entry for this value, which may indicate
2395 // a folding opportunity.
2396 Interesting = true;
2397 }
2398 }
2399 } else {
2400 // An ordinary operand. Update the map.
2401 auto Pair = M.insert({Ops[i], Scale});
2402 if (Pair.second) {
2403 NewOps.push_back(Pair.first->first);
2404 } else {
2405 Pair.first->second += Scale;
2406 // The map already had an entry for this value, which may indicate
2407 // a folding opportunity.
2408 Interesting = true;
2409 }
2410 }
2411 }
2412
2413 return Interesting;
2414}
2415
2417 const SCEV *LHS, const SCEV *RHS,
2418 const Instruction *CtxI) {
2420 unsigned);
2421 switch (BinOp) {
2422 default:
2423 llvm_unreachable("Unsupported binary op");
2424 case Instruction::Add:
2426 break;
2427 case Instruction::Sub:
2429 break;
2430 case Instruction::Mul:
2432 break;
2433 }
2434
2435 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
2438
2439 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
2440 auto *NarrowTy = cast<IntegerType>(LHS->getType());
2441 auto *WideTy =
2442 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
2443
2444 const SCEV *A = (this->*Extension)(
2445 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
2446 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2447 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2448 const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0);
2449 if (A == B)
2450 return true;
2451 // Can we use context to prove the fact we need?
2452 if (!CtxI)
2453 return false;
2454 // TODO: Support mul.
2455 if (BinOp == Instruction::Mul)
2456 return false;
2457 auto *RHSC = dyn_cast<SCEVConstant>(RHS);
2458 // TODO: Lift this limitation.
2459 if (!RHSC)
2460 return false;
2461 APInt C = RHSC->getAPInt();
2462 unsigned NumBits = C.getBitWidth();
2463 bool IsSub = (BinOp == Instruction::Sub);
2464 bool IsNegativeConst = (Signed && C.isNegative());
2465 // Compute the direction and magnitude by which we need to check overflow.
2466 bool OverflowDown = IsSub ^ IsNegativeConst;
2467 APInt Magnitude = C;
2468 if (IsNegativeConst) {
2469 if (C == APInt::getSignedMinValue(NumBits))
2470 // TODO: SINT_MIN on inversion gives the same negative value, we don't
2471 // want to deal with that.
2472 return false;
2473 Magnitude = -C;
2474 }
2475
2477 if (OverflowDown) {
2478 // To avoid overflow down, we need to make sure that MIN + Magnitude <= LHS.
2479 APInt Min = Signed ? APInt::getSignedMinValue(NumBits)
2480 : APInt::getMinValue(NumBits);
2481 APInt Limit = Min + Magnitude;
2482 return isKnownPredicateAt(Pred, getConstant(Limit), LHS, CtxI);
2483 } else {
2484 // To avoid overflow up, we need to make sure that LHS <= MAX - Magnitude.
2485 APInt Max = Signed ? APInt::getSignedMaxValue(NumBits)
2486 : APInt::getMaxValue(NumBits);
2487 APInt Limit = Max - Magnitude;
2488 return isKnownPredicateAt(Pred, LHS, getConstant(Limit), CtxI);
2489 }
2490}
2491
2492std::optional<SCEV::NoWrapFlags>
2494 const OverflowingBinaryOperator *OBO) {
2495 // It cannot be done any better.
2496 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
2497 return std::nullopt;
2498
2499 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
2500
2501 if (OBO->hasNoUnsignedWrap())
2503 if (OBO->hasNoSignedWrap())
2505
2506 bool Deduced = false;
2507
2508 if (OBO->getOpcode() != Instruction::Add &&
2509 OBO->getOpcode() != Instruction::Sub &&
2510 OBO->getOpcode() != Instruction::Mul)
2511 return std::nullopt;
2512
2513 const SCEV *LHS = getSCEV(OBO->getOperand(0));
2514 const SCEV *RHS = getSCEV(OBO->getOperand(1));
2515
2516 const Instruction *CtxI =
2518 if (!OBO->hasNoUnsignedWrap() &&
2520 /* Signed */ false, LHS, RHS, CtxI)) {
2522 Deduced = true;
2523 }
2524
2525 if (!OBO->hasNoSignedWrap() &&
2527 /* Signed */ true, LHS, RHS, CtxI)) {
2529 Deduced = true;
2530 }
2531
2532 if (Deduced)
2533 return Flags;
2534 return std::nullopt;
2535}
2536
2537// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2538// `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
2539// can't-overflow flags for the operation if possible.
2543 SCEV::NoWrapFlags Flags) {
2544 using namespace std::placeholders;
2545
2546 using OBO = OverflowingBinaryOperator;
2547
2548 bool CanAnalyze =
2550 (void)CanAnalyze;
2551 assert(CanAnalyze && "don't call from other places!");
2552
2553 SCEV::NoWrapFlags SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2554 SCEV::NoWrapFlags SignOrUnsignWrap =
2555 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2556
2557 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2558 auto IsKnownNonNegative = [&](SCEVUse U) {
2559 return SE->isKnownNonNegative(U);
2560 };
2561
2562 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2563 Flags = ScalarEvolution::setFlags(Flags, SignOrUnsignMask);
2564
2565 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2566
2567 if (SignOrUnsignWrap != SignOrUnsignMask &&
2568 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2569 isa<SCEVConstant>(Ops[0])) {
2570
2571 auto Opcode = [&] {
2572 switch (Type) {
2573 case scAddExpr:
2574 return Instruction::Add;
2575 case scMulExpr:
2576 return Instruction::Mul;
2577 default:
2578 llvm_unreachable("Unexpected SCEV op.");
2579 }
2580 }();
2581
2582 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2583
2584 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2585 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2587 Opcode, C, OBO::NoSignedWrap);
2588 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2590 }
2591
2592 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2593 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2595 Opcode, C, OBO::NoUnsignedWrap);
2596 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2598 }
2599 }
2600
2601 // <0,+,nonnegative><nw> is also nuw
2602 // TODO: Add corresponding nsw case
2604 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
2605 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2607
2608 // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW
2610 Ops.size() == 2) {
2611 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2612 if (UDiv->getOperand(1) == Ops[1])
2614 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2615 if (UDiv->getOperand(1) == Ops[0])
2617 }
2618
2619 return Flags;
2620}
2621
2623 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2624}
2625
2626/// Get a canonical add expression, or something simpler if possible.
2628 SCEV::NoWrapFlags OrigFlags,
2629 unsigned Depth) {
2630 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2631 "only nuw or nsw allowed");
2632 assert(!Ops.empty() && "Cannot get empty add!");
2633 if (Ops.size() == 1) return Ops[0];
2634#ifndef NDEBUG
2635 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2636 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2637 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2638 "SCEVAddExpr operand types don't match!");
2639 unsigned NumPtrs = count_if(
2640 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2641 assert(NumPtrs <= 1 && "add has at most one pointer operand");
2642#endif
2643
2644 const SCEV *Folded = constantFoldAndGroupOps(
2645 *this, LI, DT, Ops,
2646 [](const APInt &C1, const APInt &C2) { return C1 + C2; },
2647 [](const APInt &C) { return C.isZero(); }, // identity
2648 [](const APInt &C) { return false; }); // absorber
2649 if (Folded)
2650 return Folded;
2651
2652 unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
2653
2654 // Delay expensive flag strengthening until necessary.
2655 auto ComputeFlags = [this, OrigFlags](ArrayRef<SCEVUse> Ops) {
2656 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2657 };
2658
2659 // Limit recursion calls depth.
2661 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2662
2663 if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
2664 // Don't strengthen flags if we have no new information.
2665 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2666 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2667 Add->setNoWrapFlags(ComputeFlags(Ops));
2668 return S;
2669 }
2670
2671 // Okay, check to see if the same value occurs in the operand list more than
2672 // once. If so, merge them together into an multiply expression. Since we
2673 // sorted the list, these values are required to be adjacent.
2674 Type *Ty = Ops[0]->getType();
2675 bool FoundMatch = false;
2676 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2677 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2678 // Scan ahead to count how many equal operands there are.
2679 unsigned Count = 2;
2680 while (i+Count != e && Ops[i+Count] == Ops[i])
2681 ++Count;
2682 // Merge the values into a multiply.
2683 SCEVUse Scale = getConstant(Ty, Count);
2684 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2685 if (Ops.size() == Count)
2686 return Mul;
2687 Ops[i] = Mul;
2688 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2689 --i; e -= Count - 1;
2690 FoundMatch = true;
2691 }
2692 if (FoundMatch)
2693 return getAddExpr(Ops, OrigFlags, Depth + 1);
2694
2695 // Check for truncates. If all the operands are truncated from the same
2696 // type, see if factoring out the truncate would permit the result to be
2697 // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y)
2698 // if the contents of the resulting outer trunc fold to something simple.
2699 auto FindTruncSrcType = [&]() -> Type * {
2700 // We're ultimately looking to fold an addrec of truncs and muls of only
2701 // constants and truncs, so if we find any other types of SCEV
2702 // as operands of the addrec then we bail and return nullptr here.
2703 // Otherwise, we return the type of the operand of a trunc that we find.
2704 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2705 return T->getOperand()->getType();
2706 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2707 SCEVUse LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2708 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2709 return T->getOperand()->getType();
2710 }
2711 return nullptr;
2712 };
2713 if (auto *SrcType = FindTruncSrcType()) {
2714 SmallVector<SCEVUse, 8> LargeOps;
2715 bool Ok = true;
2716 // Check all the operands to see if they can be represented in the
2717 // source type of the truncate.
2718 for (const SCEV *Op : Ops) {
2720 if (T->getOperand()->getType() != SrcType) {
2721 Ok = false;
2722 break;
2723 }
2724 LargeOps.push_back(T->getOperand());
2725 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Op)) {
2726 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2727 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Op)) {
2728 SmallVector<SCEVUse, 8> LargeMulOps;
2729 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2730 if (const SCEVTruncateExpr *T =
2731 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2732 if (T->getOperand()->getType() != SrcType) {
2733 Ok = false;
2734 break;
2735 }
2736 LargeMulOps.push_back(T->getOperand());
2737 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2738 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2739 } else {
2740 Ok = false;
2741 break;
2742 }
2743 }
2744 if (Ok)
2745 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2746 } else {
2747 Ok = false;
2748 break;
2749 }
2750 }
2751 if (Ok) {
2752 // Evaluate the expression in the larger type.
2753 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2754 // If it folds to something simple, use it. Otherwise, don't.
2755 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2756 return getTruncateExpr(Fold, Ty);
2757 }
2758 }
2759
2760 if (Ops.size() == 2) {
2761 // Check if we have an expression of the form ((X + C1) - C2), where C1 and
2762 // C2 can be folded in a way that allows retaining wrapping flags of (X +
2763 // C1).
2764 const SCEV *A = Ops[0];
2765 const SCEV *B = Ops[1];
2766 auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
2767 auto *C = dyn_cast<SCEVConstant>(A);
2768 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2769 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2770 auto C2 = C->getAPInt();
2771 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
2772
2773 APInt ConstAdd = C1 + C2;
2774 auto AddFlags = AddExpr->getNoWrapFlags();
2775 // Adding a smaller constant is NUW if the original AddExpr was NUW.
2777 ConstAdd.ule(C1)) {
2778 PreservedFlags =
2780 }
2781
2782 // Adding a constant with the same sign and small magnitude is NSW, if the
2783 // original AddExpr was NSW.
2785 C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2786 ConstAdd.abs().ule(C1.abs())) {
2787 PreservedFlags =
2789 }
2790
2791 if (PreservedFlags != SCEV::FlagAnyWrap) {
2792 SmallVector<SCEVUse, 4> NewOps(AddExpr->operands());
2793 NewOps[0] = getConstant(ConstAdd);
2794 return getAddExpr(NewOps, PreservedFlags);
2795 }
2796 }
2797
2798 // Try to push the constant operand into a ZExt: A + zext (-A + B) -> zext
2799 // (B), if trunc (A) + -A + B does not unsigned-wrap.
2800 const SCEVAddExpr *InnerAdd;
2801 if (match(B, m_scev_ZExt(m_scev_Add(InnerAdd)))) {
2802 const SCEV *NarrowA = getTruncateExpr(A, InnerAdd->getType());
2803 if (NarrowA == getNegativeSCEV(InnerAdd->getOperand(0)) &&
2804 getZeroExtendExpr(NarrowA, B->getType()) == A &&
2805 hasFlags(StrengthenNoWrapFlags(this, scAddExpr, {NarrowA, InnerAdd},
2807 SCEV::FlagNUW)) {
2808 return getZeroExtendExpr(getAddExpr(NarrowA, InnerAdd), B->getType());
2809 }
2810 }
2811 }
2812
2813 // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
2814 const SCEV *Y;
2815 if (Ops.size() == 2 &&
2816 match(Ops[0],
2818 m_scev_URem(m_scev_Specific(Ops[1]), m_SCEV(Y), *this))))
2819 return getMulExpr(Y, getUDivExpr(Ops[1], Y));
2820
2821 // Skip past any other cast SCEVs.
2822 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2823 ++Idx;
2824
2825 // If there are add operands they would be next.
2826 if (Idx < Ops.size()) {
2827 bool DeletedAdd = false;
2828 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2829 // common NUW flag for expression after inlining. Other flags cannot be
2830 // preserved, because they may depend on the original order of operations.
2831 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2832 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2833 if (Ops.size() > AddOpsInlineThreshold ||
2834 Add->getNumOperands() > AddOpsInlineThreshold)
2835 break;
2836 // If we have an add, expand the add operands onto the end of the operands
2837 // list.
2838 Ops.erase(Ops.begin()+Idx);
2839 append_range(Ops, Add->operands());
2840 DeletedAdd = true;
2841 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2842 }
2843
2844 // If we deleted at least one add, we added operands to the end of the list,
2845 // and they are not necessarily sorted. Recurse to resort and resimplify
2846 // any operands we just acquired.
2847 if (DeletedAdd)
2848 return getAddExpr(Ops, CommonFlags, Depth + 1);
2849 }
2850
2851 // Skip over the add expression until we get to a multiply.
2852 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2853 ++Idx;
2854
2855 // Check to see if there are any folding opportunities present with
2856 // operands multiplied by constant values.
2857 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2861 APInt AccumulatedConstant(BitWidth, 0);
2862 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2863 Ops, APInt(BitWidth, 1), *this)) {
2864 struct APIntCompare {
2865 bool operator()(const APInt &LHS, const APInt &RHS) const {
2866 return LHS.ult(RHS);
2867 }
2868 };
2869
2870 // Some interesting folding opportunity is present, so its worthwhile to
2871 // re-generate the operands list. Group the operands by constant scale,
2872 // to avoid multiplying by the same constant scale multiple times.
2873 std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
2874 for (const SCEV *NewOp : NewOps)
2875 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2876 // Re-generate the operands list.
2877 Ops.clear();
2878 if (AccumulatedConstant != 0)
2879 Ops.push_back(getConstant(AccumulatedConstant));
2880 for (auto &MulOp : MulOpLists) {
2881 if (MulOp.first == 1) {
2882 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2883 } else if (MulOp.first != 0) {
2884 Ops.push_back(getMulExpr(
2885 getConstant(MulOp.first),
2886 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2887 SCEV::FlagAnyWrap, Depth + 1));
2888 }
2889 }
2890 if (Ops.empty())
2891 return getZero(Ty);
2892 if (Ops.size() == 1)
2893 return Ops[0];
2894 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2895 }
2896 }
2897
2898 // If we are adding something to a multiply expression, make sure the
2899 // something is not already an operand of the multiply. If so, merge it into
2900 // the multiply.
2901 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2902 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2903 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2904 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2905 if (isa<SCEVConstant>(MulOpSCEV))
2906 continue;
2907 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2908 if (MulOpSCEV == Ops[AddOp]) {
2909 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2910 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2911 if (Mul->getNumOperands() != 2) {
2912 // If the multiply has more than two operands, we must get the
2913 // Y*Z term.
2914 SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
2915 append_range(MulOps, Mul->operands().drop_front(MulOp + 1));
2916 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2917 }
2918 const SCEV *AddOne =
2919 getAddExpr(getOne(Ty), InnerMul, SCEV::FlagAnyWrap, Depth + 1);
2920 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2922 if (Ops.size() == 2) return OuterMul;
2923 if (AddOp < Idx) {
2924 Ops.erase(Ops.begin()+AddOp);
2925 Ops.erase(Ops.begin()+Idx-1);
2926 } else {
2927 Ops.erase(Ops.begin()+Idx);
2928 Ops.erase(Ops.begin()+AddOp-1);
2929 }
2930 Ops.push_back(OuterMul);
2931 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2932 }
2933
2934 // Check this multiply against other multiplies being added together.
2935 for (unsigned OtherMulIdx = Idx+1;
2936 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2937 ++OtherMulIdx) {
2938 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2939 // If MulOp occurs in OtherMul, we can fold the two multiplies
2940 // together.
2941 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2942 OMulOp != e; ++OMulOp)
2943 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2944 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2945 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2946 if (Mul->getNumOperands() != 2) {
2947 SmallVector<SCEVUse, 4> MulOps(Mul->operands().take_front(MulOp));
2948 append_range(MulOps, Mul->operands().drop_front(MulOp+1));
2949 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2950 }
2951 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2952 if (OtherMul->getNumOperands() != 2) {
2954 OtherMul->operands().take_front(OMulOp));
2955 append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1));
2956 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2957 }
2958 const SCEV *InnerMulSum =
2959 getAddExpr(InnerMul1, InnerMul2, SCEV::FlagAnyWrap, Depth + 1);
2960 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2962 if (Ops.size() == 2) return OuterMul;
2963 Ops.erase(Ops.begin()+Idx);
2964 Ops.erase(Ops.begin()+OtherMulIdx-1);
2965 Ops.push_back(OuterMul);
2966 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2967 }
2968 }
2969 }
2970 }
2971
2972 // If there are any add recurrences in the operands list, see if any other
2973 // added values are loop invariant. If so, we can fold them into the
2974 // recurrence.
2975 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2976 ++Idx;
2977
2978 // Scan over all recurrences, trying to fold loop invariants into them.
2979 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2980 // Scan all of the other operands to this add and add them to the vector if
2981 // they are loop invariant w.r.t. the recurrence.
2983 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2984 const Loop *AddRecLoop = AddRec->getLoop();
2985 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2986 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2987 LIOps.push_back(Ops[i]);
2988 Ops.erase(Ops.begin()+i);
2989 --i; --e;
2990 }
2991
2992 // If we found some loop invariants, fold them into the recurrence.
2993 if (!LIOps.empty()) {
2994 // Compute nowrap flags for the addition of the loop-invariant ops and
2995 // the addrec. Temporarily push it as an operand for that purpose. These
2996 // flags are valid in the scope of the addrec only.
2997 LIOps.push_back(AddRec);
2998 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2999 LIOps.pop_back();
3000
3001 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
3002 LIOps.push_back(AddRec->getStart());
3003
3004 SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
3005
3006 // It is not in general safe to propagate flags valid on an add within
3007 // the addrec scope to one outside it. We must prove that the inner
3008 // scope is guaranteed to execute if the outer one does to be able to
3009 // safely propagate. We know the program is undefined if poison is
3010 // produced on the inner scoped addrec. We also know that *for this use*
3011 // the outer scoped add can't overflow (because of the flags we just
3012 // computed for the inner scoped add) without the program being undefined.
3013 // Proving that entry to the outer scope neccesitates entry to the inner
3014 // scope, thus proves the program undefined if the flags would be violated
3015 // in the outer scope.
3016 SCEV::NoWrapFlags AddFlags = Flags;
3017 if (AddFlags != SCEV::FlagAnyWrap) {
3018 auto *DefI = getDefiningScopeBound(LIOps);
3019 auto *ReachI = &*AddRecLoop->getHeader()->begin();
3020 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
3021 AddFlags = SCEV::FlagAnyWrap;
3022 }
3023 AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);
3024
3025 // Build the new addrec. Propagate the NUW and NSW flags if both the
3026 // outer add and the inner addrec are guaranteed to have no overflow.
3027 // Always propagate NW.
3028 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
3029 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
3030
3031 // If all of the other operands were loop invariant, we are done.
3032 if (Ops.size() == 1) return NewRec;
3033
3034 // Otherwise, add the folded AddRec by the non-invariant parts.
3035 for (unsigned i = 0;; ++i)
3036 if (Ops[i] == AddRec) {
3037 Ops[i] = NewRec;
3038 break;
3039 }
3040 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3041 }
3042
3043 // Okay, if there weren't any loop invariants to be folded, check to see if
3044 // there are multiple AddRec's with the same loop induction variable being
3045 // added together. If so, we can fold them.
3046 for (unsigned OtherIdx = Idx+1;
3047 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3048 ++OtherIdx) {
3049 // We expect the AddRecExpr's to be sorted in reverse dominance order,
3050 // so that the 1st found AddRecExpr is dominated by all others.
3051 assert(DT.dominates(
3052 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
3053 AddRec->getLoop()->getHeader()) &&
3054 "AddRecExprs are not sorted in reverse dominance order?");
3055 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
3056 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
3057 SmallVector<SCEVUse, 4> AddRecOps(AddRec->operands());
3058 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3059 ++OtherIdx) {
3060 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3061 if (OtherAddRec->getLoop() == AddRecLoop) {
3062 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
3063 i != e; ++i) {
3064 if (i >= AddRecOps.size()) {
3065 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3066 break;
3067 }
3068 AddRecOps[i] =
3069 getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
3071 }
3072 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3073 }
3074 }
3075 // Step size has changed, so we cannot guarantee no self-wraparound.
3076 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
3077 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3078 }
3079 }
3080
3081 // Otherwise couldn't fold anything into this recurrence. Move onto the
3082 // next one.
3083 }
3084
3085 // Okay, it looks like we really DO need an add expr. Check to see if we
3086 // already have one, otherwise create a new one.
3087 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
3088}
3089
/// Look up an add expression with exactly these operands in the unique'ing
/// table, creating and interning a new SCEVAddExpr if none exists yet. The
/// given no-wrap flags are applied to the node in either case, so flags may
/// be strengthened on a pre-existing node.
const SCEV *ScalarEvolution::getOrCreateAddExpr(ArrayRef<SCEVUse> Ops,
                                                SCEV::NoWrapFlags Flags) {
  // Build the folding-set profile from the expression kind and the operand
  // pointers (SCEVs are unique'd, so pointer identity suffices).
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    // Not found: copy the operands into allocator-owned storage and intern a
    // new node so later queries with the same operands return this pointer.
    SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    S->computeAndSetCanonical(*this);
    // Record operand->user edges so invalidating an operand invalidates S.
    registerUser(S, Ops);
  }
  // Applied unconditionally: callers may have computed stronger flags than
  // were known when the node was first created.
  S->setNoWrapFlags(Flags);
  return S;
}
3111
/// Look up an add-recurrence {Ops} for loop \p L in the unique'ing table,
/// creating and interning a new SCEVAddRecExpr if none exists yet. The
/// given no-wrap flags are applied to the node in either case.
const SCEV *ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<SCEVUse> Ops,
                                                   const Loop *L,
                                                   SCEV::NoWrapFlags Flags) {
  // Profile includes the loop pointer: the same operand list in a different
  // loop is a different recurrence.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    S->computeAndSetCanonical(*this);
    // Track the expression per-loop so it can be forgotten when L changes.
    LoopUsers[L].push_back(S);
    registerUser(S, Ops);
  }
  // Uses the ScalarEvolution-level setter (not S->setNoWrapFlags directly),
  // unlike the add/mul variants above.
  setNoWrapFlags(S, Flags);
  return S;
}
3136
/// Look up a multiply expression with exactly these operands in the
/// unique'ing table, creating and interning a new SCEVMulExpr if none exists
/// yet. The given no-wrap flags are applied to the node in either case.
const SCEV *ScalarEvolution::getOrCreateMulExpr(ArrayRef<SCEVUse> Ops,
                                                SCEV::NoWrapFlags Flags) {
  // Profile by expression kind plus operand pointers (SCEVs are unique'd).
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    // Not found: copy operands into allocator-owned storage and intern.
    SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    S->computeAndSetCanonical(*this);
    // Record operand->user edges for invalidation.
    registerUser(S, Ops);
  }
  // May strengthen flags on an already-interned node.
  S->setNoWrapFlags(Flags);
  return S;
}
3158
/// Multiply two unsigned 64-bit values, flagging overflow.
///
/// If the product does not fit in 64 bits, \p Overflow is set to true;
/// it is never cleared here, so the flag is sticky across repeated calls.
/// The (possibly truncated) product is returned either way.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t Product = i * j;
  // Division-based check: if the multiply wrapped, dividing the truncated
  // product by one factor cannot recover the other. A factor of 0 or 1 can
  // never overflow, so the division is skipped for j <= 1.
  if (j > 1 && Product / j != i)
    Overflow = true;
  return Product;
}
3164
3165/// Compute the result of "n choose k", the binomial coefficient. If an
3166/// intermediate computation overflows, Overflow will be set and the return will
3167/// be garbage. Overflow is not cleared on absence of overflow.
3168static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
3169 // We use the multiplicative formula:
3170 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
3171 // At each iteration, we take the n-th term of the numeral and divide by the
3172 // (k-n)th term of the denominator. This division will always produce an
3173 // integral result, and helps reduce the chance of overflow in the
3174 // intermediate computations. However, we can still overflow even when the
3175 // final result would fit.
3176
3177 if (n == 0 || n == k) return 1;
3178 if (k > n) return 0;
3179
3180 if (k > n/2)
3181 k = n-k;
3182
3183 uint64_t r = 1;
3184 for (uint64_t i = 1; i <= k; ++i) {
3185 r = umul_ov(r, n-(i-1), Overflow);
3186 r /= i;
3187 }
3188 return r;
3189}
3190
/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  // Visitor with the follow/isDone interface expected by the SCEV traversal
  // machinery: records whether a constant was seen, and only descends while
  // the chain consists of add/mul nodes.
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    // Record whether S itself is a constant; return true (keep descending)
    // only for add and mul nodes, so the walk stays within the add/mul chain.
    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    // Stop the traversal as soon as a constant has been found.
    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}
3212
/// Get a canonical multiply expression, or something simpler if possible.
///
/// Simplifies the operand list in place: constant-folds and groups constants,
/// distributes a leading constant over adds, inlines nested muls, folds
/// loop-invariant factors and same-loop addrecs into recurrences, and finally
/// interns the canonical node. OrigFlags may only carry NUW/NSW and is
/// strengthened lazily via StrengthenNoWrapFlags. Depth bounds recursion.
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = Ops[0]->getType();
  assert(!ETy->isPointerTy());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(Ops[i]->getType() == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Fold all constant operands into one and group/sort the rest; returns a
  // fully-folded result when the whole expression collapses.
  const SCEV *Folded = constantFoldAndGroupOps(
      *this, LI, DT, Ops,
      [](const APInt &C1, const APInt &C2) { return C1 * C2; },
      [](const APInt &C) { return C.isOne(); }, // identity
      [](const APInt &C) { return C.isZero(); }); // absorber
  if (Folded)
    return Folded;

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<SCEVUse> Ops) {
    return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
  };

  // Limit recursion calls depth.
    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

  // Reuse an already-interned node with identical operands, strengthening its
  // flags only when OrigFlags carries information the node doesn't have yet.
  if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Folds driven by a constant in the leading position.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      // C1*(C2+V) -> C1*C2 + C1*V
      // If any of Add's ops are Adds or Muls with a constant, apply this
      // transformation as well.
      //
      // TODO: There are some cases where this transformation is not
      // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
      // this transformation should be narrowed down.
      const SCEV *Op0, *Op1;
      if (match(Ops[1], m_scev_Add(m_SCEV(Op0), m_SCEV(Op1))) &&
        const SCEV *LHS = getMulExpr(LHSC, Op0, SCEV::FlagAnyWrap, Depth + 1);
        const SCEV *RHS = getMulExpr(LHSC, Op1, SCEV::FlagAnyWrap, Depth + 1);
        return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
      }

      if (Ops[0]->isAllOnesValue()) {
        // If we have a mul by -1 of an add, try distributing the -1 among the
        // add operands.
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], SCEVUse(AddOp),
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          // Only rebuild as an add if at least one negation simplified;
          // otherwise this would just churn without progress.
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<SCEVUse, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], SCEVUse(AddRecOp),
                                          SCEV::FlagAnyWrap, Depth + 1));
          // Let M be the minimum representable signed value. AddRec with nsw
          // multiplied by -1 can have signed overflow if and only if it takes a
          // value of M: M * (-1) would stay M and (M + 1) * (-1) would be the
          // maximum signed value. In all other cases signed overflow is
          // impossible.
          auto FlagsMask = SCEV::FlagNW;
          if (AddRec->hasNoSignedWrap()) {
            auto MinInt =
                APInt::getSignedMinValue(getTypeSizeInBits(AddRec->getType()));
            if (getSignedRangeMin(AddRec) != MinInt)
              FlagsMask = setFlags(FlagsMask, SCEV::FlagNSW);
          }
          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(FlagsMask));
        }
      }

      // Try to push the constant operand into a ZExt: C * zext (A + B) ->
      // zext (C*A + C*B) if trunc (C) * (A + B) does not unsigned-wrap.
      const SCEVAddExpr *InnerAdd;
      if (match(Ops[1], m_scev_ZExt(m_scev_Add(InnerAdd)))) {
        const SCEV *NarrowC = getTruncateExpr(LHSC, InnerAdd->getType());
        if (isa<SCEVConstant>(InnerAdd->getOperand(0)) &&
            getZeroExtendExpr(NarrowC, Ops[1]->getType()) == LHSC &&
            hasFlags(StrengthenNoWrapFlags(this, scMulExpr, {NarrowC, InnerAdd},
                     SCEV::FlagNUW)) {
          auto *Res = getMulExpr(NarrowC, InnerAdd, SCEV::FlagNUW, Depth + 1);
          return getZeroExtendExpr(Res, Ops[1]->getType(), Depth + 1);
        };
      }

      // Try to fold (C1 * D /u C2) -> C1/C2 * D, if C1 and C2 are powers-of-2,
      // D is a multiple of C2, and C1 is a multiple of C2. If C2 is a multiple
      // of C1, fold to (D /u (C2 /u C1)).
      const SCEV *D;
      APInt C1V = LHSC->getAPInt();
      // (C1 * D /u C2) == -1 * -C1 * D /u C2 when C1 != INT_MIN. Don't treat -1
      // as -1 * 1, as it won't enable additional folds.
      if (C1V.isNegative() && !C1V.isMinSignedValue() && !C1V.isAllOnes())
        C1V = C1V.abs();
      const SCEVConstant *C2;
      if (C1V.isPowerOf2() &&
          C2->getAPInt().isPowerOf2() &&
          C1V.logBase2() <= getMinTrailingZeros(D)) {
        const SCEV *NewMul = nullptr;
        if (C1V.uge(C2->getAPInt())) {
          NewMul = getMulExpr(getUDivExpr(getConstant(C1V), C2), D);
        } else if (C2->getAPInt().logBase2() <= getMinTrailingZeros(D)) {
          assert(C1V.ugt(1) && "C1 <= 1 should have been folded earlier");
          NewMul = getUDivExpr(D, getUDivExpr(C2, getConstant(C1V)));
        }
        // If C1 was negated above, undo the negation on the folded result.
        if (NewMul)
          return C1V == LHSC->getAPInt() ? NewMul : getNegativeSCEV(NewMul);
      }
    }
  }

  // Skip over the add expression until we get to a multiply.
  unsigned Idx = 0;
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have an mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      append_range(Ops, Mul->operands());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);

      // If both the mul and addrec are nuw, we can preserve nuw.
      // If both the mul and addrec are nsw, we can only preserve nsw if either
      // a) they are also nuw, or
      // b) all multiplications of addrec operands with scale are nsw.
      SCEV::NoWrapFlags Flags =
          AddRec->getNoWrapFlags(ComputeFlags({Scale, AddRec}));

      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

        if (hasFlags(Flags, SCEV::FlagNSW) && !hasFlags(Flags, SCEV::FlagNUW)) {
              Instruction::Mul, getSignedRange(Scale),
          if (!NSWRegion.contains(getSignedRange(AddRec->getOperand(i))))
            Flags = clearFlags(Flags, SCEV::FlagNSW);
        }
      }

      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //   ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRec->getLoop())
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<SCEVUse, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
               ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      // Only install the combined recurrence if no coefficient computation
      // overflowed; otherwise the AddRecOps values are garbage.
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
}
3516
/// Represents an unsigned remainder expression based on unsigned division.
///
/// There is no dedicated SCEVURemExpr node; the remainder is expressed via
/// simpler SCEVs: folded directly for RHS == 1 or a power of two, otherwise
/// lowered to LHS - (LHS /u RHS) * RHS.
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If constant is one, the result is trivial
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If constant is a power of two, fold into a zext(trunc(LHS)).
    // X urem 2^k keeps exactly the low k bits, which is what truncating to
    // an i<k> and zero-extending back computes.
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}
3543
3544/// Get a canonical unsigned division expression, or something simpler if
3545/// possible.
3547 assert(!LHS->getType()->isPointerTy() &&
3548 "SCEVUDivExpr operand can't be pointer!");
3549 assert(LHS->getType() == RHS->getType() &&
3550 "SCEVUDivExpr operand types don't match!");
3551
// Build the uniquing key for (udiv LHS, RHS); if a structurally identical
// expression already exists, reuse it so pointer equality keeps working.
3553 ID.AddInteger(scUDivExpr);
3554 ID.AddPointer(LHS);
3555 ID.AddPointer(RHS);
3556 void *IP = nullptr;
3557 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3558 return S;
3559
3560 // 0 udiv Y == 0
3561 if (match(LHS, m_scev_Zero()))
3562 return LHS;
3563
3564 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3565 if (RHSC->getValue()->isOne())
3566 return LHS; // X udiv 1 --> x
3567 // If the denominator is zero, the result of the udiv is undefined. Don't
3568 // try to analyze it, because the resolution chosen here may differ from
3569 // the resolution chosen in other parts of the compiler.
3570 if (!RHSC->getValue()->isZero()) {
3571 // Determine if the division can be folded into the operands of
3572 // its operands.
3573 // TODO: Generalize this to non-constants by using known-bits information.
3574 Type *Ty = LHS->getType();
3575 unsigned LZ = RHSC->getAPInt().countl_zero();
3576 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3577 // For non-power-of-two values, effectively round the value up to the
3578 // nearest power of two.
3579 if (!RHSC->getAPInt().isPowerOf2())
3580 ++MaxShiftAmt;
// ExtTy is wide enough that the folds below cannot lose wrap-around bits:
// comparing zext'd forms in ExtTy proves the narrow computation didn't wrap.
3581 IntegerType *ExtTy =
3582 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3583 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3584 if (const SCEVConstant *Step =
3585 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3586 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3587 const APInt &StepInt = Step->getAPInt();
3588 const APInt &DivInt = RHSC->getAPInt();
3589 if (!StepInt.urem(DivInt) &&
3590 getZeroExtendExpr(AR, ExtTy) ==
3591 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3592 getZeroExtendExpr(Step, ExtTy),
3593 AR->getLoop(), SCEV::FlagAnyWrap)) {
3594 SmallVector<SCEVUse, 4> Operands;
3595 for (const SCEV *Op : AR->operands())
3596 Operands.push_back(getUDivExpr(Op, RHS));
3597 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3598 }
3599 /// Get a canonical UDivExpr for a recurrence.
3600 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3601 const APInt *StartRem;
3602 if (!DivInt.urem(StepInt) && match(getURemExpr(AR->getStart(), Step),
3603 m_scev_APInt(StartRem))) {
3604 bool NoWrap =
3605 getZeroExtendExpr(AR, ExtTy) ==
3606 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3607 getZeroExtendExpr(Step, ExtTy), AR->getLoop(),
3609
3610 // With N <= C and both N, C as powers-of-2, the transformation
3611 // {X,+,N}/C => {(X - X%N),+,N}/C preserves division results even
3612 // if wrapping occurs, as the division results remain equivalent for
3613 // all offsets in [[(X - X%N), X).
3614 bool CanFoldWithWrap = StepInt.ule(DivInt) && // N <= C
3615 StepInt.isPowerOf2() && DivInt.isPowerOf2();
3616 // Only fold if the subtraction can be folded in the start
3617 // expression.
3618 const SCEV *NewStart =
3619 getMinusSCEV(AR->getStart(), getConstant(*StartRem));
3620 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3621 !isa<SCEVAddExpr>(NewStart)) {
3622 const SCEV *NewLHS =
3623 getAddRecExpr(NewStart, Step, AR->getLoop(),
3624 NoWrap ? SCEV::FlagNW : SCEV::FlagAnyWrap);
3625 if (LHS != NewLHS) {
3626 LHS = NewLHS;
3627
3628 // Reset the ID to include the new LHS, and check if it is
3629 // already cached.
3630 ID.clear();
3631 ID.AddInteger(scUDivExpr);
3632 ID.AddPointer(LHS);
3633 ID.AddPointer(RHS);
3634 IP = nullptr;
3635 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3636 return S;
3637 }
3638 }
3639 }
3640 }
3641 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3642 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3643 SmallVector<SCEVUse, 4> Operands;
3644 for (const SCEV *Op : M->operands())
3645 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3646 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) {
3647 // Find an operand that's safely divisible.
3648 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3649 const SCEV *Op = M->getOperand(i);
3650 const SCEV *Div = getUDivExpr(Op, RHSC);
// Op is exactly divisible by RHSC iff re-multiplying the quotient
// reproduces Op.
3651 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3652 Operands = SmallVector<SCEVUse, 4>(M->operands());
3653 Operands[i] = Div;
3654 return getMulExpr(Operands);
3655 }
3656 }
3657
3658 // Even if it's not divisible, try to remove a common factor.
3659 if (const auto *LHSC = dyn_cast<SCEVConstant>(M->getOperand(0))) {
3660 APInt Factor = APIntOps::GreatestCommonDivisor(LHSC->getAPInt(),
3661 RHSC->getAPInt());
// isIntN(1) is true for gcd values 0 and 1, where there is nothing
// to cancel; otherwise divide both constants by the gcd.
3662 if (!Factor.isIntN(1)) {
3663 SmallVector<SCEVUse, 2> NewOperands;
3664 NewOperands.push_back(getConstant(LHSC->getAPInt().udiv(Factor)));
3665 append_range(NewOperands, M->operands().drop_front());
3666 const SCEV *NewMul = getMulExpr(NewOperands);
3667 return getUDivExpr(NewMul,
3668 getConstant(RHSC->getAPInt().udiv(Factor)));
3669 }
3670 }
3671 }
3672 }
3673
3674 // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3675 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3676 if (auto *DivisorConstant =
3677 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3678 bool Overflow = false;
3679 APInt NewRHS =
3680 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
// If B*C overflows the bit width, the combined divisor exceeds any
// possible dividend, so the result is 0.
3681 if (Overflow) {
3682 return getConstant(RHSC->getType(), 0, false);
3683 }
3684 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3685 }
3686 }
3687
3688 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3689 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3690 SmallVector<SCEVUse, 4> Operands;
3691 for (const SCEV *Op : A->operands())
3692 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3693 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3694 Operands.clear();
// Require every addend to divide exactly; bail out on the first one
// that does not.
3695 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3696 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3697 if (isa<SCEVUDivExpr>(Op) ||
3698 getMulExpr(Op, RHS) != A->getOperand(i))
3699 break;
3700 Operands.push_back(Op);
3701 }
3702 if (Operands.size() == A->getNumOperands())
3703 return getAddExpr(Operands);
3704 }
3705 }
3706
3707 // Fold if both operands are constant.
3708 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3709 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3710 }
3711 }
3712
3713 // ((-C + (C smax %x)) /u %x) evaluates to zero, for any positive constant C.
3714 const APInt *NegC, *C;
3715 if (match(LHS,
3718 NegC->isNegative() && !NegC->isMinSignedValue() && *C == -*NegC)
3719 return getZero(LHS->getType());
3720
3721 // (%a * %b)<nuw> / %b -> %a
3722 const auto *Mul = dyn_cast<SCEVMulExpr>(LHS);
3723 if (Mul && Mul->hasNoUnsignedWrap()) {
3724 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3725 if (Mul->getOperand(i) == RHS) {
// Drop the matching factor and keep the rest of the product.
3726 SmallVector<SCEVUse, 2> Operands;
3727 append_range(Operands, Mul->operands().take_front(i));
3728 append_range(Operands, Mul->operands().drop_front(i + 1));
3729 return getMulExpr(Operands);
3730 }
3731 }
3732 }
3733
3734 // TODO: Generalize to handle any common factors.
3735 // udiv (mul nuw a, vscale), (mul nuw b, vscale) --> udiv a, b
3736 const SCEV *NewLHS, *NewRHS;
3737 if (match(LHS, m_scev_c_NUWMul(m_SCEV(NewLHS), m_SCEVVScale())) &&
3738 match(RHS, m_scev_c_NUWMul(m_SCEV(NewRHS), m_SCEVVScale())))
3739 return getUDivExpr(NewLHS, NewRHS);
3740
3741 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
3742 // changes). Make sure we get a new one.
3743 IP = nullptr;
3744 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3745 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3746 LHS, RHS);
3747 UniqueSCEVs.InsertNode(S, IP);
3748 S->computeAndSetCanonical(*this);
3749 registerUser(S, ArrayRef<SCEVUse>({LHS, RHS}));
3750 return S;
3751}
3752
3753APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3754 APInt A = C1->getAPInt().abs();
3755 APInt B = C2->getAPInt().abs();
3756 uint32_t ABW = A.getBitWidth();
3757 uint32_t BBW = B.getBitWidth();
3758
3759 if (ABW > BBW)
3760 B = B.zext(ABW);
3761 else if (ABW < BBW)
3762 A = A.zext(BBW);
3763
3764 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
3765}
3766
3767/// Get a canonical unsigned division expression, or something simpler if
3768/// possible. There is no representation for an exact udiv in SCEV IR, but we
3769/// can attempt to optimize it prior to construction.
 // Currently there is no exact specific logic.
// The "exact" hint is currently unused; delegate to the generic builder.

 return getUDivExpr(LHS, RHS);
}
3775
3776/// Get an add recurrence expression for the specified loop. Simplify the
3777/// expression as much as possible.
3779 const Loop *L,
3780 SCEV::NoWrapFlags Flags) {
3781 SmallVector<SCEVUse, 4> Operands;
3782 Operands.push_back(Start);
// If the step is itself an addrec on the same loop, flatten
// {Start,+,{A,+,B}} into the multi-operand form {Start,+,A,+,B}.
// Only the NW flag is retained in that case (see maskFlags below).
3783 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3784 if (StepChrec->getLoop() == L) {
3785 append_range(Operands, StepChrec->operands());
3786 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3787 }
3788
3789 Operands.push_back(Step);
3790 return getAddRecExpr(Operands, L, Flags);
3791}
3792
3793/// Get an add recurrence expression for the specified loop. Simplify the
3794/// expression as much as possible.
3796 const Loop *L,
3797 SCEV::NoWrapFlags Flags) {
3798 if (Operands.size() == 1) return Operands[0];
3799#ifndef NDEBUG
3800 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3801 for (const SCEV *Op : llvm::drop_begin(Operands)) {
3802 assert(getEffectiveSCEVType(Op->getType()) == ETy &&
3803 "SCEVAddRecExpr operand types don't match!");
3804 assert(!Op->getType()->isPointerTy() && "Step must be integer");
3805 }
3806 for (const SCEV *Op : Operands)
3808 "SCEVAddRecExpr operand is not available at loop entry!");
3809#endif
3810
// A zero trailing step is redundant: drop it and retry ({X,+,0} --> X).
3811 if (Operands.back()->isZero()) {
3812 Operands.pop_back();
3813 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3814 }
3815
3816 // It's tempting to want to call getConstantMaxBackedgeTakenCount count here and
3817 // use that information to infer NUW and NSW flags. However, computing a
3818 // BE count requires calling getAddRecExpr, so we may not yet have a
3819 // meaningful BE count at this point (and if we don't, we'd be stuck
3820 // with a SCEVCouldNotCompute as the cached BE count).
3821
3822 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3823
3824 // Canonicalize nested AddRecs in by nesting them in order of loop depth.
3825 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3826 const Loop *NestedLoop = NestedAR->getLoop();
// Swap the nesting when L is an ancestor loop that is shallower than the
// nested addrec's loop, or when the loops are unrelated but L's header
// dominates the nested loop's header.
3827 if (L->contains(NestedLoop)
3828 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3829 : (!NestedLoop->contains(L) &&
3830 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3831 SmallVector<SCEVUse, 4> NestedOperands(NestedAR->operands());
3832 Operands[0] = NestedAR->getStart();
3833 // AddRecs require their operands be loop-invariant with respect to their
3834 // loops. Don't perform this transformation if it would break this
3835 // requirement.
3836 bool AllInvariant = all_of(
3837 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3838
3839 if (AllInvariant) {
3840 // Create a recurrence for the outer loop with the same step size.
3841 //
3842 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3843 // inner recurrence has the same property.
3844 SCEV::NoWrapFlags OuterFlags =
3845 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3846
3847 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3848 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3849 return isLoopInvariant(Op, NestedLoop);
3850 });
3851
3852 if (AllInvariant) {
3853 // Ok, both add recurrences are valid after the transformation.
3854 //
3855 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3856 // the outer recurrence has the same property.
3857 SCEV::NoWrapFlags InnerFlags =
3858 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3859 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3860 }
3861 }
3862 // Reset Operands to its original state.
3863 Operands[0] = NestedAR;
3864 }
3865 }
3866
3867 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3868 // already have one, otherwise create a new one.
3869 return getOrCreateAddRecExpr(Operands, L, Flags);
3870}
3871
// Translate a GEP operator into a SCEV pointer expression. The GEP's nowrap
// flags are only propagated when the GEP instruction is proven never-poison
// over the SCEV's whole defined scope; otherwise they are dropped.
3873 ArrayRef<SCEVUse> IndexExprs) {
3874 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3875 // getSCEV(Base)->getType() has the same address space as Base->getType()
3876 // because SCEV::getType() preserves the address space.
3877 GEPNoWrapFlags NW = GEP->getNoWrapFlags();
3878 if (NW != GEPNoWrapFlags::none()) {
3879 // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
3880 // but to do that, we have to ensure that said flag is valid in the entire
3881 // defined scope of the SCEV.
3882 // TODO: non-instructions have global scope. We might be able to prove
3883 // some global scope cases
3884 auto *GEPI = dyn_cast<Instruction>(GEP);
3885 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3886 NW = GEPNoWrapFlags::none();
3887 }
3888
3889 return getGEPExpr(BaseExpr, IndexExprs, GEP->getSourceElementType(), NW);
3890}
3891
// Build the SCEV for base + sum-of-offsets from a GEP's base expression and
// index expressions, mapping GEP nowrap flags onto SCEV nowrap flags for the
// offset arithmetic.
3893 ArrayRef<SCEVUse> IndexExprs,
3894 Type *SrcElementTy, GEPNoWrapFlags NW) {
3896 if (NW.hasNoUnsignedSignedWrap())
3897 OffsetWrap = setFlags(OffsetWrap, SCEV::FlagNSW);
3898 if (NW.hasNoUnsignedWrap())
3899 OffsetWrap = setFlags(OffsetWrap, SCEV::FlagNUW);
3900
3901 Type *CurTy = BaseExpr->getType();
3902 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3903 bool FirstIter = true;
3905 for (SCEVUse IndexExpr : IndexExprs) {
3906 // Compute the (potentially symbolic) offset in bytes for this index.
3907 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3908 // For a struct, add the member offset.
3909 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3910 unsigned FieldNo = Index->getZExtValue();
3911 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3912 Offsets.push_back(FieldOffset);
3913
3914 // Update CurTy to the type of the field at Index.
3915 CurTy = STy->getTypeAtIndex(Index);
3916 } else {
3917 // Update CurTy to its element type.
3918 if (FirstIter) {
3919 assert(isa<PointerType>(CurTy) &&
3920 "The first index of a GEP indexes a pointer")
3921 CurTy = SrcElementTy;
3922 FirstIter = false;
3923 } else {
3925 }
3926 // For an array, add the element offset, explicitly scaled.
3927 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3928 // Getelementptr indices are signed.
3929 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3930
3931 // Multiply the index by the element size to compute the element offset.
3932 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3933 Offsets.push_back(LocalOffset);
3934 }
3935 }
3936
3937 // Handle degenerate case of GEP without offsets.
3938 if (Offsets.empty())
3939 return BaseExpr;
3940
3941 // Add the offsets together, assuming nsw if inbounds.
3942 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
3943 // Add the base address and the offset. We cannot use the nsw flag, as the
3944 // base address is unsigned. However, if we know that the offset is
3945 // non-negative, we can use nuw.
3946 bool NUW = NW.hasNoUnsignedWrap() ||
3949 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
3950 assert(BaseExpr->getType() == GEPExpr->getType() &&
3951 "GEP should not change type mid-flight.");
3952 return GEPExpr;
3953}
3954
// Look up an already-uniqued SCEV node of the given kind with the given
// operand list; returns null when no such node has been created yet.
3955SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3958 ID.AddInteger(SCEVType);
3959 for (const SCEV *Op : Ops)
3960 ID.AddPointer(Op);
3961 void *IP = nullptr;
3962 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
3963}
3964
// Overload of the cache lookup above for a different operand-list type;
// identical key construction and uniquing-table probe.
3965SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3968 ID.AddInteger(SCEVType);
3969 for (const SCEV *Op : Ops)
3970 ID.AddPointer(Op);
3971 void *IP = nullptr;
3972 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
3973}
3974
// |Op| == smax(Op, -Op). The nowrap flags applied to the negation come from
// the elided line above; presumably derived from IsNSW — TODO confirm.
3975const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
3977 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
3978}
3979
// Build a canonical (commutative) min/max expression of the given kind,
// folding constants, flattening same-kind operands, and dropping redundant
// or dominated operands before uniquing.
3982 assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
3983 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3984 if (Ops.size() == 1) return Ops[0];
3985#ifndef NDEBUG
3986 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3987 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
3988 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3989 "Operand types don't match!");
3990 assert(Ops[0]->getType()->isPointerTy() ==
3991 Ops[i]->getType()->isPointerTy() &&
3992 "min/max should be consistently pointerish");
3993 }
3994#endif
3995
3996 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3997 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3998
// Constant-fold pairs of constants and strip identity / collapse on
// absorber values (e.g. INT_MIN is the identity of smax, INT_MAX its
// absorber).
3999 const SCEV *Folded = constantFoldAndGroupOps(
4000 *this, LI, DT, Ops,
4001 [&](const APInt &C1, const APInt &C2) {
4002 switch (Kind) {
4003 case scSMaxExpr:
4004 return APIntOps::smax(C1, C2);
4005 case scSMinExpr:
4006 return APIntOps::smin(C1, C2);
4007 case scUMaxExpr:
4008 return APIntOps::umax(C1, C2);
4009 case scUMinExpr:
4010 return APIntOps::umin(C1, C2);
4011 default:
4012 llvm_unreachable("Unknown SCEV min/max opcode");
4013 }
4014 },
4015 [&](const APInt &C) {
4016 // identity
4017 if (IsMax)
4018 return IsSigned ? C.isMinSignedValue() : C.isMinValue();
4019 else
4020 return IsSigned ? C.isMaxSignedValue() : C.isMaxValue();
4021 },
4022 [&](const APInt &C) {
4023 // absorber
4024 if (IsMax)
4025 return IsSigned ? C.isMaxSignedValue() : C.isMaxValue();
4026 else
4027 return IsSigned ? C.isMinSignedValue() : C.isMinValue();
4028 });
4029 if (Folded)
4030 return Folded;
4031
4032 // Check if we have created the same expression before.
4033 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
4034 return S;
4035 }
4036
4037 // Find the first operation of the same kind
4038 unsigned Idx = 0;
4039 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
4040 ++Idx;
4041
4042 // Check to see if one of the operands is of the same kind. If so, expand its
4043 // operands onto our operand list, and recurse to simplify.
4044 if (Idx < Ops.size()) {
4045 bool DeletedAny = false;
4046 while (Ops[Idx]->getSCEVType() == Kind) {
4047 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
4048 Ops.erase(Ops.begin()+Idx);
4049 append_range(Ops, SMME->operands());
4050 DeletedAny = true;
4051 }
4052
4053 if (DeletedAny)
4054 return getMinMaxExpr(Kind, Ops);
4055 }
4056
4057 // Okay, check to see if the same value occurs in the operand list twice. If
4058 // so, delete one. Since we sorted the list, these values are required to
4059 // be adjacent.
4064 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
4065 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
4066 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
4067 if (Ops[i] == Ops[i + 1] ||
4068 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
4069 // X op Y op Y --> X op Y
4070 // X op Y --> X, if we know X, Y are ordered appropriately
4071 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
4072 --i;
4073 --e;
4074 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
4075 Ops[i + 1])) {
4076 // X op Y --> Y, if we know X, Y are ordered appropriately
4077 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
4078 --i;
4079 --e;
4080 }
4081 }
4082
4083 if (Ops.size() == 1) return Ops[0];
4084
4085 assert(!Ops.empty() && "Reduced smax down to nothing!");
4086
4087 // Okay, it looks like we really DO need an expr. Check to see if we
4088 // already have one, otherwise create a new one.
4090 ID.AddInteger(Kind);
4091 for (const SCEV *Op : Ops)
4092 ID.AddPointer(Op);
4093 void *IP = nullptr;
4094 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
4095 if (ExistingSCEV)
4096 return ExistingSCEV;
// Copy the operands into allocator-owned storage; the node references them
// for its whole lifetime.
4097 SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
4099 SCEV *S = new (SCEVAllocator)
4100 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4101
4102 UniqueSCEVs.InsertNode(S, IP);
4103 S->computeAndSetCanonical(*this);
4104 registerUser(S, Ops);
4105 return S;
4106}
4107
4108namespace {

// Visitor used while building a sequential min/max expression: it walks the
// operand tree and keeps only the first occurrence of each operand (tracked
// in SeenOps); repeated operands are removed by returning std::nullopt.
// Recursion is restricted to expressions of the root's kind (or its
// non-sequential variant) so unrelated subexpressions are left untouched.
4110class SCEVSequentialMinMaxDeduplicatingVisitor final
4111 : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4112 std::optional<const SCEV *>> {
4113 using RetVal = std::optional<const SCEV *>;
4115
4116 ScalarEvolution &SE;
4117 const SCEVTypes RootKind; // Must be a sequential min/max expression.
4118 const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind.
4120
4121 bool canRecurseInto(SCEVTypes Kind) const {
4122 // We can only recurse into the SCEV expression of the same effective type
4123 // as the type of our root SCEV expression.
4124 return RootKind == Kind || NonSequentialRootKind == Kind;
4125 };
4126
4127 RetVal visitAnyMinMaxExpr(const SCEV *S) {
4129 "Only for min/max expressions.");
4130 SCEVTypes Kind = S->getSCEVType();
4131
4132 if (!canRecurseInto(Kind))
4133 return S;
4134
4135 auto *NAry = cast<SCEVNAryExpr>(S);
4136 SmallVector<SCEVUse> NewOps;
4137 bool Changed = visit(Kind, NAry->operands(), NewOps);
4138
4139 if (!Changed)
4140 return S;
// All operands were duplicates: the whole sub-expression disappears.
4141 if (NewOps.empty())
4142 return std::nullopt;
4143
4145 ? SE.getSequentialMinMaxExpr(Kind, NewOps)
4146 : SE.getMinMaxExpr(Kind, NewOps);
4147 }
4148
4149 RetVal visit(const SCEV *S) {
4150 // Has the whole operand been seen already?
4151 if (!SeenOps.insert(S).second)
4152 return std::nullopt;
4153 return Base::visit(S);
4154 }
4155
4156public:
4157 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4158 SCEVTypes RootKind)
4159 : SE(SE), RootKind(RootKind),
4160 NonSequentialRootKind(
4161 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4162 RootKind)) {}
4163
// Visit each operand in order; returns true (and fills NewOps) iff any
// operand was simplified away or rewritten.
4164 bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<SCEVUse> OrigOps,
4165 SmallVectorImpl<SCEVUse> &NewOps) {
4166 bool Changed = false;
4168 Ops.reserve(OrigOps.size());
4169
4170 for (const SCEV *Op : OrigOps) {
4171 RetVal NewOp = visit(Op);
4172 if (NewOp != Op)
4173 Changed = true;
4174 if (NewOp)
4175 Ops.emplace_back(*NewOp);
4176 }
4177
4178 if (Changed)
4179 NewOps = std::move(Ops);
4180 return Changed;
4181 }
4182
// Leaf and non-min/max nodes are returned unchanged; deduplication only
// rewrites min/max structure.
4183 RetVal visitConstant(const SCEVConstant *Constant) { return Constant; }
4184
4185 RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }
4186
4187 RetVal visitPtrToAddrExpr(const SCEVPtrToAddrExpr *Expr) { return Expr; }
4188
4189 RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; }
4190
4191 RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; }
4192
4193 RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; }
4194
4195 RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; }
4196
4197 RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }
4198
4199 RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }
4200
4201 RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }
4202
4203 RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
4204
4205 RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) {
4206 return visitAnyMinMaxExpr(Expr);
4207 }
4208
4209 RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) {
4210 return visitAnyMinMaxExpr(Expr);
4211 }
4212
4213 RetVal visitSMinExpr(const SCEVSMinExpr *Expr) {
4214 return visitAnyMinMaxExpr(Expr);
4215 }
4216
4217 RetVal visitUMinExpr(const SCEVUMinExpr *Expr) {
4218 return visitAnyMinMaxExpr(Expr);
4219 }
4220
4221 RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
4222 return visitAnyMinMaxExpr(Expr);
4223 }
4224
4225 RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }
4226
4227 RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; }
4228};
4229
4230} // namespace
4231
// Returns true when an expression of this kind is poison whenever ANY of its
// operands is poison. The elided case (per the FIXME below) is the
// sequential min/max kind, which only propagates poison from its first
// operand and is therefore answered pessimistically.
4233 switch (Kind) {
4234 case scConstant:
4235 case scVScale:
4236 case scTruncate:
4237 case scZeroExtend:
4238 case scSignExtend:
4239 case scPtrToAddr:
4240 case scPtrToInt:
4241 case scAddExpr:
4242 case scMulExpr:
4243 case scUDivExpr:
4244 case scAddRecExpr:
4245 case scUMaxExpr:
4246 case scSMaxExpr:
4247 case scUMinExpr:
4248 case scSMinExpr:
4249 case scUnknown:
4250 // If any operand is poison, the whole expression is poison.
4251 return true;
4253 // FIXME: if the *first* operand is poison, the whole expression is poison.
4254 return false; // Pessimistically, say that it does not propagate poison.
4255 case scCouldNotCompute:
4256 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
4257 }
4258 llvm_unreachable("Unknown SCEV kind!");
4259}
4260
4261namespace {
4262// The only way poison may be introduced in a SCEV expression is from a
4263// poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown,
4264// not SCEVConstant). Notably, nowrap flags in SCEV nodes can *not*
4265// introduce poison -- they encode guaranteed, non-speculated knowledge.
4266//
4267// Additionally, all SCEV nodes propagate poison from inputs to outputs,
4268// with the notable exception of umin_seq, where only poison from the first
4269// operand is (unconditionally) propagated.
4270struct SCEVPoisonCollector {
// When false, the traversal stops at nodes that may block poison
// propagation (see the elided condition in follow()).
4271 bool LookThroughMaybePoisonBlocking;
// The SCEVUnknowns reached whose underlying Value is not proven non-poison.
4272 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4273 SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
4274 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4275
4276 bool follow(const SCEV *S) {
4277 if (!LookThroughMaybePoisonBlocking &&
4279 return false;
4280
4281 if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
4282 if (!isGuaranteedNotToBePoison(SU->getValue()))
4283 MaybePoison.insert(SU);
4284 }
4285 return true;
4286 }
// Never terminate early: the full expression must be visited.
4287 bool isDone() const { return false; }
4288};
4289} // namespace
4290
4291/// Return true if V is poison given that AssumedPoison is already poison.
4292static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) {
4293 // First collect all SCEVs that might result in AssumedPoison to be poison.
4294 // We need to look through potentially poison-blocking operations here,
4295 // because we want to find all SCEVs that *might* result in poison, not only
4296 // those that are *required* to.
4297 SCEVPoisonCollector PC1(/* LookThroughMaybePoisonBlocking */ true);
4298 visitAll(AssumedPoison, PC1);
4299
4300 // AssumedPoison is never poison. As the assumption is false, the implication
4301 // is true. Don't bother walking the other SCEV in this case.
4302 if (PC1.MaybePoison.empty())
4303 return true;
4304
4305 // Collect all SCEVs in S that, if poison, *will* result in S being poison
4306 // as well. We cannot look through potentially poison-blocking operations
4307 // here, as their arguments only *may* make the result poison.
4308 SCEVPoisonCollector PC2(/* LookThroughMaybePoisonBlocking */ false);
4309 visitAll(S, PC2);
4310
4311 // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison,
4312 // it will also make S poison by being part of PC2.MaybePoison.
4313 return llvm::set_is_subset(PC1.MaybePoison, PC2.MaybePoison);
4314}
4315
// Collect the IR values underlying the SCEVUnknowns of S that may be poison
// (i.e. the values whose poison would guarantee S is poison).
4317 SmallPtrSetImpl<const Value *> &Result, const SCEV *S) {
4318 SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ false);
4319 visitAll(S, PC);
4320 for (const SCEVUnknown *SU : PC.MaybePoison)
4321 Result.insert(SU->getValue());
4322}
4323
// Decide whether instruction I can be reused to materialize S without
// introducing poison that S itself would not have; instructions whose
// poison-generating flags/metadata must be stripped for this to hold are
// appended to DropPoisonGeneratingInsts.
4325 const SCEV *S, Instruction *I,
4326 SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
4327 // If the instruction cannot be poison, it's always safe to reuse.
4329 return true;
4330
4331 // Otherwise, it is possible that I is more poisonous that S. Collect the
4332 // poison-contributors of S, and then check whether I has any additional
4333 // poison-contributors. Poison that is contributed through poison-generating
4334 // flags is handled by dropping those flags instead.
4336 getPoisonGeneratingValues(PoisonVals, S);
4337
// Depth-first walk over I's operand graph, bounded below to keep this cheap.
4338 SmallVector<Value *> Worklist;
4340 Worklist.push_back(I);
4341 while (!Worklist.empty()) {
4342 Value *V = Worklist.pop_back_val();
4343 if (!Visited.insert(V).second)
4344 continue;
4345
4346 // Avoid walking large instruction graphs.
4347 if (Visited.size() > 16)
4348 return false;
4349
4350 // Either the value can't be poison, or the S would also be poison if it
4351 // is.
4352 if (PoisonVals.contains(V) || ::isGuaranteedNotToBePoison(V))
4353 continue;
4354
4355 auto *I = dyn_cast<Instruction>(V);
4356 if (!I)
4357 return false;
4358
4359 // Disjoint or instructions are interpreted as adds by SCEV. However, we
4360 // can't replace an arbitrary add with disjoint or, even if we drop the
4361 // flag. We would need to convert the or into an add.
4362 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
4363 if (PDI->isDisjoint())
4364 return false;
4365
4366 // FIXME: Ignore vscale, even though it technically could be poison. Do this
4367 // because SCEV currently assumes it can't be poison. Remove this special
4368 // case once we proper model when vscale can be poison.
4369 if (auto *II = dyn_cast<IntrinsicInst>(I);
4370 II && II->getIntrinsicID() == Intrinsic::vscale)
4371 continue;
4372
4373 if (canCreatePoison(cast<Operator>(I), /*ConsiderFlagsAndMetadata*/ false))
4374 return false;
4375
4376 // If the instruction can't create poison, we can recurse to its operands.
4377 if (I->hasPoisonGeneratingAnnotations())
4378 DropPoisonGeneratingInsts.push_back(I);
4379
4380 llvm::append_range(Worklist, I->operands());
4381 }
4382 return true;
4383}
4384
4385const SCEV *
// Build a canonical sequential min/max expression. Unlike the commutative
// variant, operand order is significant (poison/UB only propagates from
// earlier operands), so no sorting is performed — only deduplication,
// same-kind flattening, and conversion of provably-safe prefixes to the
// non-sequential form.
4388 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4389 "Not a SCEVSequentialMinMaxExpr!");
4390 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
4391 if (Ops.size() == 1)
4392 return Ops[0];
4393#ifndef NDEBUG
4394 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
4395 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
4396 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
4397 "Operand types don't match!");
4398 assert(Ops[0]->getType()->isPointerTy() ==
4399 Ops[i]->getType()->isPointerTy() &&
4400 "min/max should be consistently pointerish");
4401 }
4402#endif
4403
4404 // Note that SCEVSequentialMinMaxExpr is *NOT* commutative,
4405 // so we can *NOT* do any kind of sorting of the expressions!
4406
4407 // Check if we have created the same expression before.
4408 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))
4409 return S;
4410
4411 // FIXME: there are *some* simplifications that we can do here.
4412
4413 // Keep only the first instance of an operand.
4414 {
4415 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
4416 bool Changed = Deduplicator.visit(Kind, Ops, Ops);
4417 if (Changed)
4418 return getSequentialMinMaxExpr(Kind, Ops);
4419 }
4420
4421 // Check to see if one of the operands is of the same kind. If so, expand its
4422 // operands onto our operand list, and recurse to simplify.
4423 {
4424 unsigned Idx = 0;
4425 bool DeletedAny = false;
4426 while (Idx < Ops.size()) {
4427 if (Ops[Idx]->getSCEVType() != Kind) {
4428 ++Idx;
4429 continue;
4430 }
// Splice the nested operands in place so relative order is preserved.
4431 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
4432 Ops.erase(Ops.begin() + Idx);
4433 Ops.insert(Ops.begin() + Idx, SMME->operands().begin(),
4434 SMME->operands().end());
4435 DeletedAny = true;
4436 }
4437
4438 if (DeletedAny)
4439 return getSequentialMinMaxExpr(Kind, Ops);
4440 }
4441
4442 const SCEV *SaturationPoint;
4444 switch (Kind) {
4446 SaturationPoint = getZero(Ops[0]->getType());
4447 Pred = ICmpInst::ICMP_ULE;
4448 break;
4449 default:
4450 llvm_unreachable("Not a sequential min/max type.");
4451 }
4452
4453 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
4454 if (!isGuaranteedNotToCauseUB(Ops[i]))
4455 continue;
4456 // We can replace %x umin_seq %y with %x umin %y if either:
4457 // * %y being poison implies %x is also poison.
4458 // * %x cannot be the saturating value (e.g. zero for umin).
4459 if (::impliesPoison(Ops[i], Ops[i - 1]) ||
4460 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1],
4461 SaturationPoint)) {
4462 SmallVector<SCEVUse, 2> SeqOps = {Ops[i - 1], Ops[i]};
4463 Ops[i - 1] = getMinMaxExpr(
4465 SeqOps);
4466 Ops.erase(Ops.begin() + i);
4467 return getSequentialMinMaxExpr(Kind, Ops);
4468 }
4469 // Fold %x umin_seq %y to %x if %x ule %y.
4470 // TODO: We might be able to prove the predicate for a later operand.
4471 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
4472 Ops.erase(Ops.begin() + i);
4473 return getSequentialMinMaxExpr(Kind, Ops);
4474 }
4475 }
4476
4477 // Okay, it looks like we really DO need an expr. Check to see if we
4478 // already have one, otherwise create a new one.
4480 ID.AddInteger(Kind);
4481 for (const SCEV *Op : Ops)
4482 ID.AddPointer(Op);
4483 void *IP = nullptr;
4484 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
4485 if (ExistingSCEV)
4486 return ExistingSCEV;
4487
// Copy operands into allocator-owned storage and intern the new node.
4488 SCEVUse *O = SCEVAllocator.Allocate<SCEVUse>(Ops.size());
4490 SCEV *S = new (SCEVAllocator)
4491 SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4492
4493 UniqueSCEVs.InsertNode(S, IP);
4494 S->computeAndSetCanonical(*this);
4495 registerUser(S, Ops);
4496 return S;
4497}
4498
4503
4507
4512
4516
4521
4525
4527 bool Sequential) {
4528 SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
4529 return getUMinExpr(Ops, Sequential);
4530}
4531
4537
4538const SCEV *
4540 const SCEV *Res = getConstant(IntTy, Size.getKnownMinValue());
4541 if (Size.isScalable())
4542 Res = getMulExpr(Res, getVScale(IntTy));
4543 return Res;
4544}
4545
4547 return getSizeOfExpr(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
4548}
4549
4551 return getSizeOfExpr(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
4552}
4553
4555 StructType *STy,
4556 unsigned FieldNo) {
4557 // We can bypass creating a target-independent constant expression and then
4558 // folding it back into a ConstantInt. This is just a compile-time
4559 // optimization.
4560 const StructLayout *SL = getDataLayout().getStructLayout(STy);
4561 assert(!SL->getSizeInBits().isScalable() &&
4562 "Cannot get offset for structure containing scalable vector types");
4563 return getConstant(IntTy, SL->getElementOffset(FieldNo));
4564}
4565
4567 // Don't attempt to do anything other than create a SCEVUnknown object
4568 // here. createSCEV only calls getUnknown after checking for all other
4569 // interesting possibilities, and any other code that calls getUnknown
4570 // is doing so in order to hide a value from SCEV canonicalization.
4571
4573 ID.AddInteger(scUnknown);
4574 ID.AddPointer(V);
4575 void *IP = nullptr;
4576 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
4577 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4578 "Stale SCEVUnknown in uniquing map!");
4579 return S;
4580 }
4581 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
4582 FirstUnknown);
4583 FirstUnknown = cast<SCEVUnknown>(S);
4584 UniqueSCEVs.InsertNode(S, IP);
4585 S->computeAndSetCanonical(*this);
4586 return S;
4587}
4588
4589//===----------------------------------------------------------------------===//
4590// Basic SCEV Analysis and PHI Idiom Recognition Code
4591//
4592
4593/// Test if values of the given type are analyzable within the SCEV
4594/// framework. This primarily includes integer types, and it can optionally
4595/// include pointer types if the ScalarEvolution class has access to
4596/// target-specific information.
4598 // Integers and pointers are always SCEVable.
4599 return Ty->isIntOrPtrTy();
4600}
4601
4602/// Return the size in bits of the specified type, for which isSCEVable must
4603/// return true.
4605 assert(isSCEVable(Ty) && "Type is not SCEVable!");
4606 if (Ty->isPointerTy())
4608 return getDataLayout().getTypeSizeInBits(Ty);
4609}
4610
4611/// Return a type with the same bitwidth as the given type and which represents
4612/// how SCEV will treat the given type, for which isSCEVable must return
4613/// true. For pointer types, this is the pointer index sized integer type.
4615 assert(isSCEVable(Ty) && "Type is not SCEVable!");
4616
4617 if (Ty->isIntegerTy())
4618 return Ty;
4619
4620 // The only other support type is pointer.
4621 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4622 return getDataLayout().getIndexType(Ty);
4623}
4624
4626 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
4627}
4628
4630 const SCEV *B) {
4631 /// For a valid use point to exist, the defining scope of one operand
4632 /// must dominate the other.
4633 bool PreciseA, PreciseB;
4634 auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4635 auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4636 if (!PreciseA || !PreciseB)
4637 // Can't tell.
4638 return false;
4639 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4640 DT.dominates(ScopeB, ScopeA);
4641}
4642
4644 return CouldNotCompute.get();
4645}
4646
4647bool ScalarEvolution::checkValidity(const SCEV *S) const {
4648 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
4649 auto *SU = dyn_cast<SCEVUnknown>(S);
4650 return SU && SU->getValue() == nullptr;
4651 });
4652
4653 return !ContainsNulls;
4654}
4655
4657 HasRecMapType::iterator I = HasRecMap.find(S);
4658 if (I != HasRecMap.end())
4659 return I->second;
4660
4661 bool FoundAddRec =
4662 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
4663 HasRecMap.insert({S, FoundAddRec});
4664 return FoundAddRec;
4665}
4666
4667/// Return the ValueOffsetPair set for \p S. \p S can be represented
4668/// by the value and offset from any ValueOffsetPair in the set.
4669ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
4670 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
4671 if (SI == ExprValueMap.end())
4672 return {};
4673 return SI->second.getArrayRef();
4674}
4675
4676/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4677/// cannot be used separately. eraseValueFromMap should be used to remove
4678/// V from ValueExprMap and ExprValueMap at the same time.
4679void ScalarEvolution::eraseValueFromMap(Value *V) {
4680 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4681 if (I != ValueExprMap.end()) {
4682 auto EVIt = ExprValueMap.find(I->second);
4683 bool Removed = EVIt->second.remove(V);
4684 (void) Removed;
4685 assert(Removed && "Value not in ExprValueMap?");
4686 ValueExprMap.erase(I);
4687 }
4688}
4689
4690void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
4691 // A recursive query may have already computed the SCEV. It should be
4692 // equivalent, but may not necessarily be exactly the same, e.g. due to lazily
4693 // inferred nowrap flags.
4694 auto It = ValueExprMap.find_as(V);
4695 if (It == ValueExprMap.end()) {
4696 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4697 ExprValueMap[S].insert(V);
4698 }
4699}
4700
4701/// Return an existing SCEV if it exists, otherwise analyze the expression and
4702/// create a new one.
4704 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4705
4706 if (const SCEV *S = getExistingSCEV(V))
4707 return S;
4708 return createSCEVIter(V);
4709}
4710
4712 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4713
4714 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4715 if (I != ValueExprMap.end()) {
4716 const SCEV *S = I->second;
4717 assert(checkValidity(S) &&
4718 "existing SCEV has not been properly invalidated");
4719 return S;
4720 }
4721 return nullptr;
4722}
4723
4724/// Return a SCEV corresponding to -V = -1*V
4726 SCEV::NoWrapFlags Flags) {
4727 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4728 return getConstant(
4729 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
4730
4731 Type *Ty = V->getType();
4732 Ty = getEffectiveSCEVType(Ty);
4733 return getMulExpr(V, getMinusOne(Ty), Flags);
4734}
4735
4736/// If Expr computes ~A, return A else return nullptr
4737static const SCEV *MatchNotExpr(const SCEV *Expr) {
4738 const SCEV *MulOp;
4739 if (match(Expr, m_scev_Add(m_scev_AllOnes(),
4740 m_scev_Mul(m_scev_AllOnes(), m_SCEV(MulOp)))))
4741 return MulOp;
4742 return nullptr;
4743}
4744
4745/// Return a SCEV corresponding to ~V = -1-V
4747 assert(!V->getType()->isPointerTy() && "Can't negate pointer");
4748
4749 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4750 return getConstant(
4751 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
4752
4753 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
4754 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
4755 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
4756 SmallVector<SCEVUse, 2> MatchedOperands;
4757 for (const SCEV *Operand : MME->operands()) {
4758 const SCEV *Matched = MatchNotExpr(Operand);
4759 if (!Matched)
4760 return (const SCEV *)nullptr;
4761 MatchedOperands.push_back(Matched);
4762 }
4763 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
4764 MatchedOperands);
4765 };
4766 if (const SCEV *Replaced = MatchMinMaxNegation(MME))
4767 return Replaced;
4768 }
4769
4770 Type *Ty = V->getType();
4771 Ty = getEffectiveSCEVType(Ty);
4772 return getMinusSCEV(getMinusOne(Ty), V);
4773}
4774
4776 assert(P->getType()->isPointerTy());
4777
4778 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
4779 // The base of an AddRec is the first operand.
4780 SmallVector<SCEVUse> Ops{AddRec->operands()};
4781 Ops[0] = removePointerBase(Ops[0]);
4782 // Don't try to transfer nowrap flags for now. We could in some cases
4783 // (for example, if pointer operand of the AddRec is a SCEVUnknown).
4784 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
4785 }
4786 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
4787 // The base of an Add is the pointer operand.
4788 SmallVector<SCEVUse> Ops{Add->operands()};
4789 SCEVUse *PtrOp = nullptr;
4790 for (SCEVUse &AddOp : Ops) {
4791 if (AddOp->getType()->isPointerTy()) {
4792 assert(!PtrOp && "Cannot have multiple pointer ops");
4793 PtrOp = &AddOp;
4794 }
4795 }
4796 *PtrOp = removePointerBase(*PtrOp);
4797 // Don't try to transfer nowrap flags for now. We could in some cases
4798 // (for example, if the pointer operand of the Add is a SCEVUnknown).
4799 return getAddExpr(Ops);
4800 }
4801 // Any other expression must be a pointer base.
4802 return getZero(P->getType());
4803}
4804
4806 SCEV::NoWrapFlags Flags,
4807 unsigned Depth) {
4808 // Fast path: X - X --> 0.
4809 if (LHS == RHS)
4810 return getZero(LHS->getType());
4811
4812 // If we subtract two pointers with different pointer bases, bail.
4813 // Eventually, we're going to add an assertion to getMulExpr that we
4814 // can't multiply by a pointer.
4815 if (RHS->getType()->isPointerTy()) {
4816 if (!LHS->getType()->isPointerTy() ||
4817 getPointerBase(LHS) != getPointerBase(RHS))
4818 return getCouldNotCompute();
4819 LHS = removePointerBase(LHS);
4820 RHS = removePointerBase(RHS);
4821 }
4822
4823 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
4824 // makes it so that we cannot make much use of NUW.
4825 auto AddFlags = SCEV::FlagAnyWrap;
4826 const bool RHSIsNotMinSigned =
4828 if (hasFlags(Flags, SCEV::FlagNSW)) {
4829 // Let M be the minimum representable signed value. Then (-1)*RHS
4830 // signed-wraps if and only if RHS is M. That can happen even for
4831 // a NSW subtraction because e.g. (-1)*M signed-wraps even though
4832 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
4833 // (-1)*RHS, we need to prove that RHS != M.
4834 //
4835 // If LHS is non-negative and we know that LHS - RHS does not
4836 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4837 // either by proving that RHS > M or that LHS >= 0.
4838 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4839 AddFlags = SCEV::FlagNSW;
4840 }
4841 }
4842
4843 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4844 // RHS is NSW and LHS >= 0.
4845 //
4846 // The difficulty here is that the NSW flag may have been proven
4847 // relative to a loop that is to be found in a recurrence in LHS and
4848 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4849 // larger scope than intended.
4850 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4851
4852 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4853}
4854
4856 unsigned Depth) {
4857 Type *SrcTy = V->getType();
4858 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4859 "Cannot truncate or zero extend with non-integer arguments!");
4860 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4861 return V; // No conversion
4862 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4863 return getTruncateExpr(V, Ty, Depth);
4864 return getZeroExtendExpr(V, Ty, Depth);
4865}
4866
4868 unsigned Depth) {
4869 Type *SrcTy = V->getType();
4870 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4871 "Cannot truncate or zero extend with non-integer arguments!");
4872 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4873 return V; // No conversion
4874 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4875 return getTruncateExpr(V, Ty, Depth);
4876 return getSignExtendExpr(V, Ty, Depth);
4877}
4878
4879const SCEV *
4881 Type *SrcTy = V->getType();
4882 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4883 "Cannot noop or zero extend with non-integer arguments!");
4885 "getNoopOrZeroExtend cannot truncate!");
4886 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4887 return V; // No conversion
4888 return getZeroExtendExpr(V, Ty);
4889}
4890
4891const SCEV *
4893 Type *SrcTy = V->getType();
4894 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4895 "Cannot noop or sign extend with non-integer arguments!");
4897 "getNoopOrSignExtend cannot truncate!");
4898 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4899 return V; // No conversion
4900 return getSignExtendExpr(V, Ty);
4901}
4902
4903const SCEV *
4905 Type *SrcTy = V->getType();
4906 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4907 "Cannot noop or any extend with non-integer arguments!");
4909 "getNoopOrAnyExtend cannot truncate!");
4910 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4911 return V; // No conversion
4912 return getAnyExtendExpr(V, Ty);
4913}
4914
4915const SCEV *
4917 Type *SrcTy = V->getType();
4918 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4919 "Cannot truncate or noop with non-integer arguments!");
4921 "getTruncateOrNoop cannot extend!");
4922 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4923 return V; // No conversion
4924 return getTruncateExpr(V, Ty);
4925}
4926
4928 const SCEV *RHS) {
4929 const SCEV *PromotedLHS = LHS;
4930 const SCEV *PromotedRHS = RHS;
4931
4932 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4933 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4934 else
4935 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4936
4937 return getUMaxExpr(PromotedLHS, PromotedRHS);
4938}
4939
4941 const SCEV *RHS,
4942 bool Sequential) {
4943 SmallVector<SCEVUse, 2> Ops = {LHS, RHS};
4944 return getUMinFromMismatchedTypes(Ops, Sequential);
4945}
4946
4947const SCEV *
4949 bool Sequential) {
4950 assert(!Ops.empty() && "At least one operand must be!");
4951 // Trivial case.
4952 if (Ops.size() == 1)
4953 return Ops[0];
4954
4955 // Find the max type first.
4956 Type *MaxType = nullptr;
4957 for (SCEVUse S : Ops)
4958 if (MaxType)
4959 MaxType = getWiderType(MaxType, S->getType());
4960 else
4961 MaxType = S->getType();
4962 assert(MaxType && "Failed to find maximum type!");
4963
4964 // Extend all ops to max type.
4965 SmallVector<SCEVUse, 2> PromotedOps;
4966 for (SCEVUse S : Ops)
4967 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4968
4969 // Generate umin.
4970 return getUMinExpr(PromotedOps, Sequential);
4971}
4972
4974 // A pointer operand may evaluate to a nonpointer expression, such as null.
4975 if (!V->getType()->isPointerTy())
4976 return V;
4977
4978 while (true) {
4979 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4980 V = AddRec->getStart();
4981 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
4982 const SCEV *PtrOp = nullptr;
4983 for (const SCEV *AddOp : Add->operands()) {
4984 if (AddOp->getType()->isPointerTy()) {
4985 assert(!PtrOp && "Cannot have multiple pointer ops");
4986 PtrOp = AddOp;
4987 }
4988 }
4989 assert(PtrOp && "Must have pointer op");
4990 V = PtrOp;
4991 } else // Not something we can look further into.
4992 return V;
4993 }
4994}
4995
4996/// Push users of the given Instruction onto the given Worklist.
5000 // Push the def-use children onto the Worklist stack.
5001 for (User *U : I->users()) {
5002 auto *UserInsn = cast<Instruction>(U);
5003 if (Visited.insert(UserInsn).second)
5004 Worklist.push_back(UserInsn);
5005 }
5006}
5007
5008namespace {
5009
5010/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
5011/// expression in case its Loop is L. If it is not L then
5012/// if IgnoreOtherLoops is true then use AddRec itself
5013/// otherwise rewrite cannot be done.
5014/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
5015class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
5016public:
5017 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
5018 bool IgnoreOtherLoops = true) {
5019 SCEVInitRewriter Rewriter(L, SE);
5020 const SCEV *Result = Rewriter.visit(S);
5021 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
5022 return SE.getCouldNotCompute();
5023 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
5024 ? SE.getCouldNotCompute()
5025 : Result;
5026 }
5027
5028 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
5029 if (!SE.isLoopInvariant(Expr, L))
5030 SeenLoopVariantSCEVUnknown = true;
5031 return Expr;
5032 }
5033
5034 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
5035 // Only re-write AddRecExprs for this loop.
5036 if (Expr->getLoop() == L)
5037 return Expr->getStart();
5038 SeenOtherLoops = true;
5039 return Expr;
5040 }
5041
5042 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
5043
5044 bool hasSeenOtherLoops() { return SeenOtherLoops; }
5045
5046private:
5047 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
5048 : SCEVRewriteVisitor(SE), L(L) {}
5049
5050 const Loop *L;
5051 bool SeenLoopVariantSCEVUnknown = false;
5052 bool SeenOtherLoops = false;
5053};
5054
5055/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post
5056/// increment expression in case its Loop is L. If it is not L then
5057/// use AddRec itself.
5058/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
5059class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
5060public:
5061 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
5062 SCEVPostIncRewriter Rewriter(L, SE);
5063 const SCEV *Result = Rewriter.visit(S);
5064 return Rewriter.hasSeenLoopVariantSCEVUnknown()
5065 ? SE.getCouldNotCompute()
5066 : Result;
5067 }
5068
5069 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
5070 if (!SE.isLoopInvariant(Expr, L))
5071 SeenLoopVariantSCEVUnknown = true;
5072 return Expr;
5073 }
5074
5075 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
5076 // Only re-write AddRecExprs for this loop.
5077 if (Expr->getLoop() == L)
5078 return Expr->getPostIncExpr(SE);
5079 SeenOtherLoops = true;
5080 return Expr;
5081 }
5082
5083 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
5084
5085 bool hasSeenOtherLoops() { return SeenOtherLoops; }
5086
5087private:
5088 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
5089 : SCEVRewriteVisitor(SE), L(L) {}
5090
5091 const Loop *L;
5092 bool SeenLoopVariantSCEVUnknown = false;
5093 bool SeenOtherLoops = false;
5094};
5095
5096/// This class evaluates the compare condition by matching it against the
5097/// condition of loop latch. If there is a match we assume a true value
5098/// for the condition while building SCEV nodes.
5099class SCEVBackedgeConditionFolder
5100 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
5101public:
5102 static const SCEV *rewrite(const SCEV *S, const Loop *L,
5103 ScalarEvolution &SE) {
5104 bool IsPosBECond = false;
5105 Value *BECond = nullptr;
5106 if (BasicBlock *Latch = L->getLoopLatch()) {
5107 if (CondBrInst *BI = dyn_cast<CondBrInst>(Latch->getTerminator())) {
5108 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
5109 "Both outgoing branches should not target same header!");
5110 BECond = BI->getCondition();
5111 IsPosBECond = BI->getSuccessor(0) == L->getHeader();
5112 } else {
5113 return S;
5114 }
5115 }
5116 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
5117 return Rewriter.visit(S);
5118 }
5119
5120 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
5121 const SCEV *Result = Expr;
5122 bool InvariantF = SE.isLoopInvariant(Expr, L);
5123
5124 if (!InvariantF) {
5126 switch (I->getOpcode()) {
5127 case Instruction::Select: {
5128 SelectInst *SI = cast<SelectInst>(I);
5129 std::optional<const SCEV *> Res =
5130 compareWithBackedgeCondition(SI->getCondition());
5131 if (Res) {
5132 bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
5133 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
5134 }
5135 break;
5136 }
5137 default: {
5138 std::optional<const SCEV *> Res = compareWithBackedgeCondition(I);
5139 if (Res)
5140 Result = *Res;
5141 break;
5142 }
5143 }
5144 }
5145 return Result;
5146 }
5147
5148private:
5149 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
5150 bool IsPosBECond, ScalarEvolution &SE)
5151 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
5152 IsPositiveBECond(IsPosBECond) {}
5153
5154 std::optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
5155
5156 const Loop *L;
5157 /// Loop back condition.
5158 Value *BackedgeCond = nullptr;
5159 /// Set to true if loop back is on positive branch condition.
5160 bool IsPositiveBECond;
5161};
5162
5163std::optional<const SCEV *>
5164SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
5165
5166 // If value matches the backedge condition for loop latch,
5167 // then return a constant evolution node based on loopback
5168 // branch taken.
5169 if (BackedgeCond == IC)
5170 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
5172 return std::nullopt;
5173}
5174
5175class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
5176public:
5177 static const SCEV *rewrite(const SCEV *S, const Loop *L,
5178 ScalarEvolution &SE) {
5179 SCEVShiftRewriter Rewriter(L, SE);
5180 const SCEV *Result = Rewriter.visit(S);
5181 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
5182 }
5183
5184 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
5185 // Only allow AddRecExprs for this loop.
5186 if (!SE.isLoopInvariant(Expr, L))
5187 Valid = false;
5188 return Expr;
5189 }
5190
5191 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
5192 if (Expr->getLoop() == L && Expr->isAffine())
5193 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
5194 Valid = false;
5195 return Expr;
5196 }
5197
5198 bool isValid() { return Valid; }
5199
5200private:
5201 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
5202 : SCEVRewriteVisitor(SE), L(L) {}
5203
5204 const Loop *L;
5205 bool Valid = true;
5206};
5207
5208} // end anonymous namespace
5209
5211ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
5212 if (!AR->isAffine())
5213 return SCEV::FlagAnyWrap;
5214
5215 using OBO = OverflowingBinaryOperator;
5216
5218
5219 if (!AR->hasNoSelfWrap()) {
5220 const SCEV *BECount = getConstantMaxBackedgeTakenCount(AR->getLoop());
5221 if (const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) {
5222 ConstantRange StepCR = getSignedRange(AR->getStepRecurrence(*this));
5223 const APInt &BECountAP = BECountMax->getAPInt();
5224 unsigned NoOverflowBitWidth =
5225 BECountAP.getActiveBits() + StepCR.getMinSignedBits();
5226 if (NoOverflowBitWidth <= getTypeSizeInBits(AR->getType()))
5228 }
5229 }
5230
5231 if (!AR->hasNoSignedWrap()) {
5232 ConstantRange AddRecRange = getSignedRange(AR);
5233 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
5234
5236 Instruction::Add, IncRange, OBO::NoSignedWrap);
5237 if (NSWRegion.contains(AddRecRange))
5239 }
5240
5241 if (!AR->hasNoUnsignedWrap()) {
5242 ConstantRange AddRecRange = getUnsignedRange(AR);
5243 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
5244
5246 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5247 if (NUWRegion.contains(AddRecRange))
5249 }
5250
5251 return Result;
5252}
5253
5255ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
5257
5258 if (AR->hasNoSignedWrap())
5259 return Result;
5260
5261 if (!AR->isAffine())
5262 return Result;
5263
5264 // This function can be expensive, only try to prove NSW once per AddRec.
5265 if (!SignedWrapViaInductionTried.insert(AR).second)
5266 return Result;
5267
5268 const SCEV *Step = AR->getStepRecurrence(*this);
5269 const Loop *L = AR->getLoop();
5270
5271 // Check whether the backedge-taken count is SCEVCouldNotCompute.
5272 // Note that this serves two purposes: It filters out loops that are
5273 // simply not analyzable, and it covers the case where this code is
5274 // being called from within backedge-taken count analysis, such that
5275 // attempting to ask for the backedge-taken count would likely result
5276 // in infinite recursion. In the later case, the analysis code will
5277 // cope with a conservative value, and it will take care to purge
5278 // that value once it has finished.
5279 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
5280
5281 // Normally, in the cases we can prove no-overflow via a
5282 // backedge guarding condition, we can also compute a backedge
5283 // taken count for the loop. The exceptions are assumptions and
5284 // guards present in the loop -- SCEV is not great at exploiting
5285 // these to compute max backedge taken counts, but can still use
5286 // these to prove lack of overflow. Use this fact to avoid
5287 // doing extra work that may not pay off.
5288
5289 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5290 AC.assumptions().empty())
5291 return Result;
5292
5293 // If the backedge is guarded by a comparison with the pre-inc value the
5294 // addrec is safe. Also, if the entry is guarded by a comparison with the
5295 // start value and the backedge is guarded by a comparison with the post-inc
5296 // value, the addrec is safe.
5298 const SCEV *OverflowLimit =
5299 getSignedOverflowLimitForStep(Step, &Pred, this);
5300 if (OverflowLimit &&
5301 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
5302 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
5303 Result = setFlags(Result, SCEV::FlagNSW);
5304 }
5305 return Result;
5306}
5308ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
5310
5311 if (AR->hasNoUnsignedWrap())
5312 return Result;
5313
5314 if (!AR->isAffine())
5315 return Result;
5316
5317 // This function can be expensive, only try to prove NUW once per AddRec.
5318 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5319 return Result;
5320
5321 const SCEV *Step = AR->getStepRecurrence(*this);
5322 unsigned BitWidth = getTypeSizeInBits(AR->getType());
5323 const Loop *L = AR->getLoop();
5324
5325 // Check whether the backedge-taken count is SCEVCouldNotCompute.
5326 // Note that this serves two purposes: It filters out loops that are
5327 // simply not analyzable, and it covers the case where this code is
5328 // being called from within backedge-taken count analysis, such that
5329 // attempting to ask for the backedge-taken count would likely result
5330 // in infinite recursion. In the later case, the analysis code will
5331 // cope with a conservative value, and it will take care to purge
5332 // that value once it has finished.
5333 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
5334
5335 // Normally, in the cases we can prove no-overflow via a
5336 // backedge guarding condition, we can also compute a backedge
5337 // taken count for the loop. The exceptions are assumptions and
5338 // guards present in the loop -- SCEV is not great at exploiting
5339 // these to compute max backedge taken counts, but can still use
5340 // these to prove lack of overflow. Use this fact to avoid
5341 // doing extra work that may not pay off.
5342
5343 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5344 AC.assumptions().empty())
5345 return Result;
5346
5347 // If the backedge is guarded by a comparison with the pre-inc value the
5348 // addrec is safe. Also, if the entry is guarded by a comparison with the
5349 // start value and the backedge is guarded by a comparison with the post-inc
5350 // value, the addrec is safe.
5351 if (isKnownPositive(Step)) {
5352 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
5353 getUnsignedRangeMax(Step));
5356 Result = setFlags(Result, SCEV::FlagNUW);
5357 }
5358 }
5359
5360 return Result;
5361}
5362
5363namespace {
5364
5365/// Represents an abstract binary operation. This may exist as a
5366/// normal instruction or constant expression, or may have been
5367/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode; // Instruction opcode, e.g. Instruction::Add.
  Value *LHS;
  Value *RHS;
  bool IsNSW = false; // No-signed-wrap flag.
  bool IsNUW = false; // No-unsigned-wrap flag.

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  /// Wrap an existing operator, taking its opcode and first two operands and,
  /// when it is an overflowing binary operator, its nsw/nuw flags.
  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  /// Build a synthesized operation derived from an expression tree; no
  /// underlying IR operator is recorded (Op stays null).
  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};
5392
5393} // end anonymous namespace
5394
5395/// Try to map \p V into a BinaryOp, and return \c std::nullopt on failure.
static std::optional<BinaryOp> MatchBinaryOp(Value *V, const DataLayout &DL,
                                             AssumptionCache &AC,
                                             const DominatorTree &DT,
                                             const Instruction *CxtI) {
  // Only Operators (instructions / constant expressions) can form a BinaryOp.
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return std::nullopt;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  // These opcodes map directly to a BinaryOp, inheriting nsw/nuw flags
  // where the operator carries them.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Or: {
    // Convert or disjoint into add nuw nsw.
    if (cast<PossiblyDisjointInst>(Op)->isDisjoint()) {
      BinaryOp BinOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1),
                     /*IsNSW=*/true, /*IsNUW=*/true);
      // Keep the reference to the original instruction so that we can later
      // check whether it can produce poison value or not.
      BinOp.Op = Op;
      return BinOp;
    }
    return BinaryOp(Op);
  }

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    // Binary `xor` is a bit-wise `add`.
    if (V->getType()->isIntegerTy(1))
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into a unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        // lshr X, C  ==>  udiv X, (1 << C)  when C < bitwidth.
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    // Recognize extractvalue of the arithmetic result (index 0) of a
    // with.overflow intrinsic.
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise intrinsic loop.decrement.reg, and as this has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  // NOTE(review): DL, AC and CxtI are unused in the visible body — presumably
  // kept for interface parity with callers/overloads; confirm before removing.
  return std::nullopt;
}
5495
5496/// Helper function to createAddRecFromPHIWithCasts. We have a phi
5497/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
5498/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
5499/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
5500/// follows one of the following patterns:
5501/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
5502/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
5503/// If the SCEV expression of \p Op conforms with one of the expected patterns
5504/// we return the type of the truncation operation, and indicate whether the
5505/// truncated type should be treated as signed/unsigned by setting
5506/// \p Signed to true/false, respectively.
5507static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
5508 bool &Signed, ScalarEvolution &SE) {
5509 // The case where Op == SymbolicPHI (that is, with no type conversions on
5510 // the way) is handled by the regular add recurrence creating logic and
5511 // would have already been triggered in createAddRecForPHI. Reaching it here
5512 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
5513 // because one of the other operands of the SCEVAddExpr updating this PHI is
5514 // not invariant).
5515 //
5516 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
5517 // this case predicates that allow us to prove that Op == SymbolicPHI will
5518 // be added.
5519 if (Op == SymbolicPHI)
5520 return nullptr;
5521
5522 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
5523 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
5524 if (SourceBits != NewBits)
5525 return nullptr;
5526
5527 if (match(Op, m_scev_SExt(m_scev_Trunc(m_scev_Specific(SymbolicPHI))))) {
5528 Signed = true;
5529 return cast<SCEVCastExpr>(Op)->getOperand()->getType();
5530 }
5531 if (match(Op, m_scev_ZExt(m_scev_Trunc(m_scev_Specific(SymbolicPHI))))) {
5532 Signed = false;
5533 return cast<SCEVCastExpr>(Op)->getOperand()->getType();
5534 }
5535 return nullptr;
5536}
5537
5538static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
5539 if (!PN->getType()->isIntegerTy())
5540 return nullptr;
5541 const Loop *L = LI.getLoopFor(PN->getParent());
5542 if (!L || L->getHeader() != PN->getParent())
5543 return nullptr;
5544 return L;
5545}
5546
5547// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
5548// computation that updates the phi follows the following pattern:
5549// (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
5550// which correspond to a phi->trunc->sext/zext->add->phi update chain.
5551// If so, try to see if it can be rewritten as an AddRecExpr under some
5552// Predicates. If successful, return them as a pair. Also cache the results
5553// of the analysis.
5554//
5555// Example usage scenario:
5556// Say the Rewriter is called for the following SCEV:
5557// 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5558// where:
5559// %X = phi i64 (%Start, %BEValue)
5560// It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
5561// and call this function with %SymbolicPHI = %X.
5562//
5563// The analysis will find that the value coming around the backedge has
5564// the following SCEV:
5565// BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
5566// Upon concluding that this matches the desired pattern, the function
5567// will return the pair {NewAddRec, SmallPredsVec} where:
5568// NewAddRec = {%Start,+,%Step}
5569// SmallPredsVec = {P1, P2, P3} as follows:
5570// P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
5571// P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
5572// P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
5573// The returned pair means that SymbolicPHI can be rewritten into NewAddRec
5574// under the predicates {P1,P2,P3}.
5575// This predicated rewrite will be cached in PredicatedSCEVRewrites:
5576// PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)}
5577//
5578// TODO's:
5579//
5580// 1) Extend the Induction descriptor to also support inductions that involve
5581// casts: When needed (namely, when we are called in the context of the
5582// vectorizer induction analysis), a Set of cast instructions will be
5583// populated by this method, and provided back to isInductionPHI. This is
5584// needed to allow the vectorizer to properly record them to be ignored by
5585// the cost model and to avoid vectorizing them (otherwise these casts,
5586// which are redundant under the runtime overflow checks, will be
5587// vectorized, which can be costly).
5588//
5589// 2) Support additional induction/PHISCEV patterns: We also want to support
5590// inductions where the sext-trunc / zext-trunc operations (partly) occur
5591// after the induction update operation (the induction increment):
5592//
5593// (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
5594// which correspond to a phi->add->trunc->sext/zext->phi update chain.
5595//
5596// (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
5597// which correspond to a phi->trunc->add->sext/zext->phi update chain.
5598//
5599// 3) Outline common code with createAddRecFromPHI to avoid duplication.
5600std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5601ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
5603
5604 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
5605 // return an AddRec expression under some predicate.
5606
5607 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5608 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5609 assert(L && "Expecting an integer loop header phi");
5610
5611 // The loop may have multiple entrances or multiple exits; we can analyze
5612 // this phi as an addrec if it has a unique entry value and a unique
5613 // backedge value.
5614 Value *BEValueV = nullptr, *StartValueV = nullptr;
5615 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5616 Value *V = PN->getIncomingValue(i);
5617 if (L->contains(PN->getIncomingBlock(i))) {
5618 if (!BEValueV) {
5619 BEValueV = V;
5620 } else if (BEValueV != V) {
5621 BEValueV = nullptr;
5622 break;
5623 }
5624 } else if (!StartValueV) {
5625 StartValueV = V;
5626 } else if (StartValueV != V) {
5627 StartValueV = nullptr;
5628 break;
5629 }
5630 }
5631 if (!BEValueV || !StartValueV)
5632 return std::nullopt;
5633
5634 const SCEV *BEValue = getSCEV(BEValueV);
5635
5636 // If the value coming around the backedge is an add with the symbolic
5637 // value we just inserted, possibly with casts that we can ignore under
5638 // an appropriate runtime guard, then we found a simple induction variable!
5639 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
5640 if (!Add)
5641 return std::nullopt;
5642
5643 // If there is a single occurrence of the symbolic value, possibly
5644 // casted, replace it with a recurrence.
5645 unsigned FoundIndex = Add->getNumOperands();
5646 Type *TruncTy = nullptr;
5647 bool Signed;
5648 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5649 if ((TruncTy =
5650 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
5651 if (FoundIndex == e) {
5652 FoundIndex = i;
5653 break;
5654 }
5655
5656 if (FoundIndex == Add->getNumOperands())
5657 return std::nullopt;
5658
5659 // Create an add with everything but the specified operand.
5661 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5662 if (i != FoundIndex)
5663 Ops.push_back(Add->getOperand(i));
5664 const SCEV *Accum = getAddExpr(Ops);
5665
5666 // The runtime checks will not be valid if the step amount is
5667 // varying inside the loop.
5668 if (!isLoopInvariant(Accum, L))
5669 return std::nullopt;
5670
5671 // *** Part2: Create the predicates
5672
5673 // Analysis was successful: we have a phi-with-cast pattern for which we
5674 // can return an AddRec expression under the following predicates:
5675 //
5676 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
5677 // fits within the truncated type (does not overflow) for i = 0 to n-1.
5678 // P2: An Equal predicate that guarantees that
5679 // Start = (Ext ix (Trunc iy (Start) to ix) to iy)
5680 // P3: An Equal predicate that guarantees that
5681 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
5682 //
5683 // As we next prove, the above predicates guarantee that:
5684 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
5685 //
5686 //
5687 // More formally, we want to prove that:
5688 // Expr(i+1) = Start + (i+1) * Accum
5689 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5690 //
5691 // Given that:
5692 // 1) Expr(0) = Start
5693 // 2) Expr(1) = Start + Accum
5694 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
5695 // 3) Induction hypothesis (step i):
5696 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
5697 //
5698 // Proof:
5699 // Expr(i+1) =
5700 // = Start + (i+1)*Accum
5701 // = (Start + i*Accum) + Accum
5702 // = Expr(i) + Accum
5703 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
5704 // :: from step i
5705 //
5706 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
5707 //
5708 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
5709 // + (Ext ix (Trunc iy (Accum) to ix) to iy)
5710 // + Accum :: from P3
5711 //
5712 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
5713 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
5714 //
5715 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
5716 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5717 //
5718 // By induction, the same applies to all iterations 1<=i<n:
5719 //
5720
5721 // Create a truncated addrec for which we will add a no overflow check (P1).
5722 const SCEV *StartVal = getSCEV(StartValueV);
5723 const SCEV *PHISCEV =
5724 getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
5725 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
5726
5727 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
5728 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
5729 // will be constant.
5730 //
5731 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
5732 // add P1.
5733 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5737 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
5738 Predicates.push_back(AddRecPred);
5739 }
5740
5741 // Create the Equal Predicates P2,P3:
5742
5743 // It is possible that the predicates P2 and/or P3 are computable at
5744 // compile time due to StartVal and/or Accum being constants.
5745 // If either one is, then we can check that now and escape if either P2
5746 // or P3 is false.
5747
5748 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
5749 // for each of StartVal and Accum
5750 auto getExtendedExpr = [&](const SCEV *Expr,
5751 bool CreateSignExtend) -> const SCEV * {
5752 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
5753 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
5754 const SCEV *ExtendedExpr =
5755 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
5756 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5757 return ExtendedExpr;
5758 };
5759
5760 // Given:
5761 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy
5762 // = getExtendedExpr(Expr)
5763 // Determine whether the predicate P: Expr == ExtendedExpr
5764 // is known to be false at compile time
5765 auto PredIsKnownFalse = [&](const SCEV *Expr,
5766 const SCEV *ExtendedExpr) -> bool {
5767 return Expr != ExtendedExpr &&
5768 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5769 };
5770
5771 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5772 if (PredIsKnownFalse(StartVal, StartExtended)) {
5773 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
5774 return std::nullopt;
5775 }
5776
5777 // The Step is always Signed (because the overflow checks are either
5778 // NSSW or NUSW)
5779 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5780 if (PredIsKnownFalse(Accum, AccumExtended)) {
5781 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
5782 return std::nullopt;
5783 }
5784
5785 auto AppendPredicate = [&](const SCEV *Expr,
5786 const SCEV *ExtendedExpr) -> void {
5787 if (Expr != ExtendedExpr &&
5788 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5789 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5790 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
5791 Predicates.push_back(Pred);
5792 }
5793 };
5794
5795 AppendPredicate(StartVal, StartExtended);
5796 AppendPredicate(Accum, AccumExtended);
5797
5798 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5799 // which the casts had been folded away. The caller can rewrite SymbolicPHI
5800 // into NewAR if it will also add the runtime overflow checks specified in
5801 // Predicates.
5802 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5803
5804 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5805 std::make_pair(NewAR, Predicates);
5806 // Remember the result of the analysis for this SCEV at this locayyytion.
5807 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5808 return PredRewrite;
5809}
5810
5811std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5813 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5814 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5815 if (!L)
5816 return std::nullopt;
5817
5818 // Check to see if we already analyzed this PHI.
5819 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5820 if (I != PredicatedSCEVRewrites.end()) {
5821 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5822 I->second;
5823 // Analysis was done before and failed to create an AddRec:
5824 if (Rewrite.first == SymbolicPHI)
5825 return std::nullopt;
5826 // Analysis was done before and succeeded to create an AddRec under
5827 // a predicate:
5828 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
5829 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
5830 return Rewrite;
5831 }
5832
5833 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5834 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5835
5836 // Record in the cache that the analysis failed
5837 if (!Rewrite) {
5839 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5840 return std::nullopt;
5841 }
5842
5843 return Rewrite;
5844}
5845
5846// FIXME: This utility is currently required because the Rewriter currently
5847// does not rewrite this expression:
5848// {0, +, (sext ix (trunc iy to ix) to iy)}
5849// into {0, +, %step},
5850// even when the following Equal predicate exists:
5851// "%step == (sext ix (trunc iy to ix) to iy)".
5853 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5854 if (AR1 == AR2)
5855 return true;
5856
5857 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5858 if (Expr1 != Expr2 &&
5859 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5860 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5861 return false;
5862 return true;
5863 };
5864
5865 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5866 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5867 return false;
5868 return true;
5869}
5870
5871/// A helper function for createAddRecFromPHI to handle simple cases.
5872///
5873/// This function tries to find an AddRec expression for the simplest (yet most
5874/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5875/// If it fails, createAddRecFromPHI will use a more general, but slow,
5876/// technique for finding the AddRec expression.
5877const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5878 Value *BEValueV,
5879 Value *StartValueV) {
5880 const Loop *L = LI.getLoopFor(PN->getParent());
5881 assert(L && L->getHeader() == PN->getParent());
5882 assert(BEValueV && StartValueV);
5883
5884 auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN);
5885 if (!BO)
5886 return nullptr;
5887
5888 if (BO->Opcode != Instruction::Add)
5889 return nullptr;
5890
5891 const SCEV *Accum = nullptr;
5892 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5893 Accum = getSCEV(BO->RHS);
5894 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5895 Accum = getSCEV(BO->LHS);
5896
5897 if (!Accum)
5898 return nullptr;
5899
5901 if (BO->IsNUW)
5902 Flags = setFlags(Flags, SCEV::FlagNUW);
5903 if (BO->IsNSW)
5904 Flags = setFlags(Flags, SCEV::FlagNSW);
5905
5906 const SCEV *StartVal = getSCEV(StartValueV);
5907 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5908 insertValueToMap(PN, PHISCEV);
5909
5910 if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5911 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR),
5912 (AR->getNoWrapFlags() | proveNoWrapViaConstantRanges(AR)));
5913 }
5914
5915 // We can add Flags to the post-inc expression only if we
5916 // know that it is *undefined behavior* for BEValueV to
5917 // overflow.
5918 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5919 assert(isLoopInvariant(Accum, L) &&
5920 "Accum is defined outside L, but is not invariant?");
5921 if (isAddRecNeverPoison(BEInst, L))
5922 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5923 }
5924
5925 return PHISCEV;
5926}
5927
5928const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5929 const Loop *L = LI.getLoopFor(PN->getParent());
5930 if (!L || L->getHeader() != PN->getParent())
5931 return nullptr;
5932
5933 // The loop may have multiple entrances or multiple exits; we can analyze
5934 // this phi as an addrec if it has a unique entry value and a unique
5935 // backedge value.
5936 Value *BEValueV = nullptr, *StartValueV = nullptr;
5937 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5938 Value *V = PN->getIncomingValue(i);
5939 if (L->contains(PN->getIncomingBlock(i))) {
5940 if (!BEValueV) {
5941 BEValueV = V;
5942 } else if (BEValueV != V) {
5943 BEValueV = nullptr;
5944 break;
5945 }
5946 } else if (!StartValueV) {
5947 StartValueV = V;
5948 } else if (StartValueV != V) {
5949 StartValueV = nullptr;
5950 break;
5951 }
5952 }
5953 if (!BEValueV || !StartValueV)
5954 return nullptr;
5955
5956 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5957 "PHI node already processed?");
5958
5959 // First, try to find AddRec expression without creating a fictituos symbolic
5960 // value for PN.
5961 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5962 return S;
5963
5964 // Handle PHI node value symbolically.
5965 const SCEV *SymbolicName = getUnknown(PN);
5966 insertValueToMap(PN, SymbolicName);
5967
5968 // Using this symbolic name for the PHI, analyze the value coming around
5969 // the back-edge.
5970 const SCEV *BEValue = getSCEV(BEValueV);
5971
5972 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5973 // has a special value for the first iteration of the loop.
5974
5975 // If the value coming around the backedge is an add with the symbolic
5976 // value we just inserted, then we found a simple induction variable!
5977 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5978 // If there is a single occurrence of the symbolic value, replace it
5979 // with a recurrence.
5980 unsigned FoundIndex = Add->getNumOperands();
5981 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5982 if (Add->getOperand(i) == SymbolicName)
5983 if (FoundIndex == e) {
5984 FoundIndex = i;
5985 break;
5986 }
5987
5988 if (FoundIndex != Add->getNumOperands()) {
5989 // Create an add with everything but the specified operand.
5991 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5992 if (i != FoundIndex)
5993 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5994 L, *this));
5995 const SCEV *Accum = getAddExpr(Ops);
5996
5997 // This is not a valid addrec if the step amount is varying each
5998 // loop iteration, but is not itself an addrec in this loop.
5999 if (isLoopInvariant(Accum, L) ||
6000 (isa<SCEVAddRecExpr>(Accum) &&
6001 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
6003
6004 if (auto BO = MatchBinaryOp(BEValueV, getDataLayout(), AC, DT, PN)) {
6005 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
6006 if (BO->IsNUW)
6007 Flags = setFlags(Flags, SCEV::FlagNUW);
6008 if (BO->IsNSW)
6009 Flags = setFlags(Flags, SCEV::FlagNSW);
6010 }
6011 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
6012 if (GEP->getOperand(0) == PN) {
6013 GEPNoWrapFlags NW = GEP->getNoWrapFlags();
6014 // If the increment has any nowrap flags, then we know the address
6015 // space cannot be wrapped around.
6016 if (NW != GEPNoWrapFlags::none())
6017 Flags = setFlags(Flags, SCEV::FlagNW);
6018 // If the GEP is nuw or nusw with non-negative offset, we know that
6019 // no unsigned wrap occurs. We cannot set the nsw flag as only the
6020 // offset is treated as signed, while the base is unsigned.
6021 if (NW.hasNoUnsignedWrap() ||
6023 Flags = setFlags(Flags, SCEV::FlagNUW);
6024 }
6025
6026 // We cannot transfer nuw and nsw flags from subtraction
6027 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
6028 // for instance.
6029 }
6030
6031 const SCEV *StartVal = getSCEV(StartValueV);
6032 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
6033
6034 // Okay, for the entire analysis of this edge we assumed the PHI
6035 // to be symbolic. We now need to go back and purge all of the
6036 // entries for the scalars that use the symbolic expression.
6037 forgetMemoizedResults({SymbolicName});
6038 insertValueToMap(PN, PHISCEV);
6039
6040 if (auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
6042 const_cast<SCEVAddRecExpr *>(AR),
6043 (AR->getNoWrapFlags() | proveNoWrapViaConstantRanges(AR)));
6044 }
6045
6046 // We can add Flags to the post-inc expression only if we
6047 // know that it is *undefined behavior* for BEValueV to
6048 // overflow.
6049 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
6050 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
6051 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
6052
6053 return PHISCEV;
6054 }
6055 }
6056 } else {
6057 // Otherwise, this could be a loop like this:
6058 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
6059 // In this case, j = {1,+,1} and BEValue is j.
6060 // Because the other in-value of i (0) fits the evolution of BEValue
6061 // i really is an addrec evolution.
6062 //
6063 // We can generalize this saying that i is the shifted value of BEValue
6064 // by one iteration:
6065 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
6066
6067 // Do not allow refinement in rewriting of BEValue.
6068 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
6069 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
6070 if (Shifted != getCouldNotCompute() && Start != getCouldNotCompute() &&
6071 isGuaranteedNotToCauseUB(Shifted) && ::impliesPoison(Shifted, Start)) {
6072 const SCEV *StartVal = getSCEV(StartValueV);
6073 if (Start == StartVal) {
6074 // Okay, for the entire analysis of this edge we assumed the PHI
6075 // to be symbolic. We now need to go back and purge all of the
6076 // entries for the scalars that use the symbolic expression.
6077 forgetMemoizedResults({SymbolicName});
6078 insertValueToMap(PN, Shifted);
6079 return Shifted;
6080 }
6081 }
6082 }
6083
6084 // Remove the temporary PHI node SCEV that has been inserted while intending
6085 // to create an AddRecExpr for this PHI node. We can not keep this temporary
6086 // as it will prevent later (possibly simpler) SCEV expressions to be added
6087 // to the ValueExprMap.
6088 eraseValueFromMap(PN);
6089
6090 return nullptr;
6091}
6092
6093// Try to match a control flow sequence that branches out at BI and merges back
6094// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
6095// match.
6097 Value *&C, Value *&LHS, Value *&RHS) {
6098 C = BI->getCondition();
6099
6100 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
6101 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
6102
6103 Use &LeftUse = Merge->getOperandUse(0);
6104 Use &RightUse = Merge->getOperandUse(1);
6105
6106 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
6107 LHS = LeftUse;
6108 RHS = RightUse;
6109 return true;
6110 }
6111
6112 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
6113 LHS = RightUse;
6114 RHS = LeftUse;
6115 return true;
6116 }
6117
6118 return false;
6119}
6120
6122 Value *&Cond, Value *&LHS,
6123 Value *&RHS) {
6124 auto IsReachable =
6125 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
6126 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
6127 // Try to match
6128 //
6129 // br %cond, label %left, label %right
6130 // left:
6131 // br label %merge
6132 // right:
6133 // br label %merge
6134 // merge:
6135 // V = phi [ %x, %left ], [ %y, %right ]
6136 //
6137 // as "select %cond, %x, %y"
6138
6139 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
6140 assert(IDom && "At least the entry block should dominate PN");
6141
6142 auto *BI = dyn_cast<CondBrInst>(IDom->getTerminator());
6143 return BI && BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS);
6144 }
6145 return false;
6146}
6147
6148const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
6149 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
6150 if (getOperandsForSelectLikePHI(DT, PN, Cond, LHS, RHS) &&
6153 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
6154
6155 return nullptr;
6156}
6157
6159 BinaryOperator *CommonInst = nullptr;
6160 // Check if instructions are identical.
6161 for (Value *Incoming : PN->incoming_values()) {
6162 auto *IncomingInst = dyn_cast<BinaryOperator>(Incoming);
6163 if (!IncomingInst)
6164 return nullptr;
6165 if (CommonInst) {
6166 if (!CommonInst->isIdenticalToWhenDefined(IncomingInst))
6167 return nullptr; // Not identical, give up
6168 } else {
6169 // Remember binary operator
6170 CommonInst = IncomingInst;
6171 }
6172 }
6173 return CommonInst;
6174}
6175
6176/// Returns SCEV for the first operand of a phi if all phi operands have
6177/// identical opcodes and operands
6178/// eg.
6179/// a: %add = %a + %b
6180/// br %c
6181/// b: %add1 = %a + %b
6182/// br %c
6183/// c: %phi = phi [%add, a], [%add1, b]
6184/// scev(%phi) => scev(%add)
6185const SCEV *
6186ScalarEvolution::createNodeForPHIWithIdenticalOperands(PHINode *PN) {
6187 BinaryOperator *CommonInst = getCommonInstForPHI(PN);
6188 if (!CommonInst)
6189 return nullptr;
6190
6191 // Check if SCEV exprs for instructions are identical.
6192 const SCEV *CommonSCEV = getSCEV(CommonInst);
6193 bool SCEVExprsIdentical =
6195 [this, CommonSCEV](Value *V) { return CommonSCEV == getSCEV(V); });
6196 return SCEVExprsIdentical ? CommonSCEV : nullptr;
6197}
6198
6199const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
6200 if (const SCEV *S = createAddRecFromPHI(PN))
6201 return S;
6202
6203 // We do not allow simplifying phi (undef, X) to X here, to avoid reusing the
6204 // phi node for X.
6205 if (Value *V = simplifyInstruction(
6206 PN, {getDataLayout(), &TLI, &DT, &AC, /*CtxI=*/nullptr,
6207 /*UseInstrInfo=*/true, /*CanUseUndef=*/false}))
6208 return getSCEV(V);
6209
6210 if (const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6211 return S;
6212
6213 if (const SCEV *S = createNodeFromSelectLikePHI(PN))
6214 return S;
6215
6216 // If it's not a loop phi, we can't handle it yet.
6217 return getUnknown(PN);
6218}
6219
6220bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
6221 SCEVTypes RootKind) {
6222 struct FindClosure {
6223 const SCEV *OperandToFind;
6224 const SCEVTypes RootKind; // Must be a sequential min/max expression.
6225 const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind.
6226
6227 bool Found = false;
6228
6229 bool canRecurseInto(SCEVTypes Kind) const {
6230 // We can only recurse into the SCEV expression of the same effective type
6231 // as the type of our root SCEV expression, and into zero-extensions.
6232 return RootKind == Kind || NonSequentialRootKind == Kind ||
6233 scZeroExtend == Kind;
6234 };
6235
6236 FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind)
6237 : OperandToFind(OperandToFind), RootKind(RootKind),
6238 NonSequentialRootKind(
6240 RootKind)) {}
6241
6242 bool follow(const SCEV *S) {
6243 Found = S == OperandToFind;
6244
6245 return !isDone() && canRecurseInto(S->getSCEVType());
6246 }
6247
6248 bool isDone() const { return Found; }
6249 };
6250
6251 FindClosure FC(OperandToFind, RootKind);
6252 visitAll(Root, FC);
6253 return FC.Found;
6254}
6255
std::optional<const SCEV *>
ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty,
                                                              ICmpInst *Cond,
                                                              Value *TrueVal,
                                                              Value *FalseVal) {
  // Model a select/phi whose condition is an icmp as a min/max (possibly
  // offset by a common addend) or as a sequential umin; returns std::nullopt
  // when no pattern matches.

  // Try to match some simple smax or umax patterns.
  auto *ICI = Cond;

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    // Normalize "less than" predicates to their "greater than" duals by
    // swapping the compared operands, then share the matching logic below.
    std::swap(LHS, RHS);
    [[fallthrough]];
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a > b ? a+x : b+x  ->  max(a, b)+x
    // a > b ? b+x : a+x  ->  min(a, b)+x
    bool Signed = ICI->isSigned();
    const SCEV *LA = getSCEV(TrueVal);
    const SCEV *RA = getSCEV(FalseVal);
    const SCEV *LS = getSCEV(LHS);
    const SCEV *RS = getSCEV(RHS);
    if (LA->getType()->isPointerTy()) {
      // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
      // Need to make sure we can't produce weird expressions involving
      // negated pointers.
      if (LA == LS && RA == RS)
        return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
      if (LA == RS && RA == LS)
        return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
    }
    // Widen the compared operands to the select's type (sign- or zero-
    // extending per the predicate's signedness) so that the difference
    // computation below operates on a common type. Pointers pass through.
    auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
      if (Op->getType()->isPointerTy()) {
        return Op;
      }
      if (Signed)
        Op = getNoopOrSignExtend(Op, Ty);
      else
        Op = getNoopOrZeroExtend(Op, Ty);
      return Op;
    };
    LS = CoerceOperand(LS);
    RS = CoerceOperand(RS);
    break;
    // If both hands differ from the compared values by the same amount,
    // fold to max/min of the compared values plus that common offset.
    const SCEV *LDiff = getMinusSCEV(LA, LS);
    const SCEV *RDiff = getMinusSCEV(RA, RS);
    if (LDiff == RDiff)
      return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
                        LDiff);
    // Crossed pairing: true hand tracks RHS, false hand tracks LHS -> min.
    LDiff = getMinusSCEV(LA, RS);
    RDiff = getMinusSCEV(RA, LS);
    if (LDiff == RDiff)
      return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
                        LDiff);
  }
  break;
  case ICmpInst::ICMP_NE:
    // x != 0 ? x+y : C+y  ->  x == 0 ? C+y : x+y
    std::swap(TrueVal, FalseVal);
    [[fallthrough]];
  case ICmpInst::ICMP_EQ:
    // x == 0 ? C+y : x+y  ->  umax(x, C)+y   iff C u<= 1
    const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), Ty);
    const SCEV *TrueValExpr = getSCEV(TrueVal);    // C+y
    const SCEV *FalseValExpr = getSCEV(FalseVal);  // x+y
    const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x
    const SCEV *C = getMinusSCEV(TrueValExpr, Y);  // C = (C+y)-y
    if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1))
      return getAddExpr(getUMaxExpr(X, C), Y);
  }
  // x == 0 ? 0 : umin    (..., x, ...)  ->  umin_seq(x, umin    (...))
  // x == 0 ? 0 : umin_seq(..., x, ...)  ->  umin_seq(x, umin_seq(...))
  // x == 0 ? 0 : umin    (..., umin_seq(..., x, ...), ...)
  //                      ->  umin_seq(x, umin (..., umin_seq(...), ...))
      isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) {
    // Peel zero-extensions off x so the containment check sees the bare
    // value that appears inside the umin operand list.
    const SCEV *X = getSCEV(LHS);
    while (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(X))
      X = ZExt->getOperand();
    if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(Ty)) {
      const SCEV *FalseValExpr = getSCEV(FalseVal);
      if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr))
        return getUMinExpr(getNoopOrZeroExtend(X, Ty), FalseValExpr,
                           /*Sequential=*/true);
    }
  }
  break;
  default:
    break;
  }

  return std::nullopt;
}
6362
static std::optional<const SCEV *>
                              const SCEV *TrueExpr, const SCEV *FalseExpr) {
  // Model an i1 select with one constant hand as C + umin_seq(cond', x - C),
  // where cond' is the condition (or its negation) chosen so the variable
  // hand x sits under the umin_seq. Returns std::nullopt if both hands are
  // variable.
  assert(CondExpr->getType()->isIntegerTy(1) &&
         TrueExpr->getType() == FalseExpr->getType() &&
         TrueExpr->getType()->isIntegerTy(1) &&
         "Unexpected operands of a select.");

  // i1 cond ? i1 x : i1 C  -->  C + (i1  cond ? (i1 x - i1 C) : i1 0)
  //                        -->  C + (umin_seq cond, x - C)
  //
  // i1 cond ? i1 C : i1 x  -->  C + (i1  cond ? i1 0 : (i1 x - i1 C))
  //                        -->  C + (i1 ~cond ? (i1 x - i1 C) : i1 0)
  //                        -->  C + (umin_seq ~cond, x - C)

  // FIXME: while we can't legally model the case where both of the hands
  // are fully variable, we only require that the *difference* is constant.
  if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
    return std::nullopt;

  const SCEV *X, *C;
  if (isa<SCEVConstant>(TrueExpr)) {
    // Constant is on the true hand: negate the condition so the variable
    // hand is selected when the (negated) condition is true.
    CondExpr = SE->getNotSCEV(CondExpr);
    X = FalseExpr;
    C = TrueExpr;
  } else {
    X = TrueExpr;
    C = FalseExpr;
  }
  return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C),
                                           /*Sequential=*/true));
}
6395
static std::optional<const SCEV *>
                              Value *FalseVal) {
  // IR-value wrapper over the SCEV-based umin_seq modeling: bail out early
  // unless one of the select hands is a constant (the SCEV overload requires
  // it anyway), then translate the operands to SCEVs and delegate.
  if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
    return std::nullopt;

  const auto *SECond = SE->getSCEV(Cond);
  const auto *SETrue = SE->getSCEV(TrueVal);
  const auto *SEFalse = SE->getSCEV(FalseVal);
  return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse);
}
6407
6408const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6409 Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) {
6410 assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?");
6411 assert(TrueVal->getType() == FalseVal->getType() &&
6412 V->getType() == TrueVal->getType() &&
6413 "Types of select hands and of the result must match.");
6414
6415 // For now, only deal with i1-typed `select`s.
6416 if (!V->getType()->isIntegerTy(1))
6417 return getUnknown(V);
6418
6419 if (std::optional<const SCEV *> S =
6420 createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal))
6421 return *S;
6422
6423 return getUnknown(V);
6424}
6425
6426const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond,
6427 Value *TrueVal,
6428 Value *FalseVal) {
6429 // Handle "constant" branch or select. This can occur for instance when a
6430 // loop pass transforms an inner loop and moves on to process the outer loop.
6431 if (auto *CI = dyn_cast<ConstantInt>(Cond))
6432 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6433
6434 if (auto *I = dyn_cast<Instruction>(V)) {
6435 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
6436 if (std::optional<const SCEV *> S =
6437 createNodeForSelectOrPHIInstWithICmpInstCond(I->getType(), ICI,
6438 TrueVal, FalseVal))
6439 return *S;
6440 }
6441 }
6442
6443 return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal);
6444}
6445
6446/// Expand GEP instructions into add and multiply operations. This allows them
6447/// to be analyzed by regular SCEV code.
6448const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
6449 assert(GEP->getSourceElementType()->isSized() &&
6450 "GEP source element type must be sized");
6451
6452 SmallVector<SCEVUse, 4> IndexExprs;
6453 for (Value *Index : GEP->indices())
6454 IndexExprs.push_back(getSCEV(Index));
6455 return getGEPExpr(GEP, IndexExprs);
6456}
6457
APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S,
                                               const Instruction *CtxI) {
  // Compute a constant that S is known to be a multiple of, optionally using
  // facts that hold at CtxI.
  uint64_t BitWidth = getTypeSizeInBits(S->getType());
  // Convert a trailing-zero count into the corresponding power-of-two
  // multiple (2^TrailingZeros), clamped at the bit width.
  auto GetShiftedByZeros = [BitWidth](uint32_t TrailingZeros) {
    return TrailingZeros >= BitWidth
               : APInt::getOneBitSet(BitWidth, TrailingZeros);
  };
  // For N-ary expressions whose multiple is the GCD of the operands'
  // multiples; bail out of the loop once the GCD degenerates to 1.
  auto GetGCDMultiple = [this, CtxI](const SCEVNAryExpr *N) {
    // The result is GCD of all operands results.
    APInt Res = getConstantMultiple(N->getOperand(0), CtxI);
    for (unsigned I = 1, E = N->getNumOperands(); I < E && Res != 1; ++I)
        Res, getConstantMultiple(N->getOperand(I), CtxI));
    return Res;
  };

  switch (S->getSCEVType()) {
  case scConstant:
    // A constant is trivially a multiple of itself.
    return cast<SCEVConstant>(S)->getAPInt();
  case scPtrToAddr:
  case scPtrToInt:
    return getConstantMultiple(cast<SCEVCastExpr>(S)->getOperand());
  case scUDivExpr:
  case scVScale:
    // Nothing useful is known; 1 divides everything.
    return APInt(BitWidth, 1);
  case scTruncate: {
    // Only multiples that are a power of 2 will hold after truncation.
    const SCEVTruncateExpr *T = cast<SCEVTruncateExpr>(S);
    uint32_t TZ = getMinTrailingZeros(T->getOperand(), CtxI);
    return GetShiftedByZeros(TZ);
  }
  case scZeroExtend: {
    // zext preserves the operand's value, so its multiple carries over.
    const SCEVZeroExtendExpr *Z = cast<SCEVZeroExtendExpr>(S);
    return getConstantMultiple(Z->getOperand(), CtxI).zext(BitWidth);
  }
  case scSignExtend: {
    // Only multiples that are a power of 2 will hold after sext.
    const SCEVSignExtendExpr *E = cast<SCEVSignExtendExpr>(S);
    uint32_t TZ = getMinTrailingZeros(E->getOperand(), CtxI);
    return GetShiftedByZeros(TZ);
  }
  case scMulExpr: {
    const SCEVMulExpr *M = cast<SCEVMulExpr>(S);
    if (M->hasNoUnsignedWrap()) {
      // The result is the product of all operand results.
      APInt Res = getConstantMultiple(M->getOperand(0), CtxI);
      for (const SCEV *Operand : M->operands().drop_front())
        Res = Res * getConstantMultiple(Operand, CtxI);
      return Res;
    }

    // If there are no wrap guarentees, find the trailing zeros, which is the
    // sum of trailing zeros for all its operands.
    uint32_t TZ = 0;
    for (const SCEV *Operand : M->operands())
      TZ += getMinTrailingZeros(Operand, CtxI);
    return GetShiftedByZeros(TZ);
  }
  case scAddExpr:
  case scAddRecExpr: {
    const SCEVNAryExpr *N = cast<SCEVNAryExpr>(S);
    if (N->hasNoUnsignedWrap())
      return GetGCDMultiple(N);
    // Find the trailing bits, which is the minimum of its operands.
    uint32_t TZ = getMinTrailingZeros(N->getOperand(0), CtxI);
    for (const SCEV *Operand : N->operands().drop_front())
      TZ = std::min(TZ, getMinTrailingZeros(Operand, CtxI));
    return GetShiftedByZeros(TZ);
  }
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return GetGCDMultiple(cast<SCEVNAryExpr>(S));
  case scUnknown: {
    // Ask ValueTracking for known bits. SCEVUnknown only become available at
    // the point their underlying IR instruction has been defined. If CtxI was
    // not provided, use:
    // * the first instruction in the entry block if it is an argument
    // * the instruction itself otherwise.
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    if (!CtxI) {
      if (isa<Argument>(U->getValue()))
        CtxI = &*F.getEntryBlock().begin();
      else if (auto *I = dyn_cast<Instruction>(U->getValue()))
        CtxI = I;
    }
    unsigned Known =
        computeKnownBits(U->getValue(),
                         SimplifyQuery(getDataLayout(), &DT, &AC, CtxI)
                             .allowEphemerals(true))
            .countMinTrailingZeros();
    return GetShiftedByZeros(Known);
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
6559
                                           const Instruction *CtxI) {
  // Skip looking up and updating the cache if there is a context instruction,
  // as the result will only be valid in the specified context.
  if (CtxI)
    return getConstantMultipleImpl(S, CtxI);

  auto I = ConstantMultipleCache.find(S);
  if (I != ConstantMultipleCache.end())
    return I->second;

  // Context-free results are memoized in ConstantMultipleCache (which is
  // invalidated elsewhere, e.g. when no-wrap flags are strengthened).
  APInt Result = getConstantMultipleImpl(S, CtxI);
  auto InsertPair = ConstantMultipleCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}
6576
  // Like getConstantMultiple, but maps a zero multiple to 1 so callers can
  // safely use the result as a divisor.
  APInt Multiple = getConstantMultiple(S);
  return Multiple == 0 ? APInt(Multiple.getBitWidth(), 1) : Multiple;
}
6581
                                              const Instruction *CtxI) {
  // The trailing zeros of the known constant multiple bound the minimum
  // number of trailing zero bits of S; cap at S's bit width.
  return std::min(getConstantMultiple(S, CtxI).countTrailingZeros(),
                  (unsigned)getTypeSizeInBits(S->getType()));
}
6587
/// Helper method to assign a range to V from metadata present in the IR.
/// Checks instruction !range metadata, call-site range attributes, and
/// argument range attributes, in that order.
static std::optional<ConstantRange> GetRangeFromMetadata(Value *V) {
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);
    if (const auto *CB = dyn_cast<CallBase>(V))
      if (std::optional<ConstantRange> Range = CB->getRange())
        return Range;
  }
  // Function arguments may carry a range attribute as well.
  if (auto *A = dyn_cast<Argument>(V))
    if (std::optional<ConstantRange> Range = A->getRange())
      return Range;

  return std::nullopt;
}
6603
                                     SCEV::NoWrapFlags Flags) {
  // Only act when the flags add new information. Strengthening no-wrap
  // flags changes what ranges/multiples can be proven, so drop any cached
  // results derived under the weaker flags.
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
    ConstantMultipleCache.erase(AddRec);
  }
}
6613
/// Compute a range for a SCEVUnknown phi that forms a simple shift
/// recurrence (ashr/lshr/shl), using the loop's max trip count to bound how
/// far the value can drift from its start.
ConstantRange ScalarEvolution::
getRangeForUnknownRecurrence(const SCEVUnknown *U) {
  const DataLayout &DL = getDataLayout();

  unsigned BitWidth = getTypeSizeInBits(U->getType());
  const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);

  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
  // use information about the trip count to improve our available range. Note
  // that the trip count independent cases are already handled by known bits.
  // WARNING: The definition of recurrence used here is subtly different than
  // the one used by AddRec (and thus most of this file). Step is allowed to
  // be arbitrarily loop varying here, where AddRec allows only loop invariant
  // and other addrecs in the same loop (for non-affine addrecs). The code
  // below intentionally handles the case where step is not loop invariant.
  auto *P = dyn_cast<PHINode>(U->getValue());
  if (!P)
    return FullSet;

  // Make sure that no Phi input comes from an unreachable block. Otherwise,
  // even the values that are not available in these blocks may come from them,
  // and this leads to false-positive recurrence test.
  for (auto *Pred : predecessors(P->getParent()))
    if (!DT.isReachableFromEntry(Pred))
      return FullSet;

  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(P, BO, Start, Step))
    return FullSet;

  // If we found a recurrence in reachable code, we must be in a loop. Note
  // that BO might be in some subloop of L, and that's completely okay.
  auto *L = LI.getLoopFor(P->getParent());
  assert(L && L->getHeader() == P->getParent());
  if (!L->contains(BO->getParent()))
    // NOTE: This bailout should be an assert instead. However, asserting
    // the condition here exposes a case where LoopFusion is querying SCEV
    // with malformed loop information during the midst of the transform.
    // There doesn't appear to be an obvious fix, so for the moment bailout
    // until the caller issue can be fixed. PR49566 tracks the bug.
    return FullSet;

  // TODO: Extend to other opcodes such as mul, and div
  switch (BO->getOpcode()) {
  default:
    return FullSet;
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
    break;
  };

  if (BO->getOperand(0) != P)
    // TODO: Handle the power function forms some day.
    return FullSet;

  // Need a known, small trip count to bound the cumulative shift. A trip
  // count >= BitWidth can shift out every bit, so gives no information.
  unsigned TC = getSmallConstantMaxTripCount(L);
  if (!TC || TC >= BitWidth)
    return FullSet;

  auto KnownStart = computeKnownBits(Start, DL, &AC, nullptr, &DT);
  auto KnownStep = computeKnownBits(Step, DL, &AC, nullptr, &DT);
  assert(KnownStart.getBitWidth() == BitWidth &&
         KnownStep.getBitWidth() == BitWidth);

  // Compute total shift amount, being careful of overflow and bitwidths.
  auto MaxShiftAmt = KnownStep.getMaxValue();
  APInt TCAP(BitWidth, TC-1);
  bool Overflow = false;
  auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
  if (Overflow)
    return FullSet;

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("filtered out above");
  case Instruction::AShr: {
    // For each ashr, three cases:
    //   shift = 0          => unchanged value
    //   saturation         => 0 or -1
    //   other              => a value closer to zero (of the same sign)
    // Thus, the end value is closer to zero than the start.
    auto KnownEnd = KnownBits::ashr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    if (KnownStart.isNonNegative())
      // Analogous to lshr (simply not yet canonicalized)
      return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                        KnownStart.getMaxValue() + 1);
    if (KnownStart.isNegative())
      // End >=u Start && End <=s Start
      return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
                                        KnownEnd.getMaxValue() + 1);
    // Sign of the start is unknown; no bound can be given.
    break;
  }
  case Instruction::LShr: {
    // For each lshr, three cases:
    //   shift = 0          => unchanged value
    //   saturation         => 0
    //   other              => a smaller positive number
    // Thus, the low end of the unsigned range is the last value produced.
    auto KnownEnd = KnownBits::lshr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                      KnownStart.getMaxValue() + 1);
  }
  case Instruction::Shl: {
    // Iff no bits are shifted out, value increases on every shift.
    auto KnownEnd = KnownBits::shl(KnownStart,
                                   KnownBits::makeConstant(TotalShift));
    if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
      return ConstantRange(KnownStart.getMinValue(),
                           KnownEnd.getMaxValue() + 1);
    break;
  }
  };
  return FullSet;
}
6732
// The goal of this function is to check if recursively visiting the operands
// of this PHI might lead to an infinite loop. If we do see such a loop,
// there's no good way to break it, so we avoid analyzing such cases.
//
// getRangeRef previously used a visited set to avoid infinite loops, but this
// caused other issues: the result was dependent on the order of getRangeRef
// calls, and the interaction with createSCEVIter could cause a stack overflow
// in some cases (see issue #148253).
//
// FIXME: The way this is implemented is overly conservative; this checks
// for a few obviously safe patterns, but anything that doesn't lead to
// recursion is fine.
  Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
    return true;

  // If every operand dominates the phi, no operand can (transitively) be
  // defined in terms of the phi itself, so recursing into them is safe.
  if (all_of(PHI->operands(),
             [&](Value *Operand) { return DT.dominates(Operand, PHI); }))
    return true;

  return false;
}
6756
/// Iterative driver for getRangeRef: collects the transitive operands of S
/// into a worklist and computes their ranges bottom-up, so that deeply
/// nested expressions do not recurse arbitrarily deep.
const ConstantRange &
ScalarEvolution::getRangeRefIter(const SCEV *S,
                                 ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  SmallVector<SCEVUse> WorkList;
  SmallPtrSet<const SCEV *, 8> Seen;

  // Add Expr to the worklist, if Expr is either an N-ary expression or a
  // SCEVUnknown PHI node.
  auto AddToWorklist = [&WorkList, &Seen, &Cache](const SCEV *Expr) {
    // Skip anything already seen or already cached.
    if (!Seen.insert(Expr).second)
      return;
    if (Cache.contains(Expr))
      return;
    switch (Expr->getSCEVType()) {
    case scUnknown:
      break;
      [[fallthrough]];
    case scConstant:
    case scVScale:
    case scTruncate:
    case scZeroExtend:
    case scSignExtend:
    case scPtrToAddr:
    case scPtrToInt:
    case scAddExpr:
    case scMulExpr:
    case scUDivExpr:
    case scAddRecExpr:
    case scUMaxExpr:
    case scSMaxExpr:
    case scUMinExpr:
    case scSMinExpr:
      WorkList.push_back(Expr);
      break;
    case scCouldNotCompute:
      llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    }
  };
  AddToWorklist(S);

  // Build worklist by queuing operands of N-ary expressions and phi nodes.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    const SCEV *P = WorkList[I];
    auto *UnknownS = dyn_cast<SCEVUnknown>(P);
    // If it is not a `SCEVUnknown`, just recurse into operands.
    if (!UnknownS) {
      for (const SCEV *Op : P->operands())
        AddToWorklist(Op);
      continue;
    }
    // `SCEVUnknown`'s require special treatment.
    if (PHINode *P = dyn_cast<PHINode>(UnknownS->getValue())) {
      // Only follow phis whose operands provably cannot recurse back into
      // the phi itself (see RangeRefPHIAllowedOperands).
      if (!RangeRefPHIAllowedOperands(DT, P))
        continue;
      for (auto &Op : reverse(P->operands()))
        AddToWorklist(getSCEV(Op));
    }
  }

  if (!WorkList.empty()) {
    // Use getRangeRef to compute ranges for items in the worklist in reverse
    // order. This will force ranges for earlier operands to be computed before
    // their users in most cases.
    for (const SCEV *P : reverse(drop_begin(WorkList))) {
      getRangeRef(P, SignHint);
    }
  }

  return getRangeRef(S, SignHint, 0);
}
6832
/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &ScalarEvolution::getRangeRef(
    const SCEV *S, ScalarEvolution::RangeSignHint SignHint, unsigned Depth) {
  // Unsigned and signed queries are memoized in separate caches.
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? ConstantRange::Unsigned

  // See if we've computed this range already.
  auto I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  // Switch to iteratively computing the range for S, if it is part of a deeply
  // nested expression.
    return getRangeRefIter(S, SignHint);

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known zeros
  // as well.
  if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
    APInt Multiple = getNonZeroConstantMultiple(S);
    APInt Remainder = APInt::getMaxValue(BitWidth).urem(Multiple);
    if (!Remainder.isZero())
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth) - Remainder + 1);
  }
  else {
    // Signed hint: known trailing zeros constrain the extremes of the range.
    uint32_t TZ = getMinTrailingZeros(S);
    if (TZ != 0) {
      ConservativeResult = ConstantRange(
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
    }
  }

  switch (S->getSCEVType()) {
  case scConstant:
    llvm_unreachable("Already handled above.");
  case scVScale:
    return setRange(S, SignHint, getVScaleRange(&F, BitWidth));
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(S);
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint, Depth + 1);
    return setRange(
        Trunc, SignHint,
        ConservativeResult.intersectWith(X.truncate(BitWidth), RangeType));
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(S);
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint, Depth + 1);
    return setRange(
        ZExt, SignHint,
        ConservativeResult.intersectWith(X.zeroExtend(BitWidth), RangeType));
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(S);
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint, Depth + 1);
    return setRange(
        SExt, SignHint,
        ConservativeResult.intersectWith(X.signExtend(BitWidth), RangeType));
  }
  case scPtrToAddr:
  case scPtrToInt: {
    // These casts pass the operand's range through unchanged.
    const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
    ConstantRange X = getRangeRef(Cast->getOperand(), SignHint, Depth + 1);
    return setRange(Cast, SignHint, X);
  }
  case scAddExpr: {
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    // Check if this is a URem pattern: A - (A / B) * B, which is always < B.
    const SCEV *URemLHS = nullptr, *URemRHS = nullptr;
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
        match(S, m_scev_URem(m_SCEV(URemLHS), m_SCEV(URemRHS), *this))) {
      ConstantRange LHSRange = getRangeRef(URemLHS, SignHint, Depth + 1);
      ConstantRange RHSRange = getRangeRef(URemRHS, SignHint, Depth + 1);
      ConservativeResult =
          ConservativeResult.intersectWith(LHSRange.urem(RHSRange), RangeType);
    }
    // Fold the operand ranges left-to-right, honoring the add's no-wrap
    // flags.
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint, Depth + 1);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (const SCEV *Op : drop_begin(Add->operands()))
      X = X.addWithNoWrap(getRangeRef(Op, SignHint, Depth + 1), WrapType,
                          RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scMulExpr: {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(S);
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint, Depth + 1);
    for (const SCEV *Op : drop_begin(Mul->operands()))
      X = X.multiply(getRangeRef(Op, SignHint, Depth + 1));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint, Depth + 1);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint, Depth + 1);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(S);
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isZero())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands except initial value have
    // the same sign or zero, the value won't ever be:
    // 1: smaller than initial value if operands are non negative,
    // 2: bigger than initial value if operands are non positive.
    // For both cases, value can not cross signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
                              getSignedRangeMax(AddRec->getStart()) +
                                  1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBEScev =
      if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
        APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();

        // Adjust MaxBECount to the same bitwidth as AddRec. We can truncate if
        // MaxBECount's active bits are all <= AddRec's bit width.
        if (MaxBECount.getBitWidth() > BitWidth &&
            MaxBECount.getActiveBits() <= BitWidth)
          MaxBECount = MaxBECount.trunc(BitWidth);
        else if (MaxBECount.getBitWidth() < BitWidth)
          MaxBECount = MaxBECount.zext(BitWidth);

        if (MaxBECount.getBitWidth() == BitWidth) {
          auto RangeFromAffine = getRangeForAffineAR(
              AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine, RangeType);

          auto RangeFromFactoring = getRangeViaFactoring(
              AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
        }
      }

      // Now try symbolic BE count and more powerful methods.
        const SCEV *SymbolicMaxBECount =
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBEScev->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    // Map the SCEV min/max kind to the matching intrinsic so ConstantRange's
    // intrinsic-range machinery can be reused below.
    switch (S->getSCEVType()) {
    case scUMaxExpr:
      ID = Intrinsic::umax;
      break;
    case scSMaxExpr:
      ID = Intrinsic::smax;
      break;
    case scUMinExpr:
      ID = Intrinsic::umin;
      break;
    case scSMinExpr:
      ID = Intrinsic::smin;
      break;
    default:
      llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr.");
    }

    const auto *NAry = cast<SCEVNAryExpr>(S);
    ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint, Depth + 1);
    for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
      X = X.intrinsic(
          ID, {X, getRangeRef(NAry->getOperand(i), SignHint, Depth + 1)});
    return setRange(S, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    Value *V = U->getValue();

    // Check if the IR explicitly contains !range metadata.
    std::optional<ConstantRange> MDRange = GetRangeFromMetadata(V);
    if (MDRange)
      ConservativeResult =
          ConservativeResult.intersectWith(*MDRange, RangeType);

    // Use facts about recurrences in the underlying IR. Note that add
    // recurrences are AddRecExprs and thus don't hit this path. This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(V, DL, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(V, DL, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
      if (!Known.Zero.getHiBits(NS).isZero())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isZero())
        Known.One.setHighBits(NS);
    }

    // Intersect with the known-bits range if it is non-trivial.
    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    if (U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
      // Strengthen the range if the underlying IR value is a
      // global/alloca/heap allocation using the size of the object.
      bool CanBeNull, CanBeFreed;
      uint64_t DerefBytes =
          V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
      if (DerefBytes > 1 && isUIntN(BitWidth, DerefBytes)) {
        // The highest address the object can start is DerefBytes bytes before
        // the end (unsigned max value). If this value is not a multiple of the
        // alignment, the last possible start value is the next lowest multiple
        // of the alignment. Note: The computations below cannot overflow,
        // because if they would there's no possible start address for the
        // object.
        APInt MaxVal =
            APInt::getMaxValue(BitWidth) - APInt(BitWidth, DerefBytes);
        uint64_t Align = U->getValue()->getPointerAlignment(DL).value();
        uint64_t Rem = MaxVal.urem(Align);
        MaxVal -= APInt(BitWidth, Rem);
        APInt MinVal = APInt::getZero(BitWidth);
        if (llvm::isKnownNonZero(V, DL))
          MinVal = Align;
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
      }
    }

    // A range of Phi is a subset of union of all ranges of its input.
    if (PHINode *Phi = dyn_cast<PHINode>(V)) {
      // SCEVExpander sometimes creates SCEVUnknowns that are secretly
      // AddRecs; return the range for the corresponding AddRec.
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(getSCEV(V)))
        return getRangeRef(AR, SignHint, Depth + 1);

      // Make sure that we do not run over cycled Phis.
      if (RangeRefPHIAllowedOperands(DT, Phi)) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);

        for (const auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint, Depth + 1);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point to continue if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
      }
    }

    // vscale can't be equal to zero
    if (const auto *II = dyn_cast<IntrinsicInst>(V))
      if (II->getIntrinsicID() == Intrinsic::vscale) {
        ConstantRange Disallowed = APInt::getZero(BitWidth);
        ConservativeResult = ConservativeResult.difference(Disallowed);
      }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}
7182
7183// Given a StartRange, Step and MaxBECount for an expression compute a range of
7184// values that the expression can take. Initially, the expression has a value
7185// from StartRange and then is changed by Step up to MaxBECount times. Signed
7186// argument defines if we treat Step as signed or unsigned.
7188 const ConstantRange &StartRange,
7189 const APInt &MaxBECount,
7190 bool Signed) {
7191 unsigned BitWidth = Step.getBitWidth();
7192 assert(BitWidth == StartRange.getBitWidth() &&
7193 BitWidth == MaxBECount.getBitWidth() && "mismatched bit widths");
7194 // If either Step or MaxBECount is 0, then the expression won't change, and we
7195 // just need to return the initial range.
7196 if (Step == 0 || MaxBECount == 0)
7197 return StartRange;
7198
7199 // If we don't know anything about the initial value (i.e. StartRange is
7200 // FullRange), then we don't know anything about the final range either.
7201 // Return FullRange.
7202 if (StartRange.isFullSet())
7203 return ConstantRange::getFull(BitWidth);
7204
7205 // If Step is signed and negative, then we use its absolute value, but we also
7206 // note that we're moving in the opposite direction.
7207 bool Descending = Signed && Step.isNegative();
7208
7209 if (Signed)
7210 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
7211 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
7212 // This equations hold true due to the well-defined wrap-around behavior of
7213 // APInt.
7214 Step = Step.abs();
7215
7216 // Check if Offset is more than full span of BitWidth. If it is, the
7217 // expression is guaranteed to overflow.
7218 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
7219 return ConstantRange::getFull(BitWidth);
7220
7221 // Offset is by how much the expression can change. Checks above guarantee no
7222 // overflow here.
7223 APInt Offset = Step * MaxBECount;
7224
7225 // Minimum value of the final range will match the minimal value of StartRange
7226 // if the expression is increasing and will be decreased by Offset otherwise.
7227 // Maximum value of the final range will match the maximal value of StartRange
7228 // if the expression is decreasing and will be increased by Offset otherwise.
7229 APInt StartLower = StartRange.getLower();
7230 APInt StartUpper = StartRange.getUpper() - 1;
7231 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
7232 : (StartUpper + std::move(Offset));
7233
7234 // It's possible that the new minimum/maximum value will fall into the initial
7235 // range (due to wrap around). This means that the expression can take any
7236 // value in this bitwidth, and we have to return full range.
7237 if (StartRange.contains(MovedBoundary))
7238 return ConstantRange::getFull(BitWidth);
7239
7240 APInt NewLower =
7241 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7242 APInt NewUpper =
7243 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7244 NewUpper += 1;
7245
7246 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
7247 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
7248}
7249
7250ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
7251 const SCEV *Step,
7252 const APInt &MaxBECount) {
7253 assert(getTypeSizeInBits(Start->getType()) ==
7254 getTypeSizeInBits(Step->getType()) &&
7255 getTypeSizeInBits(Start->getType()) == MaxBECount.getBitWidth() &&
7256 "mismatched bit widths");
7257
7258 // First, consider step signed.
7259 ConstantRange StartSRange = getSignedRange(Start);
7260 ConstantRange StepSRange = getSignedRange(Step);
7261
7262 // If Step can be both positive and negative, we need to find ranges for the
7263 // maximum absolute step values in both directions and union them.
7264 ConstantRange SR = getRangeForAffineARHelper(
7265 StepSRange.getSignedMin(), StartSRange, MaxBECount, /* Signed = */ true);
7267 StartSRange, MaxBECount,
7268 /* Signed = */ true));
7269
7270 // Next, consider step unsigned.
7271 ConstantRange UR = getRangeForAffineARHelper(
7272 getUnsignedRangeMax(Step), getUnsignedRange(Start), MaxBECount,
7273 /* Signed = */ false);
7274
7275 // Finally, intersect signed and unsigned ranges.
7277}
7278
7279ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7280 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
7281 ScalarEvolution::RangeSignHint SignHint) {
7282 assert(AddRec->isAffine() && "Non-affine AddRecs are not suppored!\n");
7283 assert(AddRec->hasNoSelfWrap() &&
7284 "This only works for non-self-wrapping AddRecs!");
7285 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7286 const SCEV *Step = AddRec->getStepRecurrence(*this);
7287 // Only deal with constant step to save compile time.
7288 if (!isa<SCEVConstant>(Step))
7289 return ConstantRange::getFull(BitWidth);
7290 // Let's make sure that we can prove that we do not self-wrap during
7291 // MaxBECount iterations. We need this because MaxBECount is a maximum
7292 // iteration count estimate, and we might infer nw from some exit for which we
7293 // do not know max exit count (or any other side reasoning).
7294 // TODO: Turn into assert at some point.
7295 if (getTypeSizeInBits(MaxBECount->getType()) >
7296 getTypeSizeInBits(AddRec->getType()))
7297 return ConstantRange::getFull(BitWidth);
7298 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
7299 const SCEV *RangeWidth = getMinusOne(AddRec->getType());
7300 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
7301 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
7302 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
7303 MaxItersWithoutWrap))
7304 return ConstantRange::getFull(BitWidth);
7305
7306 ICmpInst::Predicate LEPred =
7308 ICmpInst::Predicate GEPred =
7310 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
7311
7312 // We know that there is no self-wrap. Let's take Start and End values and
7313 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
7314 // the iteration. They either lie inside the range [Min(Start, End),
7315 // Max(Start, End)] or outside it:
7316 //
7317 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
7318 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
7319 //
7320 // No self wrap flag guarantees that the intermediate values cannot be BOTH
7321 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
7322 // knowledge, let's try to prove that we are dealing with Case 1. It is so if
7323 // Start <= End and step is positive, or Start >= End and step is negative.
7324 const SCEV *Start = applyLoopGuards(AddRec->getStart(), AddRec->getLoop());
7325 ConstantRange StartRange = getRangeRef(Start, SignHint);
7326 ConstantRange EndRange = getRangeRef(End, SignHint);
7327 ConstantRange RangeBetween = StartRange.unionWith(EndRange);
7328 // If they already cover full iteration space, we will know nothing useful
7329 // even if we prove what we want to prove.
7330 if (RangeBetween.isFullSet())
7331 return RangeBetween;
7332 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
7333 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
7334 : RangeBetween.isWrappedSet();
7335 if (IsWrappedSet)
7336 return ConstantRange::getFull(BitWidth);
7337
7338 if (isKnownPositive(Step) &&
7339 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7340 return RangeBetween;
7341 if (isKnownNegative(Step) &&
7342 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7343 return RangeBetween;
7344 return ConstantRange::getFull(BitWidth);
7345}
7346
7347ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
7348 const SCEV *Step,
7349 const APInt &MaxBECount) {
7350 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
7351 // == RangeOf({A,+,P}) union RangeOf({B,+,Q})
7352
7353 unsigned BitWidth = MaxBECount.getBitWidth();
7354 assert(getTypeSizeInBits(Start->getType()) == BitWidth &&
7355 getTypeSizeInBits(Step->getType()) == BitWidth &&
7356 "mismatched bit widths");
7357
7358 struct SelectPattern {
7359 Value *Condition = nullptr;
7360 APInt TrueValue;
7361 APInt FalseValue;
7362
7363 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
7364 const SCEV *S) {
7365 std::optional<unsigned> CastOp;
7366 APInt Offset(BitWidth, 0);
7367
7369 "Should be!");
7370
7371 // Peel off a constant offset. In the future we could consider being
7372 // smarter here and handle {Start+Step,+,Step} too.
7373 const APInt *Off;
7374 if (match(S, m_scev_Add(m_scev_APInt(Off), m_SCEV(S))))
7375 Offset = *Off;
7376
7377 // Peel off a cast operation
7378 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
7379 CastOp = SCast->getSCEVType();
7380 S = SCast->getOperand();
7381 }
7382
7383 using namespace llvm::PatternMatch;
7384
7385 auto *SU = dyn_cast<SCEVUnknown>(S);
7386 const APInt *TrueVal, *FalseVal;
7387 if (!SU ||
7388 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
7389 m_APInt(FalseVal)))) {
7390 Condition = nullptr;
7391 return;
7392 }
7393
7394 TrueValue = *TrueVal;
7395 FalseValue = *FalseVal;
7396
7397 // Re-apply the cast we peeled off earlier
7398 if (CastOp)
7399 switch (*CastOp) {
7400 default:
7401 llvm_unreachable("Unknown SCEV cast type!");
7402
7403 case scTruncate:
7404 TrueValue = TrueValue.trunc(BitWidth);
7405 FalseValue = FalseValue.trunc(BitWidth);
7406 break;
7407 case scZeroExtend:
7408 TrueValue = TrueValue.zext(BitWidth);
7409 FalseValue = FalseValue.zext(BitWidth);
7410 break;
7411 case scSignExtend:
7412 TrueValue = TrueValue.sext(BitWidth);
7413 FalseValue = FalseValue.sext(BitWidth);
7414 break;
7415 }
7416
7417 // Re-apply the constant offset we peeled off earlier
7418 TrueValue += Offset;
7419 FalseValue += Offset;
7420 }
7421
7422 bool isRecognized() { return Condition != nullptr; }
7423 };
7424
7425 SelectPattern StartPattern(*this, BitWidth, Start);
7426 if (!StartPattern.isRecognized())
7427 return ConstantRange::getFull(BitWidth);
7428
7429 SelectPattern StepPattern(*this, BitWidth, Step);
7430 if (!StepPattern.isRecognized())
7431 return ConstantRange::getFull(BitWidth);
7432
7433 if (StartPattern.Condition != StepPattern.Condition) {
7434 // We don't handle this case today; but we could, by considering four
7435 // possibilities below instead of two. I'm not sure if there are cases where
7436 // that will help over what getRange already does, though.
7437 return ConstantRange::getFull(BitWidth);
7438 }
7439
7440 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
7441 // construct arbitrary general SCEV expressions here. This function is called
7442 // from deep in the call stack, and calling getSCEV (on a sext instruction,
7443 // say) can end up caching a suboptimal value.
7444
7445 // FIXME: without the explicit `this` receiver below, MSVC errors out with
7446 // C2352 and C2512 (otherwise it isn't needed).
7447
7448 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
7449 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
7450 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
7451 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);
7452
7453 ConstantRange TrueRange =
7454 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7455 ConstantRange FalseRange =
7456 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7457
7458 return TrueRange.unionWith(FalseRange);
7459}
7460
7461SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
7462 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
7463 const BinaryOperator *BinOp = cast<BinaryOperator>(V);
7464
7465 // Return early if there are no flags to propagate to the SCEV.
7467 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BinOp);
7468 PDI && PDI->isDisjoint()) {
7470 } else {
7471 if (BinOp->hasNoUnsignedWrap())
7473 if (BinOp->hasNoSignedWrap())
7475 }
7476 if (Flags == SCEV::FlagAnyWrap)
7477 return SCEV::FlagAnyWrap;
7478
7479 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
7480}
7481
7482const Instruction *
7483ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
7484 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
7485 return &*AddRec->getLoop()->getHeader()->begin();
7486 if (auto *U = dyn_cast<SCEVUnknown>(S))
7487 if (auto *I = dyn_cast<Instruction>(U->getValue()))
7488 return I;
7489 return nullptr;
7490}
7491
7492const Instruction *ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops,
7493 bool &Precise) {
7494 Precise = true;
7495 // Do a bounded search of the def relation of the requested SCEVs.
7496 SmallPtrSet<const SCEV *, 16> Visited;
7497 SmallVector<SCEVUse> Worklist;
7498 auto pushOp = [&](const SCEV *S) {
7499 if (!Visited.insert(S).second)
7500 return;
7501 // Threshold of 30 here is arbitrary.
7502 if (Visited.size() > 30) {
7503 Precise = false;
7504 return;
7505 }
7506 Worklist.push_back(S);
7507 };
7508
7509 for (SCEVUse S : Ops)
7510 pushOp(S);
7511
7512 const Instruction *Bound = nullptr;
7513 while (!Worklist.empty()) {
7514 SCEVUse S = Worklist.pop_back_val();
7515 if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7516 if (!Bound || DT.dominates(Bound, DefI))
7517 Bound = DefI;
7518 } else {
7519 for (SCEVUse Op : S->operands())
7520 pushOp(Op);
7521 }
7522 }
7523 return Bound ? Bound : &*F.getEntryBlock().begin();
7524}
7525
7526const Instruction *
7527ScalarEvolution::getDefiningScopeBound(ArrayRef<SCEVUse> Ops) {
7528 bool Discard;
7529 return getDefiningScopeBound(Ops, Discard);
7530}
7531
7532bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
7533 const Instruction *B) {
7534 if (A->getParent() == B->getParent() &&
7536 B->getIterator()))
7537 return true;
7538
7539 auto *BLoop = LI.getLoopFor(B->getParent());
7540 if (BLoop && BLoop->getHeader() == B->getParent() &&
7541 BLoop->getLoopPreheader() == A->getParent() &&
7543 A->getParent()->end()) &&
7544 isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
7545 B->getIterator()))
7546 return true;
7547 return false;
7548}
7549
7550bool ScalarEvolution::isGuaranteedNotToBePoison(const SCEV *Op) {
7551 SCEVPoisonCollector PC(/* LookThroughMaybePoisonBlocking */ true);
7552 visitAll(Op, PC);
7553 return PC.MaybePoison.empty();
7554}
7555
7556bool ScalarEvolution::isGuaranteedNotToCauseUB(const SCEV *Op) {
7557 return !SCEVExprContains(Op, [this](const SCEV *S) {
7558 const SCEV *Op1;
7559 bool M = match(S, m_scev_UDiv(m_SCEV(), m_SCEV(Op1)));
7560 // The UDiv may be UB if the divisor is poison or zero. Unless the divisor
7561 // is a non-zero constant, we have to assume the UDiv may be UB.
7562 return M && (!isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7563 });
7564}
7565
7566bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
7567 // Only proceed if we can prove that I does not yield poison.
7569 return false;
7570
7571 // At this point we know that if I is executed, then it does not wrap
7572 // according to at least one of NSW or NUW. If I is not executed, then we do
7573 // not know if the calculation that I represents would wrap. Multiple
7574 // instructions can map to the same SCEV. If we apply NSW or NUW from I to
7575 // the SCEV, we must guarantee no wrapping for that SCEV also when it is
7576 // derived from other instructions that map to the same SCEV. We cannot make
7577 // that guarantee for cases where I is not executed. So we need to find a
7578 // upper bound on the defining scope for the SCEV, and prove that I is
7579 // executed every time we enter that scope. When the bounding scope is a
7580 // loop (the common case), this is equivalent to proving I executes on every
7581 // iteration of that loop.
7582 SmallVector<SCEVUse> SCEVOps;
7583 for (const Use &Op : I->operands()) {
7584 // I could be an extractvalue from a call to an overflow intrinsic.
7585 // TODO: We can do better here in some cases.
7586 if (isSCEVable(Op->getType()))
7587 SCEVOps.push_back(getSCEV(Op));
7588 }
7589 auto *DefI = getDefiningScopeBound(SCEVOps);
7590 return isGuaranteedToTransferExecutionTo(DefI, I);
7591}
7592
7593bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
7594 // If we know that \c I can never be poison period, then that's enough.
7595 if (isSCEVExprNeverPoison(I))
7596 return true;
7597
7598 // If the loop only has one exit, then we know that, if the loop is entered,
7599 // any instruction dominating that exit will be executed. If any such
7600 // instruction would result in UB, the addrec cannot be poison.
7601 //
7602 // This is basically the same reasoning as in isSCEVExprNeverPoison(), but
7603 // also handles uses outside the loop header (they just need to dominate the
7604 // single exit).
7605
7606 auto *ExitingBB = L->getExitingBlock();
7607 if (!ExitingBB || !loopHasNoAbnormalExits(L))
7608 return false;
7609
7610 SmallPtrSet<const Value *, 16> KnownPoison;
7612
7613 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
7614 // things that are known to be poison under that assumption go on the
7615 // Worklist.
7616 KnownPoison.insert(I);
7617 Worklist.push_back(I);
7618
7619 while (!Worklist.empty()) {
7620 const Instruction *Poison = Worklist.pop_back_val();
7621
7622 for (const Use &U : Poison->uses()) {
7623 const Instruction *PoisonUser = cast<Instruction>(U.getUser());
7624 if (mustTriggerUB(PoisonUser, KnownPoison) &&
7625 DT.dominates(PoisonUser->getParent(), ExitingBB))
7626 return true;
7627
7628 if (propagatesPoison(U) && L->contains(PoisonUser))
7629 if (KnownPoison.insert(PoisonUser).second)
7630 Worklist.push_back(PoisonUser);
7631 }
7632 }
7633
7634 return false;
7635}
7636
7637ScalarEvolution::LoopProperties
7638ScalarEvolution::getLoopProperties(const Loop *L) {
7639 using LoopProperties = ScalarEvolution::LoopProperties;
7640
7641 auto Itr = LoopPropertiesCache.find(L);
7642 if (Itr == LoopPropertiesCache.end()) {
7643 auto HasSideEffects = [](Instruction *I) {
7644 if (auto *SI = dyn_cast<StoreInst>(I))
7645 return !SI->isSimple();
7646
7647 if (I->mayThrow())
7648 return true;
7649
7650 // Non-volatile memset / memcpy do not count as side-effect for forward
7651 // progress.
7652 if (isa<MemIntrinsic>(I) && !I->isVolatile())
7653 return false;
7654
7655 return I->mayWriteToMemory();
7656 };
7657
7658 LoopProperties LP = {/* HasNoAbnormalExits */ true,
7659 /*HasNoSideEffects*/ true};
7660
7661 for (auto *BB : L->getBlocks())
7662 for (auto &I : *BB) {
7664 LP.HasNoAbnormalExits = false;
7665 if (HasSideEffects(&I))
7666 LP.HasNoSideEffects = false;
7667 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7668 break; // We're already as pessimistic as we can get.
7669 }
7670
7671 auto InsertPair = LoopPropertiesCache.insert({L, LP});
7672 assert(InsertPair.second && "We just checked!");
7673 Itr = InsertPair.first;
7674 }
7675
7676 return Itr->second;
7677}
7678
7680 // A mustprogress loop without side effects must be finite.
7681 // TODO: The check used here is very conservative. It's only *specific*
7682 // side effects which are well defined in infinite loops.
7683 return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L));
7684}
7685
7686const SCEV *ScalarEvolution::createSCEVIter(Value *V) {
7687 // Worklist item with a Value and a bool indicating whether all operands have
7688 // been visited already.
7691
7692 Stack.emplace_back(V, true);
7693 Stack.emplace_back(V, false);
7694 while (!Stack.empty()) {
7695 auto E = Stack.pop_back_val();
7696 Value *CurV = E.getPointer();
7697
7698 if (getExistingSCEV(CurV))
7699 continue;
7700
7702 const SCEV *CreatedSCEV = nullptr;
7703 // If all operands have been visited already, create the SCEV.
7704 if (E.getInt()) {
7705 CreatedSCEV = createSCEV(CurV);
7706 } else {
7707 // Otherwise get the operands we need to create SCEV's for before creating
7708 // the SCEV for CurV. If the SCEV for CurV can be constructed trivially,
7709 // just use it.
7710 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7711 }
7712
7713 if (CreatedSCEV) {
7714 insertValueToMap(CurV, CreatedSCEV);
7715 } else {
7716 // Queue CurV for SCEV creation, followed by its's operands which need to
7717 // be constructed first.
7718 Stack.emplace_back(CurV, true);
7719 for (Value *Op : Ops)
7720 Stack.emplace_back(Op, false);
7721 }
7722 }
7723
7724 return getExistingSCEV(V);
7725}
7726
7727const SCEV *
7728ScalarEvolution::getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops) {
7729 if (!isSCEVable(V->getType()))
7730 return getUnknown(V);
7731
7732 if (Instruction *I = dyn_cast<Instruction>(V)) {
7733 // Don't attempt to analyze instructions in blocks that aren't
7734 // reachable. Such instructions don't matter, and they aren't required
7735 // to obey basic rules for definitions dominating uses which this
7736 // analysis depends on.
7737 if (!DT.isReachableFromEntry(I->getParent()))
7738 return getUnknown(PoisonValue::get(V->getType()));
7739 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
7740 return getConstant(CI);
7741 else if (isa<GlobalAlias>(V))
7742 return getUnknown(V);
7743 else if (!isa<ConstantExpr>(V))
7744 return getUnknown(V);
7745
7747 if (auto BO =
7749 bool IsConstArg = isa<ConstantInt>(BO->RHS);
7750 switch (BO->Opcode) {
7751 case Instruction::Add:
7752 case Instruction::Mul: {
7753 // For additions and multiplications, traverse add/mul chains for which we
7754 // can potentially create a single SCEV, to reduce the number of
7755 // get{Add,Mul}Expr calls.
7756 do {
7757 if (BO->Op) {
7758 if (BO->Op != V && getExistingSCEV(BO->Op)) {
7759 Ops.push_back(BO->Op);
7760 break;
7761 }
7762 }
7763 Ops.push_back(BO->RHS);
7764 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
7766 if (!NewBO ||
7767 (BO->Opcode == Instruction::Add &&
7768 (NewBO->Opcode != Instruction::Add &&
7769 NewBO->Opcode != Instruction::Sub)) ||
7770 (BO->Opcode == Instruction::Mul &&
7771 NewBO->Opcode != Instruction::Mul)) {
7772 Ops.push_back(BO->LHS);
7773 break;
7774 }
7775 // CreateSCEV calls getNoWrapFlagsFromUB, which under certain conditions
7776 // requires a SCEV for the LHS.
7777 if (BO->Op && (BO->IsNSW || BO->IsNUW)) {
7778 auto *I = dyn_cast<Instruction>(BO->Op);
7779 if (I && programUndefinedIfPoison(I)) {
7780 Ops.push_back(BO->LHS);
7781 break;
7782 }
7783 }
7784 BO = NewBO;
7785 } while (true);
7786 return nullptr;
7787 }
7788 case Instruction::Sub:
7789 case Instruction::UDiv:
7790 case Instruction::URem:
7791 break;
7792 case Instruction::AShr:
7793 case Instruction::Shl:
7794 case Instruction::Xor:
7795 if (!IsConstArg)
7796 return nullptr;
7797 break;
7798 case Instruction::And:
7799 case Instruction::Or:
7800 if (!IsConstArg && !BO->LHS->getType()->isIntegerTy(1))
7801 return nullptr;
7802 break;
7803 case Instruction::LShr:
7804 return getUnknown(V);
7805 default:
7806 llvm_unreachable("Unhandled binop");
7807 break;
7808 }
7809
7810 Ops.push_back(BO->LHS);
7811 Ops.push_back(BO->RHS);
7812 return nullptr;
7813 }
7814
7815 switch (U->getOpcode()) {
7816 case Instruction::Trunc:
7817 case Instruction::ZExt:
7818 case Instruction::SExt:
7819 case Instruction::PtrToAddr:
7820 case Instruction::PtrToInt:
7821 Ops.push_back(U->getOperand(0));
7822 return nullptr;
7823
7824 case Instruction::BitCast:
7825 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) {
7826 Ops.push_back(U->getOperand(0));
7827 return nullptr;
7828 }
7829 return getUnknown(V);
7830
7831 case Instruction::SDiv:
7832 case Instruction::SRem:
7833 Ops.push_back(U->getOperand(0));
7834 Ops.push_back(U->getOperand(1));
7835 return nullptr;
7836
7837 case Instruction::GetElementPtr:
7838 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() &&
7839 "GEP source element type must be sized");
7840 llvm::append_range(Ops, U->operands());
7841 return nullptr;
7842
7843 case Instruction::IntToPtr:
7844 return getUnknown(V);
7845
7846 case Instruction::PHI:
7847 // getNodeForPHI has four ways to turn a PHI into a SCEV; retrieve the
7848 // relevant nodes for each of them.
7849 //
7850 // The first is just to call simplifyInstruction, and get something back
7851 // that isn't a PHI.
7852 if (Value *V = simplifyInstruction(
7853 cast<PHINode>(U),
7854 {getDataLayout(), &TLI, &DT, &AC, /*CtxI=*/nullptr,
7855 /*UseInstrInfo=*/true, /*CanUseUndef=*/false})) {
7856 assert(V);
7857 Ops.push_back(V);
7858 return nullptr;
7859 }
7860 // The second is createNodeForPHIWithIdenticalOperands: this looks for
7861 // operands which all perform the same operation, but haven't been
7862 // CSE'ed for whatever reason.
7863 if (BinaryOperator *BO = getCommonInstForPHI(cast<PHINode>(U))) {
7864 assert(BO);
7865 Ops.push_back(BO);
7866 return nullptr;
7867 }
7868 // The third is createNodeFromSelectLikePHI; this takes a PHI which
7869 // is equivalent to a select, and analyzes it like a select.
7870 {
7871 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
7873 assert(Cond);
7874 assert(LHS);
7875 assert(RHS);
7876 if (auto *CondICmp = dyn_cast<ICmpInst>(Cond)) {
7877 Ops.push_back(CondICmp->getOperand(0));
7878 Ops.push_back(CondICmp->getOperand(1));
7879 }
7880 Ops.push_back(Cond);
7881 Ops.push_back(LHS);
7882 Ops.push_back(RHS);
7883 return nullptr;
7884 }
7885 }
7886 // The fourth way is createAddRecFromPHI. It's complicated to handle here,
7887 // so just construct it recursively.
7888 //
7889 // In addition to getNodeForPHI, also construct nodes which might be needed
7890 // by getRangeRef.
7892 for (Value *V : cast<PHINode>(U)->operands())
7893 Ops.push_back(V);
7894 return nullptr;
7895 }
7896 return nullptr;
7897
7898 case Instruction::Select: {
7899 // Check if U is a select that can be simplified to a SCEVUnknown.
7900 auto CanSimplifyToUnknown = [this, U]() {
7901 if (U->getType()->isIntegerTy(1) || isa<ConstantInt>(U->getOperand(0)))
7902 return false;
7903
7904 auto *ICI = dyn_cast<ICmpInst>(U->getOperand(0));
7905 if (!ICI)
7906 return false;
7907 Value *LHS = ICI->getOperand(0);
7908 Value *RHS = ICI->getOperand(1);
7909 if (ICI->getPredicate() == CmpInst::ICMP_EQ ||
7910 ICI->getPredicate() == CmpInst::ICMP_NE) {
7912 return true;
7913 } else if (getTypeSizeInBits(LHS->getType()) >
7914 getTypeSizeInBits(U->getType()))
7915 return true;
7916 return false;
7917 };
7918 if (CanSimplifyToUnknown())
7919 return getUnknown(U);
7920
7921 llvm::append_range(Ops, U->operands());
7922 return nullptr;
7923 break;
7924 }
7925 case Instruction::Call:
7926 case Instruction::Invoke:
7927 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) {
7928 Ops.push_back(RV);
7929 return nullptr;
7930 }
7931
7932 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7933 switch (II->getIntrinsicID()) {
7934 case Intrinsic::abs:
7935 Ops.push_back(II->getArgOperand(0));
7936 return nullptr;
7937 case Intrinsic::umax:
7938 case Intrinsic::umin:
7939 case Intrinsic::smax:
7940 case Intrinsic::smin:
7941 case Intrinsic::usub_sat:
7942 case Intrinsic::uadd_sat:
7943 Ops.push_back(II->getArgOperand(0));
7944 Ops.push_back(II->getArgOperand(1));
7945 return nullptr;
7946 case Intrinsic::start_loop_iterations:
7947 case Intrinsic::annotation:
7948 case Intrinsic::ptr_annotation:
7949 Ops.push_back(II->getArgOperand(0));
7950 return nullptr;
7951 default:
7952 break;
7953 }
7954 }
7955 break;
7956 }
7957
7958 return nullptr;
7959}
7960
7961const SCEV *ScalarEvolution::createSCEV(Value *V) {
7962 if (!isSCEVable(V->getType()))
7963 return getUnknown(V);
7964
7965 if (Instruction *I = dyn_cast<Instruction>(V)) {
7966 // Don't attempt to analyze instructions in blocks that aren't
7967 // reachable. Such instructions don't matter, and they aren't required
7968 // to obey basic rules for definitions dominating uses which this
7969 // analysis depends on.
7970 if (!DT.isReachableFromEntry(I->getParent()))
7971 return getUnknown(PoisonValue::get(V->getType()));
7972 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
7973 return getConstant(CI);
7974 else if (isa<GlobalAlias>(V))
7975 return getUnknown(V);
7976 else if (!isa<ConstantExpr>(V))
7977 return getUnknown(V);
7978
7979 const SCEV *LHS;
7980 const SCEV *RHS;
7981
7983 if (auto BO =
7985 switch (BO->Opcode) {
7986 case Instruction::Add: {
7987 // The simple thing to do would be to just call getSCEV on both operands
7988 // and call getAddExpr with the result. However if we're looking at a
7989 // bunch of things all added together, this can be quite inefficient,
7990 // because it leads to N-1 getAddExpr calls for N ultimate operands.
7991 // Instead, gather up all the operands and make a single getAddExpr call.
7992 // LLVM IR canonical form means we need only traverse the left operands.
7994 do {
7995 if (BO->Op) {
7996 if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
7997 AddOps.push_back(OpSCEV);
7998 break;
7999 }
8000
8001 // If a NUW or NSW flag can be applied to the SCEV for this
8002 // addition, then compute the SCEV for this addition by itself
8003 // with a separate call to getAddExpr. We need to do that
8004 // instead of pushing the operands of the addition onto AddOps,
8005 // since the flags are only known to apply to this particular
8006 // addition - they may not apply to other additions that can be
8007 // formed with operands from AddOps.
8008 const SCEV *RHS = getSCEV(BO->RHS);
8009 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
8010 if (Flags != SCEV::FlagAnyWrap) {
8011 const SCEV *LHS = getSCEV(BO->LHS);
8012 if (BO->Opcode == Instruction::Sub)
8013 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
8014 else
8015 AddOps.push_back(getAddExpr(LHS, RHS, Flags));
8016 break;
8017 }
8018 }
8019
8020 if (BO->Opcode == Instruction::Sub)
8021 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
8022 else
8023 AddOps.push_back(getSCEV(BO->RHS));
8024
8025 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
8027 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
8028 NewBO->Opcode != Instruction::Sub)) {
8029 AddOps.push_back(getSCEV(BO->LHS));
8030 break;
8031 }
8032 BO = NewBO;
8033 } while (true);
8034
8035 return getAddExpr(AddOps);
8036 }
8037
8038 case Instruction::Mul: {
8040 do {
8041 if (BO->Op) {
8042 if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
8043 MulOps.push_back(OpSCEV);
8044 break;
8045 }
8046
8047 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
8048 if (Flags != SCEV::FlagAnyWrap) {
8049 LHS = getSCEV(BO->LHS);
8050 RHS = getSCEV(BO->RHS);
8051 MulOps.push_back(getMulExpr(LHS, RHS, Flags));
8052 break;
8053 }
8054 }
8055
8056 MulOps.push_back(getSCEV(BO->RHS));
8057 auto NewBO = MatchBinaryOp(BO->LHS, getDataLayout(), AC, DT,
8059 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
8060 MulOps.push_back(getSCEV(BO->LHS));
8061 break;
8062 }
8063 BO = NewBO;
8064 } while (true);
8065
8066 return getMulExpr(MulOps);
8067 }
8068 case Instruction::UDiv:
8069 LHS = getSCEV(BO->LHS);
8070 RHS = getSCEV(BO->RHS);
8071 return getUDivExpr(LHS, RHS);
8072 case Instruction::URem:
8073 LHS = getSCEV(BO->LHS);
8074 RHS = getSCEV(BO->RHS);
8075 return getURemExpr(LHS, RHS);
8076 case Instruction::Sub: {
8078 if (BO->Op)
8079 Flags = getNoWrapFlagsFromUB(BO->Op);
8080 LHS = getSCEV(BO->LHS);
8081 RHS = getSCEV(BO->RHS);
8082 return getMinusSCEV(LHS, RHS, Flags);
8083 }
8084 case Instruction::And:
8085 // For an expression like x&255 that merely masks off the high bits,
8086 // use zext(trunc(x)) as the SCEV expression.
8087 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
8088 if (CI->isZero())
8089 return getSCEV(BO->RHS);
8090 if (CI->isMinusOne())
8091 return getSCEV(BO->LHS);
8092 const APInt &A = CI->getValue();
8093
8094 // Instcombine's ShrinkDemandedConstant may strip bits out of
8095 // constants, obscuring what would otherwise be a low-bits mask.
8096 // Use computeKnownBits to compute what ShrinkDemandedConstant
8097 // knew about to reconstruct a low-bits mask value.
8098 unsigned LZ = A.countl_zero();
8099 unsigned TZ = A.countr_zero();
8100 unsigned BitWidth = A.getBitWidth();
8101 KnownBits Known(BitWidth);
8102 computeKnownBits(BO->LHS, Known, getDataLayout(), &AC, nullptr, &DT);
8103
8104 APInt EffectiveMask =
8105 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
8106 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
8107 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
8108 const SCEV *LHS = getSCEV(BO->LHS);
8109 const SCEV *ShiftedLHS = nullptr;
8110 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
8111 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
8112 // For an expression like (x * 8) & 8, simplify the multiply.
8113 unsigned MulZeros = OpC->getAPInt().countr_zero();
8114 unsigned GCD = std::min(MulZeros, TZ);
8115 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
8117 MulOps.push_back(getConstant(OpC->getAPInt().ashr(GCD)));
8118 append_range(MulOps, LHSMul->operands().drop_front());
8119 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
8120 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
8121 }
8122 }
8123 if (!ShiftedLHS)
8124 ShiftedLHS = getUDivExpr(LHS, MulCount);
8125 return getMulExpr(
8127 getTruncateExpr(ShiftedLHS,
8128 IntegerType::get(getContext(), BitWidth - LZ - TZ)),
8129 BO->LHS->getType()),
8130 MulCount);
8131 }
8132 }
8133 // Binary `and` is a bit-wise `umin`.
8134 if (BO->LHS->getType()->isIntegerTy(1)) {
8135 LHS = getSCEV(BO->LHS);
8136 RHS = getSCEV(BO->RHS);
8137 return getUMinExpr(LHS, RHS);
8138 }
8139 break;
8140
8141 case Instruction::Or:
8142 // Binary `or` is a bit-wise `umax`.
8143 if (BO->LHS->getType()->isIntegerTy(1)) {
8144 LHS = getSCEV(BO->LHS);
8145 RHS = getSCEV(BO->RHS);
8146 return getUMaxExpr(LHS, RHS);
8147 }
8148 break;
8149
8150 case Instruction::Xor:
8151 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
8152 // If the RHS of xor is -1, then this is a not operation.
8153 if (CI->isMinusOne())
8154 return getNotSCEV(getSCEV(BO->LHS));
8155
8156 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
8157 // This is a variant of the check for xor with -1, and it handles
8158 // the case where instcombine has trimmed non-demanded bits out
8159 // of an xor with -1.
8160 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
8161 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
8162 if (LBO->getOpcode() == Instruction::And &&
8163 LCI->getValue() == CI->getValue())
8164 if (const SCEVZeroExtendExpr *Z =
8166 Type *UTy = BO->LHS->getType();
8167 const SCEV *Z0 = Z->getOperand();
8168 Type *Z0Ty = Z0->getType();
8169 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
8170
8171 // If C is a low-bits mask, the zero extend is serving to
8172 // mask off the high bits. Complement the operand and
8173 // re-apply the zext.
8174 if (CI->getValue().isMask(Z0TySize))
8175 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
8176
8177 // If C is a single bit, it may be in the sign-bit position
8178 // before the zero-extend. In this case, represent the xor
8179 // using an add, which is equivalent, and re-apply the zext.
8180 APInt Trunc = CI->getValue().trunc(Z0TySize);
8181 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
8182 Trunc.isSignMask())
8183 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
8184 UTy);
8185 }
8186 }
8187 break;
8188
8189 case Instruction::Shl:
8190 // Turn shift left of a constant amount into a multiply.
8191 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
8192 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
8193
8194 // If the shift count is not less than the bitwidth, the result of
8195 // the shift is undefined. Don't try to analyze it, because the
8196 // resolution chosen here may differ from the resolution chosen in
8197 // other parts of the compiler.
8198 if (SA->getValue().uge(BitWidth))
8199 break;
8200
8201 // We can safely preserve the nuw flag in all cases. It's also safe to
8202 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
8203 // requires special handling. It can be preserved as long as we're not
8204 // left shifting by bitwidth - 1.
8205 auto Flags = SCEV::FlagAnyWrap;
8206 if (BO->Op) {
8207 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
8208 if (any(MulFlags & SCEV::FlagNSW) &&
8209 (any(MulFlags & SCEV::FlagNUW) ||
8210 SA->getValue().ult(BitWidth - 1)))
8212 if (any(MulFlags & SCEV::FlagNUW))
8214 }
8215
8216 ConstantInt *X = ConstantInt::get(
8217 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
8218 return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags);
8219 }
8220 break;
8221
8222 case Instruction::AShr:
8223 // AShr X, C, where C is a constant.
8224 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
8225 if (!CI)
8226 break;
8227
8228 Type *OuterTy = BO->LHS->getType();
8229 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
8230 // If the shift count is not less than the bitwidth, the result of
8231 // the shift is undefined. Don't try to analyze it, because the
8232 // resolution chosen here may differ from the resolution chosen in
8233 // other parts of the compiler.
8234 if (CI->getValue().uge(BitWidth))
8235 break;
8236
8237 if (CI->isZero())
8238 return getSCEV(BO->LHS); // shift by zero --> noop
8239
8240 uint64_t AShrAmt = CI->getZExtValue();
8241 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
8242
8243 Operator *L = dyn_cast<Operator>(BO->LHS);
8244 const SCEV *AddTruncateExpr = nullptr;
8245 ConstantInt *ShlAmtCI = nullptr;
8246 const SCEV *AddConstant = nullptr;
8247
8248 if (L && L->getOpcode() == Instruction::Add) {
8249 // X = Shl A, n
8250 // Y = Add X, c
8251 // Z = AShr Y, m
8252 // n, c and m are constants.
8253
8254 Operator *LShift = dyn_cast<Operator>(L->getOperand(0));
8255 ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(L->getOperand(1));
8256 if (LShift && LShift->getOpcode() == Instruction::Shl) {
8257 if (AddOperandCI) {
8258 const SCEV *ShlOp0SCEV = getSCEV(LShift->getOperand(0));
8259 ShlAmtCI = dyn_cast<ConstantInt>(LShift->getOperand(1));
8260 // since we truncate to TruncTy, the AddConstant should be of the
8261 // same type, so create a new Constant with type same as TruncTy.
8262 // Also, the Add constant should be shifted right by AShr amount.
8263 APInt AddOperand = AddOperandCI->getValue().ashr(AShrAmt);
8264 AddConstant = getConstant(AddOperand.trunc(BitWidth - AShrAmt));
8265 // we model the expression as sext(add(trunc(A), c << n)), since the
8266 // sext(trunc) part is already handled below, we create a
8267 // AddExpr(TruncExp) which will be used later.
8268 AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
8269 }
8270 }
8271 } else if (L && L->getOpcode() == Instruction::Shl) {
8272 // X = Shl A, n
8273 // Y = AShr X, m
8274 // Both n and m are constant.
8275
8276 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
8277 ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
8278 AddTruncateExpr = getTruncateExpr(ShlOp0SCEV, TruncTy);
8279 }
8280
8281 if (AddTruncateExpr && ShlAmtCI) {
8282 // We can merge the two given cases into a single SCEV statement,
8283 // incase n = m, the mul expression will be 2^0, so it gets resolved to
8284 // a simpler case. The following code handles the two cases:
8285 //
8286 // 1) For a two-shift sext-inreg, i.e. n = m,
8287 // use sext(trunc(x)) as the SCEV expression.
8288 //
8289 // 2) When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
8290 // expression. We already checked that ShlAmt < BitWidth, so
8291 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
8292 // ShlAmt - AShrAmt < Amt.
8293 const APInt &ShlAmt = ShlAmtCI->getValue();
8294 if (ShlAmt.ult(BitWidth) && ShlAmt.uge(AShrAmt)) {
8295 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
8296 ShlAmtCI->getZExtValue() - AShrAmt);
8297 const SCEV *CompositeExpr =
8298 getMulExpr(AddTruncateExpr, getConstant(Mul));
8299 if (L->getOpcode() != Instruction::Shl)
8300 CompositeExpr = getAddExpr(CompositeExpr, AddConstant);
8301
8302 return getSignExtendExpr(CompositeExpr, OuterTy);
8303 }
8304 }
8305 break;
8306 }
8307 }
8308
8309 switch (U->getOpcode()) {
8310 case Instruction::Trunc:
8311 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
8312
8313 case Instruction::ZExt:
8314 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
8315
8316 case Instruction::SExt:
8317 if (auto BO = MatchBinaryOp(U->getOperand(0), getDataLayout(), AC, DT,
8319 // The NSW flag of a subtract does not always survive the conversion to
8320 // A + (-1)*B. By pushing sign extension onto its operands we are much
8321 // more likely to preserve NSW and allow later AddRec optimisations.
8322 //
8323 // NOTE: This is effectively duplicating this logic from getSignExtend:
8324 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
8325 // but by that point the NSW information has potentially been lost.
8326 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8327 Type *Ty = U->getType();
8328 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
8329 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
8330 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
8331 }
8332 }
8333 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
8334
8335 case Instruction::BitCast:
8336 // BitCasts are no-op casts so we just eliminate the cast.
8337 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
8338 return getSCEV(U->getOperand(0));
8339 break;
8340
8341 case Instruction::PtrToAddr: {
8342 const SCEV *IntOp = getPtrToAddrExpr(getSCEV(U->getOperand(0)));
8343 if (isa<SCEVCouldNotCompute>(IntOp))
8344 return getUnknown(V);
8345 return IntOp;
8346 }
8347
8348 case Instruction::PtrToInt: {
8349 // Pointer to integer cast is straight-forward, so do model it.
8350 const SCEV *Op = getSCEV(U->getOperand(0));
8351 Type *DstIntTy = U->getType();
8352 // But only if effective SCEV (integer) type is wide enough to represent
8353 // all possible pointer values.
8354 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
8355 if (isa<SCEVCouldNotCompute>(IntOp))
8356 return getUnknown(V);
8357 return IntOp;
8358 }
8359 case Instruction::IntToPtr:
8360 // Just don't deal with inttoptr casts.
8361 return getUnknown(V);
8362
8363 case Instruction::SDiv:
8364 // If both operands are non-negative, this is just an udiv.
8365 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
8366 isKnownNonNegative(getSCEV(U->getOperand(1))))
8367 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
8368 break;
8369
8370 case Instruction::SRem:
8371 // If both operands are non-negative, this is just an urem.
8372 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
8373 isKnownNonNegative(getSCEV(U->getOperand(1))))
8374 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
8375 break;
8376
8377 case Instruction::GetElementPtr:
8378 return createNodeForGEP(cast<GEPOperator>(U));
8379
8380 case Instruction::PHI:
8381 return createNodeForPHI(cast<PHINode>(U));
8382
8383 case Instruction::Select:
8384 return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1),
8385 U->getOperand(2));
8386
8387 case Instruction::Call:
8388 case Instruction::Invoke:
8389 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
8390 return getSCEV(RV);
8391
8392 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
8393 switch (II->getIntrinsicID()) {
8394 case Intrinsic::abs:
8395 return getAbsExpr(
8396 getSCEV(II->getArgOperand(0)),
8397 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
8398 case Intrinsic::umax:
8399 LHS = getSCEV(II->getArgOperand(0));
8400 RHS = getSCEV(II->getArgOperand(1));
8401 return getUMaxExpr(LHS, RHS);
8402 case Intrinsic::umin:
8403 LHS = getSCEV(II->getArgOperand(0));
8404 RHS = getSCEV(II->getArgOperand(1));
8405 return getUMinExpr(LHS, RHS);
8406 case Intrinsic::smax:
8407 LHS = getSCEV(II->getArgOperand(0));
8408 RHS = getSCEV(II->getArgOperand(1));
8409 return getSMaxExpr(LHS, RHS);
8410 case Intrinsic::smin:
8411 LHS = getSCEV(II->getArgOperand(0));
8412 RHS = getSCEV(II->getArgOperand(1));
8413 return getSMinExpr(LHS, RHS);
8414 case Intrinsic::usub_sat: {
8415 const SCEV *X = getSCEV(II->getArgOperand(0));
8416 const SCEV *Y = getSCEV(II->getArgOperand(1));
8417 const SCEV *ClampedY = getUMinExpr(X, Y);
8418 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
8419 }
8420 case Intrinsic::uadd_sat: {
8421 const SCEV *X = getSCEV(II->getArgOperand(0));
8422 const SCEV *Y = getSCEV(II->getArgOperand(1));
8423 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
8424 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
8425 }
8426 case Intrinsic::start_loop_iterations:
8427 case Intrinsic::annotation:
8428 case Intrinsic::ptr_annotation:
 8429 // A start_loop_iterations or llvm.annotation or llvm.ptr.annotation is
 8430 // just equivalent to the first operand for SCEV purposes.
8431 return getSCEV(II->getArgOperand(0));
8432 case Intrinsic::vscale:
8433 return getVScale(II->getType());
8434 default:
8435 break;
8436 }
8437 }
8438 break;
8439 }
8440
8441 return getUnknown(V);
8442}
8443
8444//===----------------------------------------------------------------------===//
8445// Iteration Count Computation Code
8446//
8447
8449 if (isa<SCEVCouldNotCompute>(ExitCount))
8450 return getCouldNotCompute();
8451
8452 auto *ExitCountType = ExitCount->getType();
8453 assert(ExitCountType->isIntegerTy());
8454 auto *EvalTy = Type::getIntNTy(ExitCountType->getContext(),
8455 1 + ExitCountType->getScalarSizeInBits());
8456 return getTripCountFromExitCount(ExitCount, EvalTy, nullptr);
8457}
8458
8460 Type *EvalTy,
8461 const Loop *L) {
8462 if (isa<SCEVCouldNotCompute>(ExitCount))
8463 return getCouldNotCompute();
8464
8465 unsigned ExitCountSize = getTypeSizeInBits(ExitCount->getType());
8466 unsigned EvalSize = EvalTy->getPrimitiveSizeInBits();
8467
8468 auto CanAddOneWithoutOverflow = [&]() {
8469 ConstantRange ExitCountRange =
8470 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8471 if (!ExitCountRange.contains(APInt::getMaxValue(ExitCountSize)))
8472 return true;
8473
8474 return L && isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, ExitCount,
8475 getMinusOne(ExitCount->getType()));
8476 };
8477
8478 // If we need to zero extend the backedge count, check if we can add one to
8479 // it prior to zero extending without overflow. Provided this is safe, it
8480 // allows better simplification of the +1.
8481 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8482 return getZeroExtendExpr(
8483 getAddExpr(ExitCount, getOne(ExitCount->getType())), EvalTy);
8484
8485 // Get the total trip count from the count by adding 1. This may wrap.
8486 return getAddExpr(getTruncateOrZeroExtend(ExitCount, EvalTy), getOne(EvalTy));
8487}
8488
8489static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
8490 if (!ExitCount)
8491 return 0;
8492
8493 ConstantInt *ExitConst = ExitCount->getValue();
8494
8495 // Guard against huge trip counts.
8496 if (ExitConst->getValue().getActiveBits() > 32)
8497 return 0;
8498
8499 // In case of integer overflow, this returns 0, which is correct.
8500 return ((unsigned)ExitConst->getZExtValue()) + 1;
8501}
8502
8504 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
8505 return getConstantTripCount(ExitCount);
8506}
8507
8508unsigned
8510 const BasicBlock *ExitingBlock) {
8511 assert(ExitingBlock && "Must pass a non-null exiting block!");
8512 assert(L->isLoopExiting(ExitingBlock) &&
8513 "Exiting block must actually branch out of the loop!");
8514 const SCEVConstant *ExitCount =
8515 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
8516 return getConstantTripCount(ExitCount);
8517}
8518
8520 const Loop *L, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8521
8522 const auto *MaxExitCount =
8523 Predicates ? getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
8525 return getConstantTripCount(dyn_cast<SCEVConstant>(MaxExitCount));
8526}
8527
8529 SmallVector<BasicBlock *, 8> ExitingBlocks;
8530 L->getExitingBlocks(ExitingBlocks);
8531
8532 std::optional<unsigned> Res;
8533 for (auto *ExitingBB : ExitingBlocks) {
8534 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
8535 if (!Res)
8536 Res = Multiple;
8537 Res = std::gcd(*Res, Multiple);
8538 }
8539 return Res.value_or(1);
8540}
8541
8543 const SCEV *ExitCount) {
8544 if (isa<SCEVCouldNotCompute>(ExitCount))
8545 return 1;
8546
8547 // Get the trip count
8548 const SCEV *TCExpr = getTripCountFromExitCount(applyLoopGuards(ExitCount, L));
8549
8550 APInt Multiple = getNonZeroConstantMultiple(TCExpr);
8551 // If a trip multiple is huge (>=2^32), the trip count is still divisible by
8552 // the greatest power of 2 divisor less than 2^32.
8553 return Multiple.getActiveBits() > 32
8554 ? 1U << std::min(31U, Multiple.countTrailingZeros())
8555 : (unsigned)Multiple.getZExtValue();
8556}
8557
8558/// Returns the largest constant divisor of the trip count of this loop as a
8559/// normal unsigned value, if possible. This means that the actual trip count is
8560/// always a multiple of the returned value (don't forget the trip count could
8561/// very well be zero as well!).
8562///
8563/// Returns 1 if the trip count is unknown or not guaranteed to be the
8564/// multiple of a constant (which is also the case if the trip count is simply
 8565 /// constant; use getSmallConstantTripCount for that case). Will also return 1
8566/// if the trip count is very large (>= 2^32).
8567///
8568/// As explained in the comments for getSmallConstantTripCount, this assumes
8569/// that control exits the loop via ExitingBlock.
8570unsigned
8572 const BasicBlock *ExitingBlock) {
8573 assert(ExitingBlock && "Must pass a non-null exiting block!");
8574 assert(L->isLoopExiting(ExitingBlock) &&
8575 "Exiting block must actually branch out of the loop!");
8576 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
8577 return getSmallConstantTripMultiple(L, ExitCount);
8578}
8579
8581 const BasicBlock *ExitingBlock,
8582 ExitCountKind Kind) {
8583 switch (Kind) {
8584 case Exact:
8585 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
8586 case SymbolicMaximum:
8587 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock, this);
8588 case ConstantMaximum:
8589 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
8590 };
8591 llvm_unreachable("Invalid ExitCountKind!");
8592}
8593
8595 const Loop *L, const BasicBlock *ExitingBlock,
8597 switch (Kind) {
8598 case Exact:
8599 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock, this,
8600 Predicates);
8601 case SymbolicMaximum:
8602 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock, this,
8603 Predicates);
8604 case ConstantMaximum:
8605 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this,
8606 Predicates);
8607 };
8608 llvm_unreachable("Invalid ExitCountKind!");
8609}
8610
8613 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
8614}
8615
8617 ExitCountKind Kind) {
8618 switch (Kind) {
8619 case Exact:
8620 return getBackedgeTakenInfo(L).getExact(L, this);
8621 case ConstantMaximum:
8622 return getBackedgeTakenInfo(L).getConstantMax(this);
8623 case SymbolicMaximum:
8624 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
8625 };
8626 llvm_unreachable("Invalid ExitCountKind!");
8627}
8628
8631 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L, this, &Preds);
8632}
8633
8636 return getPredicatedBackedgeTakenInfo(L).getConstantMax(this, &Preds);
8637}
8638
8640 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
8641}
8642
8643/// Push PHI nodes in the header of the given loop onto the given Worklist.
8644static void PushLoopPHIs(const Loop *L,
8647 BasicBlock *Header = L->getHeader();
8648
8649 // Push all Loop-header PHIs onto the Worklist stack.
8650 for (PHINode &PN : Header->phis())
8651 if (Visited.insert(&PN).second)
8652 Worklist.push_back(&PN);
8653}
8654
8655ScalarEvolution::BackedgeTakenInfo &
8656ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
8657 auto &BTI = getBackedgeTakenInfo(L);
8658 if (BTI.hasFullInfo())
8659 return BTI;
8660
8661 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8662
8663 if (!Pair.second)
8664 return Pair.first->second;
8665
8666 BackedgeTakenInfo Result =
8667 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
8668
8669 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8670}
8671
8672ScalarEvolution::BackedgeTakenInfo &
8673ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
8674 // Initially insert an invalid entry for this loop. If the insertion
8675 // succeeds, proceed to actually compute a backedge-taken count and
8676 // update the value. The temporary CouldNotCompute value tells SCEV
8677 // code elsewhere that it shouldn't attempt to request a new
8678 // backedge-taken count, which could result in infinite recursion.
8679 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
8680 BackedgeTakenCounts.try_emplace(L);
8681 if (!Pair.second)
8682 return Pair.first->second;
8683
8684 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
8685 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
8686 // must be cleared in this scope.
8687 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
8688
8689 // Now that we know more about the trip count for this loop, forget any
8690 // existing SCEV values for PHI nodes in this loop since they are only
8691 // conservative estimates made without the benefit of trip count
8692 // information. This invalidation is not necessary for correctness, and is
8693 // only done to produce more precise results.
8694 if (Result.hasAnyInfo()) {
8695 // Invalidate any expression using an addrec in this loop.
8696 SmallVector<SCEVUse, 8> ToForget;
8697 auto LoopUsersIt = LoopUsers.find(L);
8698 if (LoopUsersIt != LoopUsers.end())
8699 append_range(ToForget, LoopUsersIt->second);
8700 forgetMemoizedResults(ToForget);
8701
8702 // Invalidate constant-evolved loop header phis.
8703 for (PHINode &PN : L->getHeader()->phis())
8704 ConstantEvolutionLoopExitValue.erase(&PN);
8705 }
8706
8707 // Re-lookup the insert position, since the call to
8708 // computeBackedgeTakenCount above could result in a
8709 // recusive call to getBackedgeTakenInfo (on a different
8710 // loop), which would invalidate the iterator computed
8711 // earlier.
8712 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8713}
8714
8716 // This method is intended to forget all info about loops. It should
8717 // invalidate caches as if the following happened:
8718 // - The trip counts of all loops have changed arbitrarily
8719 // - Every llvm::Value has been updated in place to produce a different
8720 // result.
8721 BackedgeTakenCounts.clear();
8722 PredicatedBackedgeTakenCounts.clear();
8723 BECountUsers.clear();
8724 LoopPropertiesCache.clear();
8725 ConstantEvolutionLoopExitValue.clear();
8726 ValueExprMap.clear();
8727 ValuesAtScopes.clear();
8728 ValuesAtScopesUsers.clear();
8729 LoopDispositions.clear();
8730 BlockDispositions.clear();
8731 UnsignedRanges.clear();
8732 SignedRanges.clear();
8733 ExprValueMap.clear();
8734 HasRecMap.clear();
8735 ConstantMultipleCache.clear();
8736 PredicatedSCEVRewrites.clear();
8737 FoldCache.clear();
8738 FoldCacheUser.clear();
8739}
8740void ScalarEvolution::visitAndClearUsers(
8743 SmallVectorImpl<SCEVUse> &ToForget) {
8744 while (!Worklist.empty()) {
8745 Instruction *I = Worklist.pop_back_val();
8746 if (!isSCEVable(I->getType()) && !isa<WithOverflowInst>(I))
8747 continue;
8748
8750 ValueExprMap.find_as(static_cast<Value *>(I));
8751 if (It != ValueExprMap.end()) {
8752 eraseValueFromMap(It->first);
8753 ToForget.push_back(It->second);
8754 if (PHINode *PN = dyn_cast<PHINode>(I))
8755 ConstantEvolutionLoopExitValue.erase(PN);
8756 }
8757
8758 PushDefUseChildren(I, Worklist, Visited);
8759 }
8760}
8761
8763 SmallVector<const Loop *, 16> LoopWorklist(1, L);
8766 SmallVector<SCEVUse, 16> ToForget;
8767
8768 // Iterate over all the loops and sub-loops to drop SCEV information.
8769 while (!LoopWorklist.empty()) {
8770 auto *CurrL = LoopWorklist.pop_back_val();
8771
8772 // Drop any stored trip count value.
8773 forgetBackedgeTakenCounts(CurrL, /* Predicated */ false);
8774 forgetBackedgeTakenCounts(CurrL, /* Predicated */ true);
8775
8776 // Drop information about predicated SCEV rewrites for this loop.
8777 for (auto I = PredicatedSCEVRewrites.begin();
8778 I != PredicatedSCEVRewrites.end();) {
8779 std::pair<const SCEV *, const Loop *> Entry = I->first;
8780 if (Entry.second == CurrL)
8781 PredicatedSCEVRewrites.erase(I++);
8782 else
8783 ++I;
8784 }
8785
8786 auto LoopUsersItr = LoopUsers.find(CurrL);
8787 if (LoopUsersItr != LoopUsers.end())
8788 llvm::append_range(ToForget, LoopUsersItr->second);
8789
8790 // Drop information about expressions based on loop-header PHIs.
8791 PushLoopPHIs(CurrL, Worklist, Visited);
8792 visitAndClearUsers(Worklist, Visited, ToForget);
8793
8794 LoopPropertiesCache.erase(CurrL);
8795 // Forget all contained loops too, to avoid dangling entries in the
8796 // ValuesAtScopes map.
8797 LoopWorklist.append(CurrL->begin(), CurrL->end());
8798 }
8799 forgetMemoizedResults(ToForget);
8800}
8801
8803 forgetLoop(L->getOutermostLoop());
8804}
8805
8808 if (!I) return;
8809
8810 // Drop information about expressions based on loop-header PHIs.
8813 SmallVector<SCEVUse, 8> ToForget;
8814 Worklist.push_back(I);
8815 Visited.insert(I);
8816 visitAndClearUsers(Worklist, Visited, ToForget);
8817
8818 forgetMemoizedResults(ToForget);
8819}
8820
8822 if (!isSCEVable(V->getType()))
8823 return;
8824
8825 // If SCEV looked through a trivial LCSSA phi node, we might have SCEV's
8826 // directly using a SCEVUnknown/SCEVAddRec defined in the loop. After an
8827 // extra predecessor is added, this is no longer valid. Find all Unknowns and
8828 // AddRecs defined in the loop and invalidate any SCEV's making use of them.
8829 if (const SCEV *S = getExistingSCEV(V)) {
8830 struct InvalidationRootCollector {
8831 Loop *L;
8833
8834 InvalidationRootCollector(Loop *L) : L(L) {}
8835
8836 bool follow(const SCEV *S) {
8837 if (auto *SU = dyn_cast<SCEVUnknown>(S)) {
8838 if (auto *I = dyn_cast<Instruction>(SU->getValue()))
8839 if (L->contains(I))
8840 Roots.push_back(S);
8841 } else if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
8842 if (L->contains(AddRec->getLoop()))
8843 Roots.push_back(S);
8844 }
8845 return true;
8846 }
8847 bool isDone() const { return false; }
8848 };
8849
8850 InvalidationRootCollector C(L);
8851 visitAll(S, C);
8852 forgetMemoizedResults(C.Roots);
8853 }
8854
8855 // Also perform the normal invalidation.
8856 forgetValue(V);
8857}
8858
8859void ScalarEvolution::forgetLoopDispositions() { LoopDispositions.clear(); }
8860
8862 // Unless a specific value is passed to invalidation, completely clear both
8863 // caches.
8864 if (!V) {
8865 BlockDispositions.clear();
8866 LoopDispositions.clear();
8867 return;
8868 }
8869
8870 if (!isSCEVable(V->getType()))
8871 return;
8872
8873 const SCEV *S = getExistingSCEV(V);
8874 if (!S)
8875 return;
8876
8877 // Invalidate the block and loop dispositions cached for S. Dispositions of
8878 // S's users may change if S's disposition changes (i.e. a user may change to
8879 // loop-invariant, if S changes to loop invariant), so also invalidate
8880 // dispositions of S's users recursively.
8881 SmallVector<SCEVUse, 8> Worklist = {S};
8883 while (!Worklist.empty()) {
8884 const SCEV *Curr = Worklist.pop_back_val();
8885 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8886 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8887 if (!LoopDispoRemoved && !BlockDispoRemoved)
8888 continue;
8889 auto Users = SCEVUsers.find(Curr);
8890 if (Users != SCEVUsers.end())
8891 for (const auto *User : Users->second)
8892 if (Seen.insert(User).second)
8893 Worklist.push_back(User);
8894 }
8895}
8896
8897/// Get the exact loop backedge taken count considering all loop exits. A
8898/// computable result can only be returned for loops with all exiting blocks
8899/// dominating the latch. howFarToZero assumes that the limit of each loop test
8900/// is never skipped. This is a valid assumption as long as the loop exits via
8901/// that test. For precise results, it is the caller's responsibility to specify
8902/// the relevant loop exiting block using getExact(ExitingBlock, SE).
8903const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8904 const Loop *L, ScalarEvolution *SE,
8906 // If any exits were not computable, the loop is not computable.
8907 if (!isComplete() || ExitNotTaken.empty())
8908 return SE->getCouldNotCompute();
8909
8910 const BasicBlock *Latch = L->getLoopLatch();
8911 // All exiting blocks we have collected must dominate the only backedge.
8912 if (!Latch)
8913 return SE->getCouldNotCompute();
8914
8915 // All exiting blocks we have gathered dominate loop's latch, so exact trip
8916 // count is simply a minimum out of all these calculated exit counts.
8918 for (const auto &ENT : ExitNotTaken) {
8919 const SCEV *BECount = ENT.ExactNotTaken;
8920 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
8921 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
8922 "We should only have known counts for exiting blocks that dominate "
8923 "latch!");
8924
8925 Ops.push_back(BECount);
8926
8927 if (Preds)
8928 append_range(*Preds, ENT.Predicates);
8929
8930 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8931 "Predicate should be always true!");
8932 }
8933
8934 // If an earlier exit exits on the first iteration (exit count zero), then
8935 // a later poison exit count should not propagate into the result. This are
8936 // exactly the semantics provided by umin_seq.
8937 return SE->getUMinFromMismatchedTypes(Ops, /* Sequential */ true);
8938}
8939
8940const ScalarEvolution::ExitNotTakenInfo *
8941ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8942 const BasicBlock *ExitingBlock,
8943 SmallVectorImpl<const SCEVPredicate *> *Predicates) const {
8944 for (const auto &ENT : ExitNotTaken)
8945 if (ENT.ExitingBlock == ExitingBlock) {
8946 if (ENT.hasAlwaysTruePredicate())
8947 return &ENT;
8948 else if (Predicates) {
8949 append_range(*Predicates, ENT.Predicates);
8950 return &ENT;
8951 }
8952 }
8953
8954 return nullptr;
8955}
8956
8957/// getConstantMax - Get the constant max backedge taken count for the loop.
8958const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8959 ScalarEvolution *SE,
8960 SmallVectorImpl<const SCEVPredicate *> *Predicates) const {
8961 if (!getConstantMax())
8962 return SE->getCouldNotCompute();
8963
8964 for (const auto &ENT : ExitNotTaken)
8965 if (!ENT.hasAlwaysTruePredicate()) {
8966 if (!Predicates)
8967 return SE->getCouldNotCompute();
8968 append_range(*Predicates, ENT.Predicates);
8969 }
8970
8971 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
8972 isa<SCEVConstant>(getConstantMax())) &&
8973 "No point in having a non-constant max backedge taken count!");
8974 return getConstantMax();
8975}
8976
8977const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8978 const Loop *L, ScalarEvolution *SE,
8979 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8980 if (!SymbolicMax) {
8981 // Form an expression for the maximum exit count possible for this loop. We
8982 // merge the max and exact information to approximate a version of
8983 // getConstantMaxBackedgeTakenCount which isn't restricted to just
8984 // constants.
8985 SmallVector<SCEVUse, 4> ExitCounts;
8986
8987 for (const auto &ENT : ExitNotTaken) {
8988 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8989 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
8990 assert(SE->DT.dominates(ENT.ExitingBlock, L->getLoopLatch()) &&
8991 "We should only have known counts for exiting blocks that "
8992 "dominate latch!");
8993 ExitCounts.push_back(ExitCount);
8994 if (Predicates)
8995 append_range(*Predicates, ENT.Predicates);
8996
8997 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8998 "Predicate should be always true!");
8999 }
9000 }
9001 if (ExitCounts.empty())
9002 SymbolicMax = SE->getCouldNotCompute();
9003 else
9004 SymbolicMax =
9005 SE->getUMinFromMismatchedTypes(ExitCounts, /*Sequential*/ true);
9006 }
9007 return SymbolicMax;
9008}
9009
9010bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
9011 ScalarEvolution *SE) const {
9012 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
9013 return !ENT.hasAlwaysTruePredicate();
9014 };
9015 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
9016}
9017
// NOTE(review): this ExitLimit constructor is garbled by the documentation
// extraction — the signature head (including the PredLists parameter), the
// bodies of the four asserts (only their message strings remain), and the
// declaration of SeenPreds are missing (gaps at residual lines 9021,
// 9024-9026, 9035-9036, 9038-9039, 9041-9042, 9044-9045, 9047, 9058).
// Code is kept byte-identical; restore the missing lines from the original
// ScalarEvolution.cpp before compiling.
9020
9022    const SCEV *E, const SCEV *ConstantMaxNotTaken,
9023    const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
9027  // If we prove the max count is zero, so is the symbolic bound. This happens
9028  // in practice due to differences in a) how context sensitive we've chosen
9029  // to be and b) how we reason about bounds implied by UB.
9030  if (ConstantMaxNotTaken->isZero()) {
9031    this->ExactNotTaken = E = ConstantMaxNotTaken;
9032    this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
9033  }
9034
9037         "Exact is not allowed to be less precise than Constant Max");
9040         "Exact is not allowed to be less precise than Symbolic Max");
9043         "Symbolic Max is not allowed to be less precise than Constant Max");
9046         "No point in having a non-constant max backedge taken count!");
  // De-duplicate leaf predicates across all contributing lists, preserving
  // first-seen order.
9048  for (const auto PredList : PredLists)
9049    for (const auto *P : PredList) {
9050      if (SeenPreds.contains(P))
9051        continue;
9052      assert(!isa<SCEVUnionPredicate>(P) && "Only add leaf predicates here!");
9053      SeenPreds.insert(P);
9054      Predicates.push_back(P);
9055    }
9056  assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
9057         "Backedge count should be int");
9059         !ConstantMaxNotTaken->getType()->isPointerTy()) &&
9060         "Max backedge count should be int");
9061}
9062
9070
9071/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
9072/// computable exit into a persistent ExitNotTakenInfo array.
// NOTE(review): the first constructor parameter (residual line 9074 — the
// ExitCounts vector of EdgeExitInfo, used below) was dropped by the
// documentation extraction; restore it from the original file before
// compiling.
9073ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
9075    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
9076    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
9077  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9078
  // One persistent ExitNotTakenInfo per computed (block, exit-limit) pair.
9079  ExitNotTaken.reserve(ExitCounts.size());
9080  std::transform(ExitCounts.begin(), ExitCounts.end(),
9081                 std::back_inserter(ExitNotTaken),
9082                 [&](const EdgeExitInfo &EEI) {
9083        BasicBlock *ExitBB = EEI.first;
9084        const ExitLimit &EL = EEI.second;
9085        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
9086                                EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
9087                                EL.Predicates);
9088      });
9089  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
9090          isa<SCEVConstant>(ConstantMax)) &&
9091         "No point in having a non-constant max backedge taken count!");
9092}
9093
9094/// Compute the number of times the backedge of the specified loop will execute.
9095ScalarEvolution::BackedgeTakenInfo
9096ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
9097                                           bool AllowPredicates) {
9098  SmallVector<BasicBlock *, 8> ExitingBlocks;
9099  L->getExitingBlocks(ExitingBlocks);
9100
9101  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9102
  // NOTE(review): residual line 9103 is missing from this extraction —
  // presumably the declaration of ExitCounts (a SmallVector of EdgeExitInfo,
  // used throughout below); restore it from the original file.
9104  bool CouldComputeBECount = true;
9105  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
9106  const SCEV *MustExitMaxBECount = nullptr;
9107  const SCEV *MayExitMaxBECount = nullptr;
9108  bool MustExitMaxOrZero = false;
9109  bool IsOnlyExit = ExitingBlocks.size() == 1;
9110
9111  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
9112  // and compute maxBECount.
9113  // Do a union of all the predicates here.
9114  for (BasicBlock *ExitBB : ExitingBlocks) {
9115    // We canonicalize untaken exits to br (constant), ignore them so that
9116    // proving an exit untaken doesn't negatively impact our ability to reason
9117    // about the loop as whole.
9118    if (auto *BI = dyn_cast<CondBrInst>(ExitBB->getTerminator()))
9119      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
9120        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9121        if (ExitIfTrue == CI->isZero())
9122          continue;
9123      }
9124
9125    ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
9126
9127    assert((AllowPredicates || EL.Predicates.empty()) &&
9128           "Predicated exit limit when predicates are not allowed!");
9129
9130    // 1. For each exit that can be computed, add an entry to ExitCounts.
9131    // CouldComputeBECount is true only if all exits can be computed.
9132    if (EL.ExactNotTaken != getCouldNotCompute())
9133      ++NumExitCountsComputed;
9134    else
9135      // We couldn't compute an exact value for this exit, so
9136      // we won't be able to compute an exact value for the loop.
9137      CouldComputeBECount = false;
9138    // Remember exit count if either exact or symbolic is known. Because
9139    // Exact always implies symbolic, only check symbolic.
9140    if (EL.SymbolicMaxNotTaken != getCouldNotCompute())
9141      ExitCounts.emplace_back(ExitBB, EL);
9142    else {
9143      assert(EL.ExactNotTaken == getCouldNotCompute() &&
9144             "Exact is known but symbolic isn't?");
9145      ++NumExitCountsNotComputed;
9146    }
9147
9148    // 2. Derive the loop's MaxBECount from each exit's max number of
9149    // non-exiting iterations. Partition the loop exits into two kinds:
9150    // LoopMustExits and LoopMayExits.
9151    //
9152    // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
9153    // is a LoopMayExit. If any computable LoopMustExit is found, then
9154    // MaxBECount is the minimum EL.ConstantMaxNotTaken of computable
9155    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
9156    // EL.ConstantMaxNotTaken, where CouldNotCompute is considered greater than
9157    // any
9158    // computable EL.ConstantMaxNotTaken.
9159    if (EL.ConstantMaxNotTaken != getCouldNotCompute() && Latch &&
9160        DT.dominates(ExitBB, Latch)) {
9161      if (!MustExitMaxBECount) {
9162        MustExitMaxBECount = EL.ConstantMaxNotTaken;
9163        MustExitMaxOrZero = EL.MaxOrZero;
9164      } else {
9165        MustExitMaxBECount = getUMinFromMismatchedTypes(MustExitMaxBECount,
9166                                                        EL.ConstantMaxNotTaken);
9167      }
9168    } else if (MayExitMaxBECount != getCouldNotCompute()) {
9169      if (!MayExitMaxBECount || EL.ConstantMaxNotTaken == getCouldNotCompute())
9170        MayExitMaxBECount = EL.ConstantMaxNotTaken;
9171      else {
9172        MayExitMaxBECount = getUMaxFromMismatchedTypes(MayExitMaxBECount,
9173                                                       EL.ConstantMaxNotTaken);
9174      }
9175    }
9176  }
  // Must-exits take priority; fall back to may-exits, then CouldNotCompute.
9177  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9178    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
9179  // The loop backedge will be taken the maximum or zero times if there's
9180  // a single exit that must be taken the maximum or zero times.
9181  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9182
9183  // Remember which SCEVs are used in exit limits for invalidation purposes.
9184  // We only care about non-constant SCEVs here, so we can ignore
9185  // EL.ConstantMaxNotTaken
9186  // and MaxBECount, which must be SCEVConstant.
9187  for (const auto &Pair : ExitCounts) {
9188    if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
9189      BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
9190    if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken))
9191      BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9192          {L, AllowPredicates});
9193  }
9194  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9195                           MaxBECount, MaxOrZero);
9196}
9197
9198ScalarEvolution::ExitLimit
9199ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
9200 bool IsOnlyExit, bool AllowPredicates) {
9201 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
9202 // If our exiting block does not dominate the latch, then its connection with
9203 // loop's exit limit may be far from trivial.
9204 const BasicBlock *Latch = L->getLoopLatch();
9205 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9206 return getCouldNotCompute();
9207
9208 Instruction *Term = ExitingBlock->getTerminator();
9209 if (CondBrInst *BI = dyn_cast<CondBrInst>(Term)) {
9210 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9211 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
9212 "It should have one successor in loop and one exit block!");
9213 // Proceed to the next level to examine the exit condition expression.
9214 return computeExitLimitFromCond(L, BI->getCondition(), ExitIfTrue,
9215 /*ControlsOnlyExit=*/IsOnlyExit,
9216 AllowPredicates);
9217 }
9218
9219 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
9220 // For switch, make sure that there is a single exit from the loop.
9221 BasicBlock *Exit = nullptr;
9222 for (auto *SBB : successors(ExitingBlock))
9223 if (!L->contains(SBB)) {
9224 if (Exit) // Multiple exit successors.
9225 return getCouldNotCompute();
9226 Exit = SBB;
9227 }
9228 assert(Exit && "Exiting block must have at least one exit");
9229 return computeExitLimitFromSingleExitSwitch(
9230 L, SI, Exit, /*ControlsOnlyExit=*/IsOnlyExit);
9231 }
9232
9233 return getCouldNotCompute();
9234}
9235
// NOTE(review): the function head (residual line 9236 — the return type and
// name, which from the call below is computeExitLimitFromCond) was dropped by
// the documentation extraction; restore it from the original file. The body
// simply seeds a per-query cache and delegates to the cached variant.
9237    const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit,
9238    bool AllowPredicates) {
9239  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9240  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9241                                        ControlsOnlyExit, AllowPredicates);
9242}
9243
9244std::optional<ScalarEvolution::ExitLimit>
9245ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
9246 bool ExitIfTrue, bool ControlsOnlyExit,
9247 bool AllowPredicates) {
9248 (void)this->L;
9249 (void)this->ExitIfTrue;
9250 (void)this->AllowPredicates;
9251
9252 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9253 this->AllowPredicates == AllowPredicates &&
9254 "Variance in assumed invariant key components!");
9255 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9256 if (Itr == TripCountMap.end())
9257 return std::nullopt;
9258 return Itr->second;
9259}
9260
9261void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
9262 bool ExitIfTrue,
9263 bool ControlsOnlyExit,
9264 bool AllowPredicates,
9265 const ExitLimit &EL) {
9266 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9267 this->AllowPredicates == AllowPredicates &&
9268 "Variance in assumed invariant key components!");
9269
9270 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9271 assert(InsertResult.second && "Expected successful insertion!");
9272 (void)InsertResult;
9273 (void)ExitIfTrue;
9274}
9275
9276ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9277 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
9278 bool ControlsOnlyExit, bool AllowPredicates) {
9279
9280 if (auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9281 AllowPredicates))
9282 return *MaybeEL;
9283
9284 ExitLimit EL = computeExitLimitFromCondImpl(
9285 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9286 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9287 return EL;
9288}
9289
// NOTE(review): two lines are missing from this extraction — residual line
// 9332 (the start of the ConstantRange expression assigned to NWR) and 9341
// (the statement applying Offset to LHS). Code kept byte-identical; restore
// both from the original file before compiling.
9290ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9291    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
9292    bool ControlsOnlyExit, bool AllowPredicates) {
9293  // Handle BinOp conditions (And, Or).
9294  if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9295          Cache, L, ExitCond, ExitIfTrue, AllowPredicates))
9296    return *LimitFromBinOp;
9297
9298  // With an icmp, it may be feasible to compute an exact backedge-taken count.
9299  // Proceed to the next level to examine the icmp.
9300  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
9301    ExitLimit EL =
9302        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9303    if (EL.hasFullInfo() || !AllowPredicates)
9304      return EL;
9305
9306    // Try again, but use SCEV predicates this time.
9307    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9308                                    ControlsOnlyExit,
9309                                    /*AllowPredicates=*/true);
9310  }
9311
9312  // Check for a constant condition. These are normally stripped out by
9313  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
9314  // preserve the CFG and is temporarily leaving constant conditions
9315  // in place.
9316  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
9317    if (ExitIfTrue == !CI->getZExtValue())
9318      // The backedge is always taken.
9319      return getCouldNotCompute();
9320    // The backedge is never taken.
9321    return getZero(CI->getType());
9322  }
9323
9324  // If we're exiting based on the overflow flag of an x.with.overflow intrinsic
9325  // with a constant step, we can form an equivalent icmp predicate and figure
9326  // out how many iterations will be taken before we exit.
9327  const WithOverflowInst *WO;
9328  const APInt *C;
9329  if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
9330      match(WO->getRHS(), m_APInt(C))) {
  // (missing residual line 9332: the ConstantRange factory call producing NWR)
9331    ConstantRange NWR =
9333        WO->getNoWrapKind());
9334    CmpInst::Predicate Pred;
9335    APInt NewRHSC, Offset;
9336    NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
9337    if (!ExitIfTrue)
9338      Pred = ICmpInst::getInversePredicate(Pred);
9339    auto *LHS = getSCEV(WO->getLHS());
  // (missing residual line 9341: the statement guarded by this Offset check)
9340    if (Offset != 0)
9342    auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
9343                                       ControlsOnlyExit, AllowPredicates);
9344    if (EL.hasAnyInfo())
9345      return EL;
9346  }
9347
9348  // If it's not an integer or pointer comparison then compute it the hard way.
9349  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9350}
9351
9352std::optional<ScalarEvolution::ExitLimit>
9353ScalarEvolution::computeExitLimitFromCondFromBinOp(ExitLimitCacheTy &Cache,
9354 const Loop *L,
9355 Value *ExitCond,
9356 bool ExitIfTrue,
9357 bool AllowPredicates) {
9358 // Check if the controlling expression for this loop is an And or Or.
9359 Value *Op0, *Op1;
9360 bool IsAnd;
9361 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
9362 IsAnd = true;
9363 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
9364 IsAnd = false;
9365 else
9366 return std::nullopt;
9367
9368 // A sub-condition of a non-trivial binop never solely controls the exit,
9369 // whether we exit always depends on both conditions.
9370 ExitLimit EL0 = computeExitLimitFromCondCached(
9371 Cache, L, Op0, ExitIfTrue, /*ControlsOnlyExit=*/false, AllowPredicates);
9372 ExitLimit EL1 = computeExitLimitFromCondCached(
9373 Cache, L, Op1, ExitIfTrue, /*ControlsOnlyExit=*/false, AllowPredicates);
9374
9375 // EitherMayExit is true in these two cases:
9376 // br (and Op0 Op1), loop, exit
9377 // br (or Op0 Op1), exit, loop
9378 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9379
9380 const SCEV *BECount = getCouldNotCompute();
9381 const SCEV *ConstantMaxBECount = getCouldNotCompute();
9382 const SCEV *SymbolicMaxBECount = getCouldNotCompute();
9383 if (EitherMayExit) {
9384 bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
9385 // Both conditions must be same for the loop to continue executing.
9386 // Choose the less conservative count.
9387 if (EL0.ExactNotTaken != getCouldNotCompute() &&
9388 EL1.ExactNotTaken != getCouldNotCompute()) {
9389 BECount = getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken,
9390 UseSequentialUMin);
9391 }
9392 if (EL0.ConstantMaxNotTaken == getCouldNotCompute())
9393 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9394 else if (EL1.ConstantMaxNotTaken == getCouldNotCompute())
9395 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9396 else
9397 ConstantMaxBECount = getUMinFromMismatchedTypes(EL0.ConstantMaxNotTaken,
9398 EL1.ConstantMaxNotTaken);
9399 if (EL0.SymbolicMaxNotTaken == getCouldNotCompute())
9400 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9401 else if (EL1.SymbolicMaxNotTaken == getCouldNotCompute())
9402 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9403 else
9404 SymbolicMaxBECount = getUMinFromMismatchedTypes(
9405 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9406 } else {
9407 // Both conditions must be same at the same time for the loop to exit.
9408 // For now, be conservative.
9409 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9410 BECount = EL0.ExactNotTaken;
9411 }
9412
9413 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
9414 // to be more aggressive when computing BECount than when computing
9415 // ConstantMaxBECount. In these cases it is possible for EL0.ExactNotTaken
9416 // and
9417 // EL1.ExactNotTaken to match, but for EL0.ConstantMaxNotTaken and
9418 // EL1.ConstantMaxNotTaken to not.
9419 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
9420 !isa<SCEVCouldNotCompute>(BECount))
9421 ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));
9422 if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
9423 SymbolicMaxBECount =
9424 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
9425 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
9426 {ArrayRef(EL0.Predicates), ArrayRef(EL1.Predicates)});
9427}
9428
9429ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9430 const Loop *L, ICmpInst *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit,
9431 bool AllowPredicates) {
9432 // If the condition was exit on true, convert the condition to exit on false
9433 CmpPredicate Pred;
9434 if (!ExitIfTrue)
9435 Pred = ExitCond->getCmpPredicate();
9436 else
9437 Pred = ExitCond->getInverseCmpPredicate();
9438 const ICmpInst::Predicate OriginalPred = Pred;
9439
9440 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
9441 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
9442
9443 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
9444 AllowPredicates);
9445 if (EL.hasAnyInfo())
9446 return EL;
9447
9448 auto *ExhaustiveCount =
9449 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9450
9451 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
9452 return ExhaustiveCount;
9453
9454 return computeShiftCompareExitLimit(ExitCond->getOperand(0),
9455 ExitCond->getOperand(1), L, OriginalPred);
9456}
// NOTE(review): this overload is garbled by the documentation extraction —
// residual lines 9470 (presumably the predicate update after std::swap),
// 9474 (the tail of the ControllingFiniteLoop condition), 9538-9539 and
// 9543-9544 (pointer handling in the ICMP_NE case), 9556-9557 and 9561-9562
// (pointer handling in the ICMP_EQ case), 9593 and 9611 (statements before
// the fallthroughs) are missing. Code kept byte-identical; restore them from
// the original file before compiling.
9457ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9458    const Loop *L, CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS,
9459    bool ControlsOnlyExit, bool AllowPredicates) {
9460
9461  // Try to evaluate any dependencies out of the loop.
9462  LHS = getSCEVAtScope(LHS, L);
9463  RHS = getSCEVAtScope(RHS, L);
9464
9465  // At this point, we would like to compute how many iterations of the
9466  // loop the predicate will return true for these inputs.
9467  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
9468    // If there is a loop-invariant, force it into the RHS.
9469    std::swap(LHS, RHS);
9471  }
9472
9473  bool ControllingFiniteLoop = ControlsOnlyExit && loopHasNoAbnormalExits(L) &&
9475  // Simplify the operands before analyzing them.
9476  (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0);
9477
9478  // If we have a comparison of a chrec against a constant, try to use value
9479  // ranges to answer this query.
9480  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
9481    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
9482      if (AddRec->getLoop() == L) {
9483        // Form the constant range.
9484        ConstantRange CompRange =
9485            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
9486
9487        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
9488        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
9489      }
9490
9491  // If this loop must exit based on this condition (or execute undefined
9492  // behaviour), see if we can improve wrap flags. This is essentially
9493  // a must execute style proof.
9494  if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) {
9495    // If we can prove the test sequence produced must repeat the same values
9496    // on self-wrap of the IV, then we can infer that IV doesn't self wrap
9497    // because if it did, we'd have an infinite (undefined) loop.
9498    // TODO: We can peel off any functions which are invertible *in L*. Loop
9499    // invariant terms are effectively constants for our purposes here.
9500    SCEVUse InnerLHS = LHS;
9501    if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
9502      InnerLHS = ZExt->getOperand();
9503    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS);
9504        AR && !AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() &&
9505        isKnownToBeAPowerOfTwo(AR->getStepRecurrence(*this), /*OrZero=*/true,
9506                               /*OrNegative=*/true)) {
9507      auto Flags = AR->getNoWrapFlags();
9508      Flags = setFlags(Flags, SCEV::FlagNW);
9509      SmallVector<SCEVUse> Operands{AR->operands()};
9510      Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
9511      setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
9512    }
9513
9514    // For a slt/ult condition with a positive step, can we prove nsw/nuw?
9515    // From no-self-wrap, this follows trivially from the fact that every
9516    // (un)signed-wrapped, but not self-wrapped value must be LT than the
9517    // last value before (un)signed wrap.  Since we know that last value
9518    // didn't exit, nor will any smaller one.
9519    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_ULT) {
9520      auto WrapType = Pred == ICmpInst::ICMP_SLT ? SCEV::FlagNSW : SCEV::FlagNUW;
9521      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS);
9522          AR && AR->getLoop() == L && AR->isAffine() &&
9523          !AR->getNoWrapFlags(WrapType) && AR->hasNoSelfWrap() &&
9524          isKnownPositive(AR->getStepRecurrence(*this))) {
9525        auto Flags = AR->getNoWrapFlags();
9526        Flags = setFlags(Flags, WrapType);
9527        SmallVector<SCEVUse> Operands{AR->operands()};
9528        Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
9529        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
9530      }
9531    }
9532  }
9533
9534  switch (Pred) {
9535  case ICmpInst::ICMP_NE: {                     // while (X != Y)
9536    // Convert to: while (X-Y != 0)
9537    if (LHS->getType()->isPointerTy()) {
9540      return LHS;
9541    }
9542    if (RHS->getType()->isPointerTy()) {
9545      return RHS;
9546    }
9547    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit,
9548                                AllowPredicates);
9549    if (EL.hasAnyInfo())
9550      return EL;
9551    break;
9552  }
9553  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
9554    // Convert to: while (X-Y == 0)
9555    if (LHS->getType()->isPointerTy()) {
9558      return LHS;
9559    }
9560    if (RHS->getType()->isPointerTy()) {
9563      return RHS;
9564    }
9565    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
9566    if (EL.hasAnyInfo()) return EL;
9567    break;
9568  }
9569  case ICmpInst::ICMP_SLE:
9570  case ICmpInst::ICMP_ULE:
9571    // Since the loop is finite, an invariant RHS cannot include the boundary
9572    // value, otherwise it would loop forever.
9573    if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
9574        !isLoopInvariant(RHS, L)) {
9575      // Otherwise, perform the addition in a wider type, to avoid overflow.
9576      // If the LHS is an addrec with the appropriate nowrap flag, the
9577      // extension will be sunk into it and the exit count can be analyzed.
9578      auto *OldType = dyn_cast<IntegerType>(LHS->getType());
9579      if (!OldType)
9580        break;
9581      // Prefer doubling the bitwidth over adding a single bit to make it more
9582      // likely that we use a legal type.
9583      auto *NewType =
9584          Type::getIntNTy(OldType->getContext(), OldType->getBitWidth() * 2);
9585      if (ICmpInst::isSigned(Pred)) {
9586        LHS = getSignExtendExpr(LHS, NewType);
9587        RHS = getSignExtendExpr(RHS, NewType);
9588      } else {
9589        LHS = getZeroExtendExpr(LHS, NewType);
9590        RHS = getZeroExtendExpr(RHS, NewType);
9591      }
9592    }
9594    [[fallthrough]];
9595  case ICmpInst::ICMP_SLT:
9596  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
9597    bool IsSigned = ICmpInst::isSigned(Pred);
9598    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9599                                    AllowPredicates);
9600    if (EL.hasAnyInfo())
9601      return EL;
9602    break;
9603  }
9604  case ICmpInst::ICMP_SGE:
9605  case ICmpInst::ICMP_UGE:
9606    // Since the loop is finite, an invariant RHS cannot include the boundary
9607    // value, otherwise it would loop forever.
9608    if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
9609        !isLoopInvariant(RHS, L))
9610      break;
9612    [[fallthrough]];
9613  case ICmpInst::ICMP_SGT:
9614  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
9615    bool IsSigned = ICmpInst::isSigned(Pred);
9616    ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9617                                       AllowPredicates);
9618    if (EL.hasAnyInfo())
9619      return EL;
9620    break;
9621  }
9622  default:
9623    break;
9624  }
9625
9626  return getCouldNotCompute();
9627}
9628
9629ScalarEvolution::ExitLimit
9630ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
9631 SwitchInst *Switch,
9632 BasicBlock *ExitingBlock,
9633 bool ControlsOnlyExit) {
9634 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
9635
9636 // Give up if the exit is the default dest of a switch.
9637 if (Switch->getDefaultDest() == ExitingBlock)
9638 return getCouldNotCompute();
9639
9640 assert(L->contains(Switch->getDefaultDest()) &&
9641 "Default case must not exit the loop!");
9642 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
9643 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
9644
9645 // while (X != Y) --> while (X-Y != 0)
9646 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
9647 if (EL.hasAnyInfo())
9648 return EL;
9649
9650 return getCouldNotCompute();
9651}
9652
// Evaluate an add-recurrence at a constant iteration number and return the
// folded constant value.
// NOTE(review): residual lines 9654 (the function name and leading
// parameters — from the body these are the SCEVAddRecExpr *AddRec and the
// ConstantInt *C it is evaluated at) and 9658 (the assert condition whose
// message follows) are missing from this extraction; restore them from the
// original file before compiling.
9653static ConstantInt *
9655                                ScalarEvolution &SE) {
9656  const SCEV *InVal = SE.getConstant(C);
9657  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
9659         "Evaluation of SCEV at constant didn't fold correctly?");
9660  return cast<SCEVConstant>(Val)->getValue();
9661}
9662
// Compute an exit limit for conditions of the form `icmp pred <shift
// recurrence>, <constant>`: such recurrences stabilize to 0 or -1 within
// bitwidth(K) iterations, bounding the backedge-taken count.
// NOTE(review): residual lines 9710 and 9749 (apparently the declarations of
// OpC and OpCode, both Instruction::BinaryOps) and 9799 (the expression
// assigned to UpperBound) are missing from this extraction; restore them
// from the original file before compiling.
9663ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9664    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
9665  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
9666  if (!RHS)
9667    return getCouldNotCompute();
9668
9669  const BasicBlock *Latch = L->getLoopLatch();
9670  if (!Latch)
9671    return getCouldNotCompute();
9672
9673  const BasicBlock *Predecessor = L->getLoopPredecessor();
9674  if (!Predecessor)
9675    return getCouldNotCompute();
9676
9677  // Return true if V is of the form "LHS `shift_op` <positive constant>".
9678  // Return LHS in OutLHS and shift_opt in OutOpCode.
9679  auto MatchPositiveShift =
9680      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
9681
9682    using namespace PatternMatch;
9683
9684    ConstantInt *ShiftAmt;
9685    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9686      OutOpCode = Instruction::LShr;
9687    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9688      OutOpCode = Instruction::AShr;
9689    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
9690      OutOpCode = Instruction::Shl;
9691    else
9692      return false;
9693
9694    return ShiftAmt->getValue().isStrictlyPositive();
9695  };
9696
9697  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
9698  //
9699  // loop:
9700  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
9701  //   %iv.shifted = lshr i32 %iv, <positive constant>
9702  //
9703  // Return true on a successful match.  Return the corresponding PHI node (%iv
9704  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
9705  auto MatchShiftRecurrence =
9706      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
9707    std::optional<Instruction::BinaryOps> PostShiftOpCode;
9708
9709    {
9711      Value *V;
9712
9713      // If we encounter a shift instruction, "peel off" the shift operation,
9714      // and remember that we did so.  Later when we inspect %iv's backedge
9715      // value, we will make sure that the backedge value uses the same
9716      // operation.
9717      //
9718      // Note: the peeled shift operation does not have to be the same
9719      // instruction as the one feeding into the PHI's backedge value.  We only
9720      // really care about it being the same *kind* of shift instruction --
9721      // that's all that is required for our later inferences to hold.
9722      if (MatchPositiveShift(LHS, V, OpC)) {
9723        PostShiftOpCode = OpC;
9724        LHS = V;
9725      }
9726    }
9727
9728    PNOut = dyn_cast<PHINode>(LHS);
9729    if (!PNOut || PNOut->getParent() != L->getHeader())
9730      return false;
9731
9732    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9733    Value *OpLHS;
9734
9735    return
9736        // The backedge value for the PHI node must be a shift by a positive
9737        // amount
9738        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9739
9740        // of the PHI node itself
9741        OpLHS == PNOut &&
9742
9743        // and the kind of shift should be match the kind of shift we peeled
9744        // off, if any.
9745        (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9746  };
9747
9748  PHINode *PN;
9750  if (!MatchShiftRecurrence(LHS, PN, OpCode))
9751    return getCouldNotCompute();
9752
9753  const DataLayout &DL = getDataLayout();
9754
9755  // The key rationale for this optimization is that for some kinds of shift
9756  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
9757  // within a finite number of iterations.  If the condition guarding the
9758  // backedge (in the sense that the backedge is taken if the condition is true)
9759  // is false for the value the shift recurrence stabilizes to, then we know
9760  // that the backedge is taken only a finite number of times.
9761
9762  ConstantInt *StableValue = nullptr;
9763  switch (OpCode) {
9764  default:
9765    llvm_unreachable("Impossible case!");
9766
9767  case Instruction::AShr: {
9768    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
9769    // bitwidth(K) iterations.
9770    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
9771    KnownBits Known = computeKnownBits(FirstValue, DL, &AC,
9772                                       Predecessor->getTerminator(), &DT);
9773    auto *Ty = cast<IntegerType>(RHS->getType());
9774    if (Known.isNonNegative())
9775      StableValue = ConstantInt::get(Ty, 0);
9776    else if (Known.isNegative())
9777      StableValue = ConstantInt::get(Ty, -1, true);
9778    else
9779      return getCouldNotCompute();
9780
9781    break;
9782  }
9783  case Instruction::LShr:
9784  case Instruction::Shl:
9785    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
9786    // stabilize to 0 in at most bitwidth(K) iterations.
9787    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
9788    break;
9789  }
9790
9791  auto *Result =
9792      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
9793  assert(Result->getType()->isIntegerTy(1) &&
9794         "Otherwise cannot be an operand to a branch instruction");
9795
9796  if (Result->isNullValue()) {
9797    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
9798    const SCEV *UpperBound =
9800    return ExitLimit(getCouldNotCompute(), UpperBound, UpperBound, false);
9801  }
9802
9803  return getCouldNotCompute();
9804}
9805
9806/// Return true if we can constant fold an instruction of the specified type,
9807/// assuming that all operands were constants.
// NOTE(review): residual lines 9809-9811 (the condition guarding the `return
// true` below — presumably a set of isa<> checks on the instruction kind) are
// missing from this extraction; restore them from the original file before
// compiling.
9808static bool CanConstantFold(const Instruction *I) {
9812    return true;
9813
  // Calls fold only when the callee is a known constant-foldable function.
9814  if (const CallInst *CI = dyn_cast<CallInst>(I))
9815    if (const Function *F = CI->getCalledFunction())
9816      return canConstantFoldCallTo(CI, F);
9817  return false;
9818}
9819
9820/// Determine whether this instruction can constant evolve within this loop
9821/// assuming its operands can all constant evolve.
9822static bool canConstantEvolve(Instruction *I, const Loop *L) {
9823 // An instruction outside of the loop can't be derived from a loop PHI.
9824 if (!L->contains(I)) return false;
9825
9826 if (isa<PHINode>(I)) {
9827 // We don't currently keep track of the control flow needed to evaluate
9828 // PHIs, so we cannot handle PHIs inside of loops.
9829 return L->getHeader() == I->getParent();
9830 }
9831
9832 // If we won't be able to constant fold this expression even if the operands
9833 // are constants, bail early.
9834 return CanConstantFold(I);
9835}
9836
/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
// NOTE(review): the parameter lines (UseInst, L, the PHIMap memoization map)
// and the recursion-depth cutoff guard appear elided in this rendering —
// verify against the full source.
static PHINode *
                               unsigned Depth) {
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    // Constant operands impose no constraint.
    if (isa<Constant>(Op)) continue;

    // A non-instruction operand, or one that cannot constant-evolve in L,
    // disqualifies the whole expression.
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}
9876
/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
// NOTE(review): the function header line, the dyn_cast producing `I` from V,
// and the local PHIMap declaration are elided in this rendering.
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  // A header PHI is itself the evolving PHI.
  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}
9893
/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
// NOTE(review): the leading parameter lines (the Value V, the Loop, and the
// Vals value map) and the dyn_cast producing `I` from V are elided in this
// rendering.
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  if (!I) return nullptr;

  // Reuse a value already computed for this iteration.
  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  // Resolve each operand to a constant, recursing through instructions and
  // memoizing intermediate results in Vals.
  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    // Cache even a null result so we don't re-attempt this operand.
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  return ConstantFoldInstOperands(I, Operands, DL, TLI,
                                  /*AllowNonDeterministic=*/false);
}
9936
9937
// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
// NOTE(review): the function header line (taking PN and BB) is elided in this
// rendering.
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    // Skip the edge we were asked to ignore.
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    // All remaining edges must agree on one single constant.
    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}
9960
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  // Memoize per-PHI: a prior (possibly null) answer is returned as-is.
  auto [I, Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
  if (!Inserted)
    return I->second;

  // NOTE(review): the guard rejecting trip counts above the brute-force
  // iteration budget appears elided in this rendering, just before this
  // early return.
    return nullptr; // Not going to evaluate it.

  Constant *&RetVal = I->second;

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  // Seed every header PHI with its unique constant start value, if any.
  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN]; // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes. However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    // NOTE(review): the declaration of PHIsToCompute (a vector of
    // PHI/constant pairs) appears elided in this rendering.
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}
10047
10048const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
10049 Value *Cond,
10050 bool ExitWhen) {
10051 PHINode *PN = getConstantEvolvingPHI(Cond, L);
10052 if (!PN) return getCouldNotCompute();
10053
10054 // If the loop is canonicalized, the PHI will have exactly two entries.
10055 // That's the only form we support here.
10056 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
10057
10058 DenseMap<Instruction *, Constant *> CurrentIterVals;
10059 BasicBlock *Header = L->getHeader();
10060 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
10061
10062 BasicBlock *Latch = L->getLoopLatch();
10063 assert(Latch && "Should follow from NumIncomingValues == 2!");
10064
10065 for (PHINode &PHI : Header->phis()) {
10066 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
10067 CurrentIterVals[&PHI] = StartCST;
10068 }
10069 if (!CurrentIterVals.count(PN))
10070 return getCouldNotCompute();
10071
10072 // Okay, we find a PHI node that defines the trip count of this loop. Execute
10073 // the loop symbolically to determine when the condition gets a value of
10074 // "ExitWhen".
10075 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
10076 const DataLayout &DL = getDataLayout();
10077 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
10078 auto *CondVal = dyn_cast_or_null<ConstantInt>(
10079 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
10080
10081 // Couldn't symbolically evaluate.
10082 if (!CondVal) return getCouldNotCompute();
10083
10084 if (CondVal->getValue() == uint64_t(ExitWhen)) {
10085 ++NumBruteForceTripCountsComputed;
10086 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
10087 }
10088
10089 // Update all the PHI nodes for the next iteration.
10090 DenseMap<Instruction *, Constant *> NextIterVals;
10091
10092 // Create a list of which PHIs we need to compute. We want to do this before
10093 // calling EvaluateExpression on them because that may invalidate iterators
10094 // into CurrentIterVals.
10095 SmallVector<PHINode *, 8> PHIsToCompute;
10096 for (const auto &I : CurrentIterVals) {
10097 PHINode *PHI = dyn_cast<PHINode>(I.first);
10098 if (!PHI || PHI->getParent() != Header) continue;
10099 PHIsToCompute.push_back(PHI);
10100 }
10101 for (PHINode *PHI : PHIsToCompute) {
10102 Constant *&NextPHI = NextIterVals[PHI];
10103 if (NextPHI) continue; // Already computed!
10104
10105 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
10106 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
10107 }
10108 CurrentIterVals.swap(NextIterVals);
10109 }
10110
10111 // Too many iterations were needed to evaluate.
10112 return getCouldNotCompute();
10113}
10114
/// Evaluate V at scope L, memoizing the result in ValuesAtScopes.
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  // NOTE(review): the declaration binding `Values` to the cache bucket
  // ValuesAtScopes[V] is elided in this rendering.
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      // A null cached entry means "no simplification found": return V itself.
      return LS.second ? LS.second : V;

  // Reserve a slot before computing; computeSCEVAtScope may recurse back into
  // this function and append more entries for V.
  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  // Re-find our entry (scan in reverse since the vector may have grown) and
  // record the result, plus the user edge from C back to (L, V).
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      if (!isa<SCEVConstant>(C))
        ValuesAtScopesUsers[C].push_back({L, V});
      break;
    }
  return C;
}
10136
/// This builds up a Constant using the ConstantExpr interface. That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
// NOTE(review): the function header line and several cast/extraction lines
// are elided in this rendering — verify against the full source.
  switch (V->getSCEVType()) {
  case scCouldNotCompute:
  case scAddRecExpr:
  case scVScale:
    return nullptr;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    // NOTE(review): the scUnknown return (extracting the SCEVUnknown's
    // underlying Value as a Constant) appears elided here; as rendered,
    // scUnknown falls into the scPtrToAddr case.
  case scPtrToAddr: {
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToAddr(CastOp, P2I->getType());

    return nullptr;
  }
  case scPtrToInt: {
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToInt(CastOp, P2I->getType());

    return nullptr;
  }
  case scTruncate: {
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    return nullptr;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    Constant *C = nullptr;
    for (const SCEV *Op : SA->operands()) {
      // NOTE(review): the recursive call producing OpC for each operand is
      // elided in this rendering.
      if (!OpC)
        return nullptr;
      if (!C) {
        C = OpC;
        continue;
      }
      assert(!C->getType()->isPointerTy() &&
             "Can only have one pointer, and it must be last");
      if (OpC->getType()->isPointerTy()) {
        // The offsets have been converted to bytes. We can add bytes using
        // an i8 GEP.
        C = ConstantExpr::getPtrAdd(OpC, C);
      } else {
        C = ConstantExpr::getAdd(C, OpC);
      }
    }
    return C;
  }
  case scMulExpr:
  case scSignExtend:
  case scZeroExtend:
  case scUDivExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
    // These forms are not folded to a plain Constant here.
    return nullptr;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
10207
/// Rebuild S with the given replacement operands NewOps, preserving the
/// expression kind and (where applicable) its loop and no-wrap flags.
const SCEV *ScalarEvolution::getWithOperands(const SCEV *S,
                                             SmallVectorImpl<SCEVUse> &NewOps) {
  switch (S->getSCEVType()) {
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToAddr:
  case scPtrToInt:
    return getCastExpr(S->getSCEVType(), NewOps[0], S->getType());
  case scAddRecExpr: {
    auto *AddRec = cast<SCEVAddRecExpr>(S);
    return getAddRecExpr(NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags());
  }
  case scAddExpr:
    return getAddExpr(NewOps, cast<SCEVAddExpr>(S)->getNoWrapFlags());
  case scMulExpr:
    return getMulExpr(NewOps, cast<SCEVMulExpr>(S)->getNoWrapFlags());
  case scUDivExpr:
    return getUDivExpr(NewOps[0], NewOps[1]);
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return getMinMaxExpr(S->getSCEVType(), NewOps);
    // NOTE(review): the `case scSequentialUMinExpr:` label appears elided in
    // this rendering, just before the next return.
    return getSequentialMinMaxExpr(S->getSCEVType(), NewOps);
  case scConstant:
  case scVScale:
  case scUnknown:
    // Leaf expressions have no operands to replace.
    return S;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
10243
/// Compute, without consulting the cache, what V evaluates to at scope L:
/// loop-variant parts are replaced by their values on loop exit where that
/// can be determined.
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  switch (V->getSCEVType()) {
  case scConstant:
  case scVScale:
    return V;
  case scAddRecExpr: {
    // If this is a loop recurrence for a loop that does not contain L, then we
    // are dealing with the final value computed by the loop.
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(V);
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable. Build a new instance of the folded commutative expression.
      // NOTE(review): the declaration of NewOps (a SmallVector of operands)
      // appears elided in this rendering.
      NewOps.reserve(AddRec->getNumOperands());
      append_range(NewOps, AddRec->operands().take_front(i));
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec = getAddRecExpr(
          NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates. Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute())
        return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToAddr:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    ArrayRef<SCEVUse> Ops = V->operands();
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Ops[i].getPointer(), L);
      if (OpAtScope != Ops[i].getPointer()) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
        // NOTE(review): the declaration of NewOps appears elided here too.
        NewOps.reserve(Ops.size());
        append_range(NewOps, Ops.take_front(i));
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Ops[i].getPointer(), L);
          NewOps.push_back(OpAtScope);
        }

        return getWithOperands(V, NewOps);
      }
    }
    // If we got here, all operands are loop invariant.
    return V;
  }
  case scUnknown: {
    // If this instruction is evolved from a constant-evolving PHI, compute the
    // exit value from the loop without using SCEVs.
    const SCEVUnknown *SU = cast<SCEVUnknown>(V);
    // NOTE(review): the line extracting the underlying Instruction `I` from SU
    // appears elided in this rendering.
    if (!I)
      return V; // This is some other type of SCEVUnknown, just return it.

    if (PHINode *PN = dyn_cast<PHINode>(I)) {
      const Loop *CurrLoop = this->LI[I->getParent()];
      // Looking for loop exit value.
      if (CurrLoop && CurrLoop->getParentLoop() == L &&
          PN->getParent() == CurrLoop->getHeader()) {
        // Okay, there is no closed form solution for the PHI node. Check
        // to see if the loop that contains it has a known backedge-taken
        // count. If so, we may be able to force computation of the exit
        // value.
        const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
        // This trivial case can show up in some degenerate cases where
        // the incoming IR has not yet been fully simplified.
        if (BackedgeTakenCount->isZero()) {
          Value *InitValue = nullptr;
          bool MultipleInitValues = false;
          // Zero backedges taken: the PHI's value is its (unique) init value
          // from outside the loop, if there is one.
          for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
            if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
              if (!InitValue)
                InitValue = PN->getIncomingValue(i);
              else if (InitValue != PN->getIncomingValue(i)) {
                MultipleInitValues = true;
                break;
              }
            }
          }
          if (!MultipleInitValues && InitValue)
            return getSCEV(InitValue);
        }
        // Do we have a loop invariant value flowing around the backedge
        // for a loop which must execute the backedge?
        if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
            isKnownNonZero(BackedgeTakenCount) &&
            PN->getNumIncomingValues() == 2) {

          unsigned InLoopPred =
              CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
          Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
          if (CurrLoop->isLoopInvariant(BackedgeVal))
            return getSCEV(BackedgeVal);
        }
        if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
          // Okay, we know how many times the containing loop executes. If
          // this is a constant evolving PHI node, get the final value at
          // the specified iteration number.
          Constant *RV =
              getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
          if (RV)
            return getSCEV(RV);
        }
      }
    }

    // Okay, this is an expression that we cannot symbolically evaluate
    // into a SCEV. Check to see if it's possible to symbolically evaluate
    // the arguments into constants, and if so, try to constant propagate the
    // result. This is particularly useful for computing loop exit values.
    if (!CanConstantFold(I))
      return V; // This is some other type of SCEVUnknown, just return it.

    SmallVector<Constant *, 4> Operands;
    Operands.reserve(I->getNumOperands());
    bool MadeImprovement = false;
    for (Value *Op : I->operands()) {
      if (Constant *C = dyn_cast<Constant>(Op)) {
        Operands.push_back(C);
        continue;
      }

      // If any of the operands is non-constant and if they are
      // non-integer and non-pointer, don't even try to analyze them
      // with scev techniques.
      if (!isSCEVable(Op->getType()))
        return V;

      const SCEV *OrigV = getSCEV(Op);
      const SCEV *OpV = getSCEVAtScope(OrigV, L);
      MadeImprovement |= OrigV != OpV;

      // NOTE(review): the line materializing `C` from OpV (via
      // BuildConstantFromSCEV) appears elided in this rendering.
      if (!C)
        return V;
      assert(C->getType() == Op->getType() && "Type mismatch");
      Operands.push_back(C);
    }

    // Check to see if getSCEVAtScope actually made an improvement.
    if (!MadeImprovement)
      return V; // This is some other type of SCEVUnknown, just return it.

    Constant *C = nullptr;
    const DataLayout &DL = getDataLayout();
    C = ConstantFoldInstOperands(I, Operands, DL, &TLI,
                                 /*AllowNonDeterministic=*/false);
    if (!C)
      return V;
    return getSCEV(C);
  }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV type!");
}
10443
  // NOTE(review): the signature of this Value* overload of getSCEVAtScope is
  // elided in this rendering; the body delegates to the SCEV* overload.
  return getSCEVAtScope(getSCEV(V), L);
}
10447
/// Strip zero-/sign-extension wrappers off S, recursing to the innermost
/// unextended expression.
const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  // NOTE(review): the dyn_cast guards binding ZExt/SExt are elided in this
  // rendering; each return below recurses on the matched cast's operand.
    return stripInjectiveFunctions(ZExt->getOperand());
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}
10455
/// Finds the minimum unsigned root of the following equation:
///
/// A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *
// NOTE(review): the leading parameter lines (the APInt A, the SCEV B, and an
// optional Predicates vector) are elided in this rendering; only the trailing
// parameters survive below.
                             ScalarEvolution &SE, const Loop *L) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity
  uint32_t Mult2 = A.countr_zero();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  unsigned MinTZ = SE.getMinTrailingZeros(B);
  // Try again with the terminator of the loop predecessor for context-specific
  // result, if MinTZ is too small.
  if (MinTZ < Mult2 && L->getLoopPredecessor())
    MinTZ = SE.getMinTrailingZeros(B, L->getLoopPredecessor()->getTerminator());
  if (MinTZ < Mult2) {
    // Check if we can prove there's no remainder using URem.
    const SCEV *URem =
        SE.getURemExpr(B, SE.getConstant(APInt::getOneBitSet(BW, Mult2)));
    const SCEV *Zero = SE.getZero(B->getType());
    if (!SE.isKnownPredicate(CmpInst::ICMP_EQ, URem, Zero)) {
      // Try to add a predicate ensuring B is a multiple of 1 << Mult2.
      if (!Predicates)
        return SE.getCouldNotCompute();

      // Avoid adding a predicate that is known to be false.
      if (SE.isKnownPredicate(CmpInst::ICMP_NE, URem, Zero))
        return SE.getCouldNotCompute();
      Predicates->push_back(SE.getEqualPredicate(URem, Zero));
    }
  }

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).trunc(BW - Mult2); // AD = A / D
  APInt I = AD.multiplicativeInverse().zext(BW);

  // 4. Compute the minimum unsigned root of the equation:
  // I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  // (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}
10521
/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns std::nullopt if the addrec coefficients are not
/// compile-time constants.
static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
// NOTE(review): the signature line taking the SCEVAddRecExpr *AddRec is
// elided in this rendering.
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return std::nullopt;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isZero() && "This is not a quadratic addrec");

  // Work one bit wider than the addrec to avoid losing information when the
  // coefficients are doubled below.
  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  // L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  // N n^2 + (2M-N) n + 2L = 0.

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}
10580
10581/// Helper function to compare optional APInts:
10582/// (a) if X and Y both exist, return min(X, Y),
10583/// (b) if neither X nor Y exist, return std::nullopt,
10584/// (c) if exactly one of X and Y exists, return that value.
10585static std::optional<APInt> MinOptional(std::optional<APInt> X,
10586 std::optional<APInt> Y) {
10587 if (X && Y) {
10588 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
10589 APInt XW = X->sext(W);
10590 APInt YW = Y->sext(W);
10591 return XW.slt(YW) ? *X : *Y;
10592 }
10593 if (!X && !Y)
10594 return std::nullopt;
10595 return X ? *X : *Y;
10596}
10597
/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static std::optional<APInt> TruncIfPossible(std::optional<APInt> X,
                                            unsigned BitWidth) {
  if (!X)
    return std::nullopt;
  unsigned W = X->getBitWidth();
  // NOTE(review): the guard choosing between truncation and pass-through
  // (comparing BitWidth against W) is elided in this rendering.
    return X->trunc(BitWidth);
  return X;
}
10618
/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns std::nullopt if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
/// like x^2 = 5, no integer solutions exist, in other cases an integer
/// solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static std::optional<APInt>
// NOTE(review): the signature line (taking AddRec and ScalarEvolution &SE)
// is elided in this rendering.
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T)
    return std::nullopt;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  std::optional<APInt> X =
  // NOTE(review): the call computing X (a quadratic-equation solve over the
  // widened coefficients) is elided in this rendering.
  if (!X)
    return std::nullopt;

  // Verify the candidate root: the chrec must actually evaluate to zero at X.
  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return std::nullopt;

  return TruncIfPossible(X, BitWidth);
}
10655
10656/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
10657/// iterations. The values M, N are assumed to be signed, and they
10658/// should all have the same bit widths.
10659/// Find the least n such that c(n) does not belong to the given range,
10660/// while c(n-1) does.
10661///
10662/// This function returns std::nullopt if
10663/// (a) the addrec coefficients are not constant, or
10664/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
10665/// bounds of the range.
10666static std::optional<APInt>
// NOTE(review): the first line of the signature is elided in this view;
// the body reads `AddRec` in addition to the parameters below.
10668                          const ConstantRange &Range, ScalarEvolution &SE) {
10669  assert(AddRec->getOperand(0)->isZero() &&
10670         "Starting value of addrec should be 0");
10671  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
10672                    << Range << ", addrec " << *AddRec << '\n');
10673  // This case is handled in getNumIterationsInRange. Here we can assume that
10674  // we start in the range.
10675  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
10676         "Addrec's initial value should be in range");
10677
10678  APInt A, B, C, M;
10679  unsigned BitWidth;
10680  auto T = GetQuadraticEquation(AddRec);
10681  if (!T)
10682    return std::nullopt;
10683
10684  // Be careful about the return value: there can be two reasons for not
10685  // returning an actual number. First, if no solutions to the equations
10686  // were found, and second, if the solutions don't leave the given range.
10687  // The first case means that the actual solution is "unknown", the second
10688  // means that it's known, but not valid. If the solution is unknown, we
10689  // cannot make any conclusions.
10690  // Return a pair: the optional solution and a flag indicating if the
10691  // solution was found.
10692  auto SolveForBoundary =
10693      [&](APInt Bound) -> std::pair<std::optional<APInt>, bool> {
10694    // Solve for signed overflow and unsigned overflow, pick the lower
10695    // solution.
10696    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
10697                      << Bound << " (before multiplying by " << M << ")\n");
10698    Bound *= M; // The quadratic equation multiplier.
10699
10700    std::optional<APInt> SO;
10701    if (BitWidth > 1) {
10702      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
10703                           "signed overflow\n");
// NOTE(review): the call assigning SO (the signed-overflow solution) is
// elided in this view.
10705    }
10706    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
10707                         "unsigned overflow\n");
10708    std::optional<APInt> UO =
// NOTE(review): the call computing UO (the unsigned-overflow solution) is
// elided in this view.
10710
    // An iteration count X "leaves" the range when c(X) falls outside it
    // while c(X-1) is still inside -- i.e., X is the first exiting step.
10711    auto LeavesRange = [&] (const APInt &X) {
10712      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
10713      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
10714      if (Range.contains(V0->getValue()))
10715        return false;
10716      // X should be at least 1, so X-1 is non-negative.
10717      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
10718      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
10719      if (Range.contains(V1->getValue()))
10720        return true;
10721      return false;
10722    };
10723
10724    // If SolveQuadraticEquationWrap returns std::nullopt, it means that there
10725    // can be a solution, but the function failed to find it. We cannot treat it
10726    // as "no solution".
10727    if (!SO || !UO)
10728      return {std::nullopt, false};
10729
10730    // Check the smaller value first to see if it leaves the range.
10731    // At this point, both SO and UO must have values.
10732    std::optional<APInt> Min = MinOptional(SO, UO);
10733    if (LeavesRange(*Min))
10734      return { Min, true };
10735    std::optional<APInt> Max = Min == SO ? UO : SO;
10736    if (LeavesRange(*Max))
10737      return { Max, true };
10738
10739    // Solutions were found, but were eliminated, hence the "true".
10740    return {std::nullopt, true};
10741  };
10742
10743  std::tie(A, B, C, M, BitWidth) = *T;
10744  // Lower bound is inclusive, subtract 1 to represent the exiting value.
10745  APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
10746  APInt Upper = Range.getUpper().sext(A.getBitWidth());
10747  auto SL = SolveForBoundary(Lower);
10748  auto SU = SolveForBoundary(Upper);
10749  // If any of the solutions was unknown, no meaningful conclusions can
10750  // be made.
10751  if (!SL.second || !SU.second)
10752    return std::nullopt;
10753
10754  // Claim: The correct solution is not some value between Min and Max.
10755  //
10756  // Justification: Assuming that Min and Max are different values, one of
10757  // them is when the first signed overflow happens, the other is when the
10758  // first unsigned overflow happens. Crossing the range boundary is only
10759  // possible via an overflow (treating 0 as a special case of it, modeling
10760  // an overflow as crossing k*2^W for some k).
10761  //
10762  // The interesting case here is when Min was eliminated as an invalid
10763  // solution, but Max was not. The argument is that if there was another
10764  // overflow between Min and Max, it would also have been eliminated if
10765  // it was considered.
10766  //
10767  // For a given boundary, it is possible to have two overflows of the same
10768  // type (signed/unsigned) without having the other type in between: this
10769  // can happen when the vertex of the parabola is between the iterations
10770  // corresponding to the overflows. This is only possible when the two
10771  // overflows cross k*2^W for the same k. In such case, if the second one
10772  // left the range (and was the first one to do so), the first overflow
10773  // would have to enter the range, which would mean that either we had left
10774  // the range before or that we started outside of it. Both of these cases
10775  // are contradictions.
10776  //
10777  // Claim: In the case where SolveForBoundary returns std::nullopt, the correct
10778  // solution is not some value between the Max for this boundary and the
10779  // Min of the other boundary.
10780  //
10781  // Justification: Assume that we had such Max_A and Min_B corresponding
10782  // to range boundaries A and B and such that Max_A < Min_B. If there was
10783  // a solution between Max_A and Min_B, it would have to be caused by an
10784  // overflow corresponding to either A or B. It cannot correspond to B,
10785  // since Min_B is the first occurrence of such an overflow. If it
10786  // corresponded to A, it would have to be either a signed or an unsigned
10787  // overflow that is larger than both eliminated overflows for A. But
10788  // between the eliminated overflows and this overflow, the values would
10789  // cover the entire value space, thus crossing the other boundary, which
10790  // is a contradiction.
10791
10792  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
10793}
10794
/// Compute how many iterations it takes for the exit-test value V to become
/// zero in loop L. Per the comment below, V encodes an "x != y" exit test as
/// the single expression x-y, so the exit test is effectively "V != 0".
/// Returns CouldNotCompute when no trip count can be determined.
10795ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(const SCEV *V,
10796                                                         const Loop *L,
10797                                                         bool ControlsOnlyExit,
10798                                                         bool AllowPredicates) {
10799
10800  // This is only used for loops with a "x != y" exit test. The exit condition
10801  // is now expressed as a single expression, V = x-y. So the exit test is
10802  // effectively V != 0. We know and take advantage of the fact that this
10803  // expression only being used in a comparison by zero context.
// NOTE(review): a declaration line is elided here in this view; the body below
// passes a `Predicates` collection to convertSCEVToAddRecWithPredicates and
// ExitLimit -- presumably it is declared on the elided line. Confirm upstream.
10805
10806  // If the value is a constant
10807  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
10808    // If the value is already zero, the branch will execute zero times.
10809    if (C->getValue()->isZero()) return C;
10810    return getCouldNotCompute();  // Otherwise it will loop infinitely.
10811  }
10812
10813  const SCEVAddRecExpr *AddRec =
10814      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
10815
10816  if (!AddRec && AllowPredicates)
10817    // Try to make this an AddRec using runtime tests, in the first X
10818    // iterations of this loop, where X is the SCEV expression found by the
10819    // algorithm below.
10820    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
10821
10822  if (!AddRec || AddRec->getLoop() != L)
10823    return getCouldNotCompute();
10824
10825  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
10826  // the quadratic equation to solve it.
10827  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
10828    // We can only use this value if the chrec ends up with an exact zero
10829    // value at this index. When solving for "X*X != 5", for example, we
10830    // should not accept a root of 2.
10831    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
10832      const auto *R = cast<SCEVConstant>(getConstant(*S));
10833      return ExitLimit(R, R, R, false, Predicates);
10834    }
10835    return getCouldNotCompute();
10836  }
10837
10838  // Otherwise we can only handle this if it is affine.
10839  if (!AddRec->isAffine())
10840    return getCouldNotCompute();
10841
10842  // If this is an affine expression, the execution count of this branch is
10843  // the minimum unsigned root of the following equation:
10844  //
10845  //     Start + Step*N = 0 (mod 2^BW)
10846  //
10847  // equivalent to:
10848  //
10849  //             Step*N = -Start (mod 2^BW)
10850  //
10851  // where BW is the common bit width of Start and Step.
10852
10853  // Get the initial value for the loop.
10854  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
10855  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
10856
10857  if (!isLoopInvariant(Step, L))
10858    return getCouldNotCompute();
10859
10860  LoopGuards Guards = LoopGuards::collect(L, *this);
10861  // Specialize step for this loop so we get context sensitive facts below.
10862  const SCEV *StepWLG = applyLoopGuards(Step, Guards);
10863
10864  // For positive steps (counting up until unsigned overflow):
10865  //   N = -Start/Step (as unsigned)
10866  // For negative steps (counting down to zero):
10867  //   N = Start/-Step
10868  // First compute the unsigned distance from zero in the direction of Step.
10869  bool CountDown = isKnownNegative(StepWLG);
10870  if (!CountDown && !isKnownNonNegative(StepWLG))
10871    return getCouldNotCompute();
10872
10873  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
10874  // Handle unitary steps, which cannot wraparound.
10875  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
10876  //   N = Distance (as unsigned)
10877
10878  if (match(Step, m_CombineOr(m_scev_One(), m_scev_AllOnes()))) {
    // Take the tighter of the guard-refined and plain unsigned range maxima.
10879    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, Guards));
10880    MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance));
10881
10882    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
10883    // we end up with a loop whose backedge-taken count is n - 1.  Detect this
10884    // case, and see if we can improve the bound.
10885    //
10886    // Explicitly handling this here is necessary because getUnsignedRange
10887    // isn't context-sensitive; it doesn't know that we only care about the
10888    // range inside the loop.
10889    const SCEV *Zero = getZero(Distance->getType());
10890    const SCEV *One = getOne(Distance->getType());
10891    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
10892    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
10893      // If Distance + 1 doesn't overflow, we can compute the maximum distance
10894      // as "unsigned_max(Distance + 1) - 1".
10895      ConstantRange CR = getUnsignedRange(DistancePlusOne);
10896      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
10897    }
10898    return ExitLimit(Distance, getConstant(MaxBECount), Distance, false,
10899                     Predicates);
10900  }
10901
10902  // If the condition controls loop exit (the loop exits only if the expression
10903  // is true) and the addition is no-wrap we can use unsigned divide to
10904  // compute the backedge count.  In this case, the step may not divide the
10905  // distance, but we don't care because if the condition is "missed" the loop
10906  // will have undefined behavior due to wrapping.
10907  if (ControlsOnlyExit && AddRec->hasNoSelfWrap() &&
10908      loopHasNoAbnormalExits(AddRec->getLoop())) {
10909
10910    // If the stride is zero and the start is non-zero, the loop must be
10911    // infinite. In C++, most loops are finite by assumption, in which case the
10912    // step being zero implies UB must execute if the loop is entered.
10913    if (!(loopIsFiniteByAssumption(L) && isKnownNonZero(Start)) &&
10914        !isKnownNonZero(StepWLG))
10915      return getCouldNotCompute();
10916
10917    const SCEV *Exact =
10918        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
10919    const SCEV *ConstantMax = getCouldNotCompute();
10920    if (Exact != getCouldNotCompute()) {
10921      APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, Guards));
10922      ConstantMax =
// NOTE(review): the expression assigned to ConstantMax is elided in this
// view; presumably it combines MaxInt with getUnsignedRangeMax(Exact),
// mirroring the umin pattern used elsewhere in this function -- confirm.
10924    }
10925    const SCEV *SymbolicMax =
10926        isa<SCEVCouldNotCompute>(Exact) ? ConstantMax : Exact;
10927    return ExitLimit(Exact, ConstantMax, SymbolicMax, false, Predicates);
10928  }
10929
10930  // Solve the general equation.
10931  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
10932  if (!StepC || StepC->getValue()->isZero())
10933    return getCouldNotCompute();
10934  const SCEV *E = SolveLinEquationWithOverflow(
10935      StepC->getAPInt(), getNegativeSCEV(Start),
10936      AllowPredicates ? &Predicates : nullptr, *this, L);
10937
10938  const SCEV *M = E;
10939  if (E != getCouldNotCompute()) {
10940    APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, Guards));
10941    M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E)));
10942  }
10943  auto *S = isa<SCEVCouldNotCompute>(E) ? M : E;
10944  return ExitLimit(E, M, S, false, Predicates);
10945}
10946
10947ScalarEvolution::ExitLimit
10948ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
10949 // Loops that look like: while (X == 0) are very strange indeed. We don't
10950 // handle them yet except for the trivial case. This could be expanded in the
10951 // future as needed.
10952
10953 // If the value is a constant, check to see if it is known to be non-zero
10954 // already. If so, the backedge will execute zero times.
10955 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
10956 if (!C->getValue()->isZero())
10957 return getZero(C->getType());
10958 return getCouldNotCompute(); // Otherwise it will loop infinitely.
10959 }
10960
10961 // We could implement others, but I really doubt anyone writes loops like
10962 // this, and if they did, they would already be constant folded.
10963 return getCouldNotCompute();
10964}
10965
10966std::pair<const BasicBlock *, const BasicBlock *>
10967ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
10968 const {
10969 // If the block has a unique predecessor, then there is no path from the
10970 // predecessor to the block that does not go through the direct edge
10971 // from the predecessor to the block.
10972 if (const BasicBlock *Pred = BB->getSinglePredecessor())
10973 return {Pred, BB};
10974
10975 // A loop's header is defined to be a block that dominates the loop.
10976 // If the header has a unique predecessor outside the loop, it must be
10977 // a block that has exactly one successor that can reach the loop.
10978 if (const Loop *L = LI.getLoopFor(BB))
10979 return {L->getLoopPredecessor(), L->getHeader()};
10980
10981 return {nullptr, BB};
10982}
10983
10984/// SCEV structural equivalence is usually sufficient for testing whether two
10985/// expressions are equal, however for the purposes of looking for a condition
10986/// guarding a loop, it can be useful to be a little more general, since a
10987/// front-end may have replicated the controlling expression.
10988static bool HasSameValue(const SCEV *A, const SCEV *B) {
10989 // Quick check to see if they are the same SCEV.
10990 if (A == B) return true;
10991
10992 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
10993 // Not all instructions that are "identical" compute the same value. For
10994 // instance, two distinct alloca instructions allocating the same type are
10995 // identical and do not read memory; but compute distinct values.
10996 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
10997 };
10998
10999 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
11000 // two different instructions with the same value. Check for this case.
11001 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
11002 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
11003 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
11004 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
11005 if (ComputesEqualValues(AI, BI))
11006 return true;
11007
11008 // Otherwise assume they may have a different value.
11009 return false;
11010}
11011
11012static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS) {
11013 const SCEV *Op0, *Op1;
11014 if (!match(S, m_scev_Add(m_SCEV(Op0), m_SCEV(Op1))))
11015 return false;
11016 if (match(Op0, m_scev_Mul(m_scev_AllOnes(), m_SCEV(RHS)))) {
11017 LHS = Op1;
11018 return true;
11019 }
11020 if (match(Op1, m_scev_Mul(m_scev_AllOnes(), m_SCEV(RHS)))) {
11021 LHS = Op0;
11022 return true;
11023 }
11024 return false;
11025}
11026
11028 SCEVUse &RHS, unsigned Depth) {
11029 bool Changed = false;
11030 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
11031 // '0 != 0'.
11032 auto TrivialCase = [&](bool TriviallyTrue) {
11034 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
11035 return true;
11036 };
11037 // If we hit the max recursion limit bail out.
11038 if (Depth >= 3)
11039 return false;
11040
11041 const SCEV *NewLHS, *NewRHS;
11042 if (match(LHS, m_scev_c_Mul(m_SCEV(NewLHS), m_SCEVVScale())) &&
11043 match(RHS, m_scev_c_Mul(m_SCEV(NewRHS), m_SCEVVScale()))) {
11044 const SCEVMulExpr *LMul = cast<SCEVMulExpr>(LHS);
11045 const SCEVMulExpr *RMul = cast<SCEVMulExpr>(RHS);
11046
11047 // (X * vscale) pred (Y * vscale) ==> X pred Y
11048 // when both multiples are NSW.
11049 // (X * vscale) uicmp/eq/ne (Y * vscale) ==> X uicmp/eq/ne Y
11050 // when both multiples are NUW.
11051 if ((LMul->hasNoSignedWrap() && RMul->hasNoSignedWrap()) ||
11052 (LMul->hasNoUnsignedWrap() && RMul->hasNoUnsignedWrap() &&
11053 !ICmpInst::isSigned(Pred))) {
11054 LHS = NewLHS;
11055 RHS = NewRHS;
11056 Changed = true;
11057 }
11058 }
11059
11060 // Canonicalize a constant to the right side.
11061 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
11062 // Check for both operands constant.
11063 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
11064 if (!ICmpInst::compare(LHSC->getAPInt(), RHSC->getAPInt(), Pred))
11065 return TrivialCase(false);
11066 return TrivialCase(true);
11067 }
11068 // Otherwise swap the operands to put the constant on the right.
11069 std::swap(LHS, RHS);
11071 Changed = true;
11072 }
11073
11074 // If we're comparing an addrec with a value which is loop-invariant in the
11075 // addrec's loop, put the addrec on the left. Also make a dominance check,
11076 // as both operands could be addrecs loop-invariant in each other's loop.
11077 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
11078 const Loop *L = AR->getLoop();
11079 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
11080 std::swap(LHS, RHS);
11082 Changed = true;
11083 }
11084 }
11085
11086 // If there's a constant operand, canonicalize comparisons with boundary
11087 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
11088 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
11089 const APInt &RA = RC->getAPInt();
11090
11091 bool SimplifiedByConstantRange = false;
11092
11093 if (!ICmpInst::isEquality(Pred)) {
11095 if (ExactCR.isFullSet())
11096 return TrivialCase(true);
11097 if (ExactCR.isEmptySet())
11098 return TrivialCase(false);
11099
11100 APInt NewRHS;
11101 CmpInst::Predicate NewPred;
11102 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
11103 ICmpInst::isEquality(NewPred)) {
11104 // We were able to convert an inequality to an equality.
11105 Pred = NewPred;
11106 RHS = getConstant(NewRHS);
11107 Changed = SimplifiedByConstantRange = true;
11108 }
11109 }
11110
11111 if (!SimplifiedByConstantRange) {
11112 switch (Pred) {
11113 default:
11114 break;
11115 case ICmpInst::ICMP_EQ:
11116 case ICmpInst::ICMP_NE:
11117 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
11118 if (RA.isZero() && MatchBinarySub(LHS, LHS, RHS))
11119 Changed = true;
11120 break;
11121
11122 // The "Should have been caught earlier!" messages refer to the fact
11123 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
11124 // should have fired on the corresponding cases, and canonicalized the
11125 // check to trivial case.
11126
11127 case ICmpInst::ICMP_UGE:
11128 assert(!RA.isMinValue() && "Should have been caught earlier!");
11129 Pred = ICmpInst::ICMP_UGT;
11130 RHS = getConstant(RA - 1);
11131 Changed = true;
11132 break;
11133 case ICmpInst::ICMP_ULE:
11134 assert(!RA.isMaxValue() && "Should have been caught earlier!");
11135 Pred = ICmpInst::ICMP_ULT;
11136 RHS = getConstant(RA + 1);
11137 Changed = true;
11138 break;
11139 case ICmpInst::ICMP_SGE:
11140 assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
11141 Pred = ICmpInst::ICMP_SGT;
11142 RHS = getConstant(RA - 1);
11143 Changed = true;
11144 break;
11145 case ICmpInst::ICMP_SLE:
11146 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
11147 Pred = ICmpInst::ICMP_SLT;
11148 RHS = getConstant(RA + 1);
11149 Changed = true;
11150 break;
11151 }
11152 }
11153 }
11154
11155 // Check for obvious equality.
11156 if (HasSameValue(LHS, RHS)) {
11157 if (ICmpInst::isTrueWhenEqual(Pred))
11158 return TrivialCase(true);
11160 return TrivialCase(false);
11161 }
11162
11163 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
11164 // adding or subtracting 1 from one of the operands.
11165 switch (Pred) {
11166 case ICmpInst::ICMP_SLE:
11167 if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
11168 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
11170 Pred = ICmpInst::ICMP_SLT;
11171 Changed = true;
11172 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
11173 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
11175 Pred = ICmpInst::ICMP_SLT;
11176 Changed = true;
11177 }
11178 break;
11179 case ICmpInst::ICMP_SGE:
11180 if (!getSignedRangeMin(RHS).isMinSignedValue()) {
11181 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
11183 Pred = ICmpInst::ICMP_SGT;
11184 Changed = true;
11185 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
11186 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
11188 Pred = ICmpInst::ICMP_SGT;
11189 Changed = true;
11190 }
11191 break;
11192 case ICmpInst::ICMP_ULE:
11193 if (!getUnsignedRangeMax(RHS).isMaxValue()) {
11194 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
11196 Pred = ICmpInst::ICMP_ULT;
11197 Changed = true;
11198 } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
11199 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
11200 Pred = ICmpInst::ICMP_ULT;
11201 Changed = true;
11202 }
11203 break;
11204 case ICmpInst::ICMP_UGE:
11205 // If RHS is an op we can fold the -1, try that first.
11206 // Otherwise prefer LHS to preserve the nuw flag.
11207 if ((isa<SCEVConstant>(RHS) ||
11209 isa<SCEVConstant>(cast<SCEVNAryExpr>(RHS)->getOperand(0)))) &&
11210 !getUnsignedRangeMin(RHS).isMinValue()) {
11211 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
11212 Pred = ICmpInst::ICMP_UGT;
11213 Changed = true;
11214 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
11215 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
11217 Pred = ICmpInst::ICMP_UGT;
11218 Changed = true;
11219 } else if (!getUnsignedRangeMin(RHS).isMinValue()) {
11220 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
11221 Pred = ICmpInst::ICMP_UGT;
11222 Changed = true;
11223 }
11224 break;
11225 default:
11226 break;
11227 }
11228
11229 // TODO: More simplifications are possible here.
11230
11231 // Recursively simplify until we either hit a recursion limit or nothing
11232 // changes.
11233 if (Changed)
11234 (void)SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1);
11235
11236 return Changed;
11237}
11238
// NOTE(review): three one-line predicate helpers follow; their signature
// lines are elided in this view (presumably ScalarEvolution::isKnownNegative,
// ScalarEvolution::isKnownNonNegative, and ScalarEvolution::isKnownNonZero
// -- confirm against the full file).
// S is known negative iff even its largest signed value is negative.
11240  return getSignedRangeMax(S).isNegative();
11241}
11242
11246
// S is known non-negative iff its smallest signed value is not negative.
11248  return !getSignedRangeMin(S).isNegative();
11249}
11250
11254
11256  // Query push down for cases where the unsigned range is
11257  // less than sufficient.
  // Sign extension preserves zero-ness, so ask about the narrower operand.
11258  if (const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S))
11259    return isKnownNonZero(SExt->getOperand(0));
  // Otherwise, non-zero iff zero is outside the unsigned range.
11260  return getUnsignedRangeMin(S) != 0;
11261}
11262
// NOTE(review): the first line of the signature is elided in this view; the
// body reads parameters S, OrZero and OrNegative.
11264                                             bool OrNegative) {
  // A leaf is a power of two when it is a suitable constant or a vscale.
11265  auto NonRecursive = [OrNegative](const SCEV *S) {
11266    if (auto *C = dyn_cast<SCEVConstant>(S))
11267      return C->getAPInt().isPowerOf2() ||
11268             (OrNegative && C->getAPInt().isNegatedPowerOf2());
11269
11270    // vscale is a power-of-two.
11271    return isa<SCEVVScale>(S);
11272  };
11273
11274  if (NonRecursive(S))
11275    return true;
11276
  // A product of power-of-two leaves is a power of two, provided the product
  // is known non-zero (unless the caller tolerates zero via OrZero).
11277  auto *Mul = dyn_cast<SCEVMulExpr>(S);
11278  if (!Mul)
11279    return false;
11280  return all_of(Mul->operands(), NonRecursive) && (OrZero || isKnownNonZero(S));
11281}
11282
// NOTE(review): the first signature line and the Assumptions-parameter line
// are elided in this view; the body reads S, M and an Assumptions collection.
11284                                        const SCEV *S, uint64_t M,
11286  if (M == 0)
11287    return false;
  // Everything is a multiple of 1.
11288  if (M == 1)
11289    return true;
11290
11291  // Recursively check AddRec operands. An AddRecExpr S is a multiple of M if S
11292  // starts with a multiple of M and at every iteration step S only adds
11293  // multiples of M.
11294  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
11295    return isKnownMultipleOf(AddRec->getStart(), M, Assumptions) &&
11296           isKnownMultipleOf(AddRec->getStepRecurrence(*this), M, Assumptions);
11297
11298  // For a constant, check that "S % M == 0".
11299  if (auto *Cst = dyn_cast<SCEVConstant>(S)) {
11300    APInt C = Cst->getAPInt();
11301    return C.urem(M) == 0;
11302  }
11303
11304  // TODO: Also check other SCEV expressions, i.e., SCEVAddRecExpr, etc.
11305
11306  // Basic tests have failed.
11307  // Check "S % M == 0" at compile time and record runtime Assumptions.
11308  auto *STy = dyn_cast<IntegerType>(S->getType());
11309  const SCEV *SmodM =
11310      getURemExpr(S, getConstant(ConstantInt::get(STy, M, false)));
11311  const SCEV *Zero = getZero(STy);
11312
11313  // Check whether "S % M == 0" is known at compile time.
11314  if (isKnownPredicate(ICmpInst::ICMP_EQ, SmodM, Zero))
11315    return true;
11316
11317  // Check whether "S % M != 0" is known at compile time.
11318  if (isKnownPredicate(ICmpInst::ICMP_NE, SmodM, Zero))
11319    return false;
11320
// NOTE(review): the statement constructing the runtime predicate P
// (presumably a comparison predicate for "S % M == 0") is elided here.
11322
11323  // Detect redundant predicates.
11324  for (auto *A : Assumptions)
11325    if (A->implies(P, *this))
11326      return true;
11327
11328  // Only record non-redundant predicates.
11329  Assumptions.push_back(P);
11330  return true;
11331}
11332
// NOTE(review): the signature and the second disjunct line of this helper are
// elided in this view; the visible code tests that S1 and S2 have matching
// known signs (both non-negative, or -- presumably, on the elided line --
// both negative). Confirm against the full file.
11334  return ((isKnownNonNegative(S1) && isKnownNonNegative(S2)) ||
11336}
11337
11338std::pair<const SCEV *, const SCEV *>
// NOTE(review): the signature line (presumably
// ScalarEvolution::SplitIntoInitAndPostInc taking a Loop *L and an expression
// S) is elided in this view.
11340  // Compute SCEV on entry of loop L.
11341  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  // Propagate failure: both pair elements are CouldNotCompute.
11342  if (Start == getCouldNotCompute())
11343    return { Start, Start };
11344  // Compute post increment SCEV for loop L.
11345  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
11346  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
11347  return { Start, PostInc };
11348}
11349
// Prove "LHS Pred RHS" by induction over the outermost loop both expressions
// depend on: check the predicate at loop entry and across the backedge.
// NOTE(review): the first signature line and the declaration of LoopsUsed
// are elided in this view.
11351                                          SCEVUse RHS) {
11352  // First collect all loops.
11354  getUsedLoops(LHS, LoopsUsed);
11355  getUsedLoops(RHS, LoopsUsed);
11356
  // Loop-free expressions cannot be proven by induction.
11357  if (LoopsUsed.empty())
11358    return false;
11359
11360  // Domination relationship must be a linear order on collected loops.
11361#ifndef NDEBUG
11362  for (const auto *L1 : LoopsUsed)
11363    for (const auto *L2 : LoopsUsed)
11364      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11365              DT.dominates(L2->getHeader(), L1->getHeader())) &&
11366             "Domination relationship is not a linear order");
11367#endif
11368
  // MDL = the most deeply nested (maximally dominated) of the used loops.
11369  const Loop *MDL =
11370      *llvm::max_element(LoopsUsed, [&](const Loop *L1, const Loop *L2) {
11371        return DT.properlyDominates(L1->getHeader(), L2->getHeader());
11372      });
11373
11374  // Get init and post increment value for LHS.
11375  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
11376  // if LHS contains unknown non-invariant SCEV then bail out.
11377  if (SplitLHS.first == getCouldNotCompute())
11378    return false;
11379  assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
11380  // Get init and post increment value for RHS.
11381  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
11382  // if RHS contains unknown non-invariant SCEV then bail out.
11383  if (SplitRHS.first == getCouldNotCompute())
11384    return false;
11385  assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
11386  // It is possible that init SCEV contains an invariant load but it does
11387  // not dominate MDL and is not available at MDL loop entry, so we should
11388  // check it here.
11389  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
11390      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
11391    return false;
11392
11393  // It seems backedge guard check is faster than entry one so in some cases
11394  // it can speed up whole estimation by short circuit
11395  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
11396                                     SplitRHS.second) &&
11397         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
11398}
11399
// Top-level predicate query: canonicalize, then try induction, splitting,
// and finally direct (non-recursive) reasoning.
// NOTE(review): the first signature line is elided in this view.
11401                                       SCEVUse RHS) {
11402  // Canonicalize the inputs first.
11403  (void)SimplifyICmpOperands(Pred, LHS, RHS);
11404
11405  if (isKnownViaInduction(Pred, LHS, RHS))
11406    return true;
11407
11408  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11409    return true;
11410
11411  // Otherwise see what can be done with some simple reasoning.
11412  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11413}
11414
// Three-way predicate evaluation: true/false when provable either way,
// std::nullopt when neither direction can be established.
// NOTE(review): the first signature line is elided in this view.
11416                                           const SCEV *LHS,
11417                                           const SCEV *RHS) {
11418  if (isKnownPredicate(Pred, LHS, RHS))
11419    return true;
// NOTE(review): the condition guarding the `false` return (presumably a
// check that the inverse predicate is known) is elided in this view.
11421    return false;
11422  return std::nullopt;
11423}
11424
// Context-sensitive variant: also accepts the predicate if the entry of
// CtxI's block is guarded by the condition.
// NOTE(review): the first signature line is elided in this view.
11426                                         const SCEV *RHS,
11427                                         const Instruction *CtxI) {
11428  // TODO: Analyze guards and assumes from Context's block.
11429  return isKnownPredicate(Pred, LHS, RHS) ||
11430         isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
11431}
11432
11433std::optional<bool>
// NOTE(review): the signature line is elided in this view (presumably
// ScalarEvolution::evaluatePredicateAt).
11435                                     const SCEV *RHS, const Instruction *CtxI) {
  // Try the context-free three-way evaluation first.
11436  std::optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
11437  if (KnownWithoutContext)
11438    return KnownWithoutContext;
11439
  // Otherwise consult block-entry guards for both the predicate and its
  // inverse.
11440  if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS))
11441    return true;
// NOTE(review): the first line of the guarded-by-inverse check is elided in
// this view.
11443          CtxI->getParent(), ICmpInst::getInverseCmpPredicate(Pred), LHS, RHS))
11444    return false;
11445  return std::nullopt;
11446}
11447
// The predicate holds on every iteration iff it holds at loop entry for the
// addrec's start and is maintained across the backedge for the post-inc form.
// NOTE(review): the first signature line is elided in this view.
11449                                              const SCEVAddRecExpr *LHS,
11450                                              const SCEV *RHS) {
11451  const Loop *L = LHS->getLoop();
11452  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
11453         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
11454}
11455
11456std::optional<ScalarEvolution::MonotonicPredicateType>
// NOTE(review): the signature line is elided in this view (presumably
// ScalarEvolution::getMonotonicPredicateType taking LHS and Pred).
11458                                           ICmpInst::Predicate Pred) {
  // Delegate to the impl; the wrapper only adds a debug-mode sanity check.
11459  auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11460
11461#ifndef NDEBUG
11462  // Verify an invariant: inverting the predicate should turn a monotonically
11463  // increasing change to a monotonically decreasing one, and vice versa.
11464  if (Result) {
11465    auto ResultSwapped =
11466        getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
11467
11468    assert(*ResultSwapped != *Result &&
11469           "monotonicity should flip as we flip the predicate");
11470  }
11471#endif
11472
11473  return Result;
11474}
11475
11476std::optional<ScalarEvolution::MonotonicPredicateType>
11477ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
11478                                               ICmpInst::Predicate Pred) {
11479  // A zero step value for LHS means the induction variable is essentially a
11480  // loop invariant value. We don't really depend on the predicate actually
11481  // flipping from false to true (for increasing predicates, and the other way
11482  // around for decreasing predicates), all we care about is that *if* the
11483  // predicate changes then it only changes from false to true.
11484  //
11485  // A zero step value in itself is not very useful, but there may be places
11486  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
11487  // as general as possible.
11488
11489  // Only handle LE/LT/GE/GT predicates.
11490  if (!ICmpInst::isRelational(Pred))
11491    return std::nullopt;
11492
11493  bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
11494  assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
11495         "Should be greater or less!");
11496
11497  // Check that AR does not wrap.
11498  if (ICmpInst::isUnsigned(Pred)) {
11499    if (!LHS->hasNoUnsignedWrap())
11500      return std::nullopt;
// NOTE(review): the return statement for the unsigned case is elided in
// this view.
11502  }
11503  assert(ICmpInst::isSigned(Pred) &&
11504         "Relational predicate is either signed or unsigned!");
11505  if (!LHS->hasNoSignedWrap())
11506    return std::nullopt;
11507
11508  const SCEV *Step = LHS->getStepRecurrence(*this);
11509
11510  if (isKnownNonNegative(Step))
// NOTE(review): the return statement for the non-negative-step case is
// elided in this view.
11512
11513  if (isKnownNonPositive(Step))
// NOTE(review): the return statement for the non-positive-step case is
// elided in this view.
11515
11516  return std::nullopt;
11517}
11518
11519std::optional<ScalarEvolution::LoopInvariantPredicate>
11521 const SCEV *RHS, const Loop *L,
11522 const Instruction *CtxI) {
11523 // If there is a loop-invariant, force it into the RHS, otherwise bail out.
11524 if (!isLoopInvariant(RHS, L)) {
11525 if (!isLoopInvariant(LHS, L))
11526 return std::nullopt;
11527
11528 std::swap(LHS, RHS);
11530 }
11531
11532 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
11533 if (!ArLHS || ArLHS->getLoop() != L)
11534 return std::nullopt;
11535
11536 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
11537 if (!MonotonicType)
11538 return std::nullopt;
11539 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
11540 // true as the loop iterates, and the backedge is control dependent on
11541 // "ArLHS `Pred` RHS" == true then we can reason as follows:
11542 //
11543 // * if the predicate was false in the first iteration then the predicate
11544 // is never evaluated again, since the loop exits without taking the
11545 // backedge.
11546 // * if the predicate was true in the first iteration then it will
11547 // continue to be true for all future iterations since it is
11548 // monotonically increasing.
11549 //
11550 // For both the above possibilities, we can replace the loop varying
11551 // predicate with its value on the first iteration of the loop (which is
11552 // loop invariant).
11553 //
11554 // A similar reasoning applies for a monotonically decreasing predicate, by
11555 // replacing true with false and false with true in the above two bullets.
11557 auto P = Increasing ? Pred : ICmpInst::getInverseCmpPredicate(Pred);
11558
11559 if (isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
11561 RHS);
11562
11563 if (!CtxI)
11564 return std::nullopt;
11565 // Try to prove via context.
11566 // TODO: Support other cases.
11567 switch (Pred) {
11568 default:
11569 break;
11570 case ICmpInst::ICMP_ULE:
11571 case ICmpInst::ICMP_ULT: {
11572 assert(ArLHS->hasNoUnsignedWrap() && "Is a requirement of monotonicity!");
11573 // Given preconditions
11574 // (1) ArLHS does not cross the border of positive and negative parts of
11575 // range because of:
11576 // - Positive step; (TODO: lift this limitation)
11577 // - nuw - does not cross zero boundary;
11578 // - nsw - does not cross SINT_MAX boundary;
11579 // (2) ArLHS <s RHS
11580 // (3) RHS >=s 0
11581 // we can replace the loop variant ArLHS <u RHS condition with loop
11582 // invariant Start(ArLHS) <u RHS.
11583 //
11584 // Because of (1) there are two options:
11585 // - ArLHS is always negative. It means that ArLHS <u RHS is always false;
11586 // - ArLHS is always non-negative. Because of (3) RHS is also non-negative.
11587 // It means that ArLHS <s RHS <=> ArLHS <u RHS.
11588 // Because of (2) ArLHS <u RHS is trivially true.
11589 // All together it means that ArLHS <u RHS <=> Start(ArLHS) >=s 0.
11590 // We can strengthen this to Start(ArLHS) <u RHS.
11591 auto SignFlippedPred = ICmpInst::getFlippedSignednessPredicate(Pred);
11592 if (ArLHS->hasNoSignedWrap() && ArLHS->isAffine() &&
11593 isKnownPositive(ArLHS->getStepRecurrence(*this)) &&
11594 isKnownNonNegative(RHS) &&
11595 isKnownPredicateAt(SignFlippedPred, ArLHS, RHS, CtxI))
11597 RHS);
11598 }
11599 }
11600
11601 return std::nullopt;
11602}
11603
11604std::optional<ScalarEvolution::LoopInvariantPredicate>
11606 CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
11607 const Instruction *CtxI, const SCEV *MaxIter) {
11609 Pred, LHS, RHS, L, CtxI, MaxIter))
11610 return LIP;
11611 if (auto *UMin = dyn_cast<SCEVUMinExpr>(MaxIter))
11612 // Number of iterations expressed as UMIN isn't always great for expressing
11613 // the value on the last iteration. If the straightforward approach didn't
11614 // work, try the following trick: if the a predicate is invariant for X, it
11615 // is also invariant for umin(X, ...). So try to find something that works
11616 // among subexpressions of MaxIter expressed as umin.
11617 for (SCEVUse Op : UMin->operands())
11619 Pred, LHS, RHS, L, CtxI, Op))
11620 return LIP;
11621 return std::nullopt;
11622}
11623
11624std::optional<ScalarEvolution::LoopInvariantPredicate>
11626 CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
11627 const Instruction *CtxI, const SCEV *MaxIter) {
11628 // Try to prove the following set of facts:
11629 // - The predicate is monotonic in the iteration space.
11630 // - If the check does not fail on the 1st iteration:
11631 // - No overflow will happen during first MaxIter iterations;
11632 // - It will not fail on the MaxIter'th iteration.
11633 // If the check does fail on the 1st iteration, we leave the loop and no
11634 // other checks matter.
11635
11636 // If there is a loop-invariant, force it into the RHS, otherwise bail out.
11637 if (!isLoopInvariant(RHS, L)) {
11638 if (!isLoopInvariant(LHS, L))
11639 return std::nullopt;
11640
11641 std::swap(LHS, RHS);
11643 }
11644
11645 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
11646 if (!AR || AR->getLoop() != L)
11647 return std::nullopt;
11648
11649 // Even if both are valid, we need to consistently chose the unsigned or the
11650 // signed predicate below, not mixtures of both. For now, prefer the unsigned
11651 // predicate.
11652 Pred = Pred.dropSameSign();
11653
11654 // The predicate must be relational (i.e. <, <=, >=, >).
11655 if (!ICmpInst::isRelational(Pred))
11656 return std::nullopt;
11657
11658 // TODO: Support steps other than +/- 1.
11659 const SCEV *Step = AR->getStepRecurrence(*this);
11660 auto *One = getOne(Step->getType());
11661 auto *MinusOne = getNegativeSCEV(One);
11662 if (Step != One && Step != MinusOne)
11663 return std::nullopt;
11664
11665 // Type mismatch here means that MaxIter is potentially larger than max
11666 // unsigned value in start type, which mean we cannot prove no wrap for the
11667 // indvar.
11668 if (AR->getType() != MaxIter->getType())
11669 return std::nullopt;
11670
11671 // Value of IV on suggested last iteration.
11672 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
11673 // Does it still meet the requirement?
11674 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
11675 return std::nullopt;
11676 // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
11677 // not exceed max unsigned value of this type), this effectively proves
11678 // that there is no wrap during the iteration. To prove that there is no
11679 // signed/unsigned wrap, we need to check that
11680 // Start <= Last for step = 1 or Start >= Last for step = -1.
11681 ICmpInst::Predicate NoOverflowPred =
11683 if (Step == MinusOne)
11684 NoOverflowPred = ICmpInst::getSwappedPredicate(NoOverflowPred);
11685 const SCEV *Start = AR->getStart();
11686 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
11687 return std::nullopt;
11688
11689 // Everything is fine.
11690 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
11691}
11692
11693bool ScalarEvolution::isKnownPredicateViaConstantRanges(CmpPredicate Pred,
11694 SCEVUse LHS,
11695 SCEVUse RHS) {
11696 if (HasSameValue(LHS, RHS))
11697 return ICmpInst::isTrueWhenEqual(Pred);
11698
11699 auto CheckRange = [&](bool IsSigned) {
11700 auto RangeLHS = IsSigned ? getSignedRange(LHS) : getUnsignedRange(LHS);
11701 auto RangeRHS = IsSigned ? getSignedRange(RHS) : getUnsignedRange(RHS);
11702 return RangeLHS.icmp(Pred, RangeRHS);
11703 };
11704
11705 // The check at the top of the function catches the case where the values are
11706 // known to be equal.
11707 if (Pred == CmpInst::ICMP_EQ)
11708 return false;
11709
11710 if (Pred == CmpInst::ICMP_NE) {
11711 if (CheckRange(true) || CheckRange(false))
11712 return true;
11713 auto *Diff = getMinusSCEV(LHS, RHS);
11714 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
11715 }
11716
11717 return CheckRange(CmpInst::isSigned(Pred));
11718}
11719
11720bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11722 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
11723 // C1 and C2 are constant integers. If either X or Y are not add expressions,
11724 // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via
11725 // OutC1 and OutC2.
11726 auto MatchBinaryAddToConst = [this](SCEVUse X, SCEVUse Y, APInt &OutC1,
11727 APInt &OutC2,
11728 SCEV::NoWrapFlags ExpectedFlags) {
11729 SCEVUse XNonConstOp, XConstOp;
11730 SCEVUse YNonConstOp, YConstOp;
11731 SCEV::NoWrapFlags XFlagsPresent;
11732 SCEV::NoWrapFlags YFlagsPresent;
11733
11734 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
11735 XConstOp = getZero(X->getType());
11736 XNonConstOp = X;
11737 XFlagsPresent = ExpectedFlags;
11738 }
11739 if (!isa<SCEVConstant>(XConstOp))
11740 return false;
11741
11742 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11743 YConstOp = getZero(Y->getType());
11744 YNonConstOp = Y;
11745 YFlagsPresent = ExpectedFlags;
11746 }
11747
11748 if (YNonConstOp != XNonConstOp)
11749 return false;
11750
11751 if (!isa<SCEVConstant>(YConstOp))
11752 return false;
11753
11754 // When matching ADDs with NUW flags (and unsigned predicates), only the
11755 // second ADD (with the larger constant) requires NUW.
11756 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11757 return false;
11758 if (ExpectedFlags != SCEV::FlagNUW &&
11759 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11760 return false;
11761 }
11762
11763 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
11764 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
11765
11766 return true;
11767 };
11768
11769 APInt C1;
11770 APInt C2;
11771
11772 switch (Pred) {
11773 default:
11774 break;
11775
11776 case ICmpInst::ICMP_SGE:
11777 std::swap(LHS, RHS);
11778 [[fallthrough]];
11779 case ICmpInst::ICMP_SLE:
11780 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
11781 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
11782 return true;
11783
11784 break;
11785
11786 case ICmpInst::ICMP_SGT:
11787 std::swap(LHS, RHS);
11788 [[fallthrough]];
11789 case ICmpInst::ICMP_SLT:
11790 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
11791 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
11792 return true;
11793
11794 break;
11795
11796 case ICmpInst::ICMP_UGE:
11797 std::swap(LHS, RHS);
11798 [[fallthrough]];
11799 case ICmpInst::ICMP_ULE:
11800 // (X + C1) u<= (X + C2)<nuw> for C1 u<= C2.
11801 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNUW) && C1.ule(C2))
11802 return true;
11803
11804 break;
11805
11806 case ICmpInst::ICMP_UGT:
11807 std::swap(LHS, RHS);
11808 [[fallthrough]];
11809 case ICmpInst::ICMP_ULT:
11810 // (X + C1) u< (X + C2)<nuw> if C1 u< C2.
11811 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNUW) && C1.ult(C2))
11812 return true;
11813 break;
11814 }
11815
11816 return false;
11817}
11818
11819bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11821 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
11822 return false;
11823
11824 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on
11825 // the stack can result in exponential time complexity.
11826 SaveAndRestore Restore(ProvingSplitPredicate, true);
11827
11828 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
11829 //
11830 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
11831 // isKnownPredicate. isKnownPredicate is more powerful, but also more
11832 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
11833 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
11834 // use isKnownPredicate later if needed.
11835 return isKnownNonNegative(RHS) &&
11838}
11839
11840bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, CmpPredicate Pred,
11841 const SCEV *LHS, const SCEV *RHS) {
11842 // No need to even try if we know the module has no guards.
11843 if (!HasGuards)
11844 return false;
11845
11846 return any_of(*BB, [&](const Instruction &I) {
11847 using namespace llvm::PatternMatch;
11848
11849 Value *Condition;
11851 m_Value(Condition))) &&
11852 isImpliedCond(Pred, LHS, RHS, Condition, false);
11853 });
11854}
11855
11856/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
11857/// protected by a conditional between LHS and RHS. This is used to
11858/// to eliminate casts.
11860 CmpPredicate Pred,
11861 const SCEV *LHS,
11862 const SCEV *RHS) {
11863 // Interpret a null as meaning no loop, where there is obviously no guard
11864 // (interprocedural conditions notwithstanding). Do not bother about
11865 // unreachable loops.
11866 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11867 return true;
11868
11869 if (VerifyIR)
11870 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
11871 "This cannot be done on broken IR!");
11872
11873
11874 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11875 return true;
11876
11877 BasicBlock *Latch = L->getLoopLatch();
11878 if (!Latch)
11879 return false;
11880
11881 CondBrInst *LoopContinuePredicate =
11883 if (LoopContinuePredicate &&
11884 isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->getCondition(),
11885 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
11886 return true;
11887
11888 // We don't want more than one activation of the following loops on the stack
11889 // -- that can lead to O(n!) time complexity.
11890 if (WalkingBEDominatingConds)
11891 return false;
11892
11893 SaveAndRestore ClearOnExit(WalkingBEDominatingConds, true);
11894
11895 // See if we can exploit a trip count to prove the predicate.
11896 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11897 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
11898 if (LatchBECount != getCouldNotCompute()) {
11899 // We know that Latch branches back to the loop header exactly
11900 // LatchBECount times. This means the backdege condition at Latch is
11901 // equivalent to "{0,+,1} u< LatchBECount".
11902 Type *Ty = LatchBECount->getType();
11903 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
11904 const SCEV *LoopCounter =
11905 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
11906 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
11907 LatchBECount))
11908 return true;
11909 }
11910
11911 // Check conditions due to any @llvm.assume intrinsics.
11912 for (auto &AssumeVH : AC.assumptions()) {
11913 if (!AssumeVH)
11914 continue;
11915 auto *CI = cast<CallInst>(AssumeVH);
11916 if (!DT.dominates(CI, Latch->getTerminator()))
11917 continue;
11918
11919 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
11920 return true;
11921 }
11922
11923 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11924 return true;
11925
11926 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11927 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11928 assert(DTN && "should reach the loop header before reaching the root!");
11929
11930 BasicBlock *BB = DTN->getBlock();
11931 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11932 return true;
11933
11934 BasicBlock *PBB = BB->getSinglePredecessor();
11935 if (!PBB)
11936 continue;
11937
11939 if (!ContBr || ContBr->getSuccessor(0) == ContBr->getSuccessor(1))
11940 continue;
11941
11942 // If we have an edge `E` within the loop body that dominates the only
11943 // latch, the condition guarding `E` also guards the backedge. This
11944 // reasoning works only for loops with a single latch.
11945 // We're constructively (and conservatively) enumerating edges within the
11946 // loop body that dominate the latch. The dominator tree better agree
11947 // with us on this:
11948 assert(DT.dominates(BasicBlockEdge(PBB, BB), Latch) && "should be!");
11949 if (isImpliedCond(Pred, LHS, RHS, ContBr->getCondition(),
11950 BB != ContBr->getSuccessor(0)))
11951 return true;
11952 }
11953
11954 return false;
11955}
11956
11958 CmpPredicate Pred,
11959 const SCEV *LHS,
11960 const SCEV *RHS) {
11961 // Do not bother proving facts for unreachable code.
11962 if (!DT.isReachableFromEntry(BB))
11963 return true;
11964 if (VerifyIR)
11965 assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
11966 "This cannot be done on broken IR!");
11967
11968 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
11969 // the facts (a >= b && a != b) separately. A typical situation is when the
11970 // non-strict comparison is known from ranges and non-equality is known from
11971 // dominating predicates. If we are proving strict comparison, we always try
11972 // to prove non-equality and non-strict comparison separately.
11973 CmpPredicate NonStrictPredicate = ICmpInst::getNonStrictCmpPredicate(Pred);
11974 const bool ProvingStrictComparison =
11975 Pred != NonStrictPredicate.dropSameSign();
11976 bool ProvedNonStrictComparison = false;
11977 bool ProvedNonEquality = false;
11978
11979 auto SplitAndProve = [&](std::function<bool(CmpPredicate)> Fn) -> bool {
11980 if (!ProvedNonStrictComparison)
11981 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11982 if (!ProvedNonEquality)
11983 ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
11984 if (ProvedNonStrictComparison && ProvedNonEquality)
11985 return true;
11986 return false;
11987 };
11988
11989 if (ProvingStrictComparison) {
11990 auto ProofFn = [&](CmpPredicate P) {
11991 return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
11992 };
11993 if (SplitAndProve(ProofFn))
11994 return true;
11995 }
11996
11997 // Try to prove (Pred, LHS, RHS) using isImpliedCond.
11998 auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
11999 const Instruction *CtxI = &BB->front();
12000 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
12001 return true;
12002 if (ProvingStrictComparison) {
12003 auto ProofFn = [&](CmpPredicate P) {
12004 return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
12005 };
12006 if (SplitAndProve(ProofFn))
12007 return true;
12008 }
12009 return false;
12010 };
12011
12012 // Starting at the block's predecessor, climb up the predecessor chain, as long
12013 // as there are predecessors that can be found that have unique successors
12014 // leading to the original block.
12015 const Loop *ContainingLoop = LI.getLoopFor(BB);
12016 const BasicBlock *PredBB;
12017 if (ContainingLoop && ContainingLoop->getHeader() == BB)
12018 PredBB = ContainingLoop->getLoopPredecessor();
12019 else
12020 PredBB = BB->getSinglePredecessor();
12021 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
12022 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
12023 const CondBrInst *BlockEntryPredicate =
12024 dyn_cast<CondBrInst>(Pair.first->getTerminator());
12025 if (!BlockEntryPredicate)
12026 continue;
12027
12028 if (ProveViaCond(BlockEntryPredicate->getCondition(),
12029 BlockEntryPredicate->getSuccessor(0) != Pair.second))
12030 return true;
12031 }
12032
12033 // Check conditions due to any @llvm.assume intrinsics.
12034 for (auto &AssumeVH : AC.assumptions()) {
12035 if (!AssumeVH)
12036 continue;
12037 auto *CI = cast<CallInst>(AssumeVH);
12038 if (!DT.dominates(CI, BB))
12039 continue;
12040
12041 if (ProveViaCond(CI->getArgOperand(0), false))
12042 return true;
12043 }
12044
12045 // Check conditions due to any @llvm.experimental.guard intrinsics.
12046 auto *GuardDecl = Intrinsic::getDeclarationIfExists(
12047 F.getParent(), Intrinsic::experimental_guard);
12048 if (GuardDecl)
12049 for (const auto *GU : GuardDecl->users())
12050 if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
12051 if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB))
12052 if (ProveViaCond(Guard->getArgOperand(0), false))
12053 return true;
12054 return false;
12055}
12056
12058 const SCEV *LHS,
12059 const SCEV *RHS) {
12060 // Interpret a null as meaning no loop, where there is obviously no guard
12061 // (interprocedural conditions notwithstanding).
12062 if (!L)
12063 return false;
12064
12065 // Both LHS and RHS must be available at loop entry.
12067 "LHS is not available at Loop Entry");
12069 "RHS is not available at Loop Entry");
12070
12071 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
12072 return true;
12073
12074 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
12075}
12076
12077bool ScalarEvolution::isImpliedCond(CmpPredicate Pred, const SCEV *LHS,
12078 const SCEV *RHS,
12079 const Value *FoundCondValue, bool Inverse,
12080 const Instruction *CtxI) {
12081 // False conditions implies anything. Do not bother analyzing it further.
12082 if (FoundCondValue ==
12083 ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
12084 return true;
12085
12086 if (!PendingLoopPredicates.insert(FoundCondValue).second)
12087 return false;
12088
12089 llvm::scope_exit ClearOnExit(
12090 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
12091
12092 // Recursively handle And and Or conditions.
12093 const Value *Op0, *Op1;
12094 if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
12095 if (!Inverse)
12096 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
12097 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
12098 } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
12099 if (Inverse)
12100 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
12101 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
12102 }
12103
12104 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
12105 if (!ICI) return false;
12106
12107 // Now that we found a conditional branch that dominates the loop or controls
12108 // the loop latch. Check to see if it is the comparison we are looking for.
12109 CmpPredicate FoundPred;
12110 if (Inverse)
12111 FoundPred = ICI->getInverseCmpPredicate();
12112 else
12113 FoundPred = ICI->getCmpPredicate();
12114
12115 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
12116 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
12117
12118 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
12119}
12120
12121bool ScalarEvolution::isImpliedCond(CmpPredicate Pred, const SCEV *LHS,
12122 const SCEV *RHS, CmpPredicate FoundPred,
12123 const SCEV *FoundLHS, const SCEV *FoundRHS,
12124 const Instruction *CtxI) {
12125 // Balance the types.
12126 if (getTypeSizeInBits(LHS->getType()) <
12127 getTypeSizeInBits(FoundLHS->getType())) {
12128 // For unsigned and equality predicates, try to prove that both found
12129 // operands fit into narrow unsigned range. If so, try to prove facts in
12130 // narrow types.
12131 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() &&
12132 !FoundRHS->getType()->isPointerTy()) {
12133 auto *NarrowType = LHS->getType();
12134 auto *WideType = FoundLHS->getType();
12135 auto BitWidth = getTypeSizeInBits(NarrowType);
12136 const SCEV *MaxValue = getZeroExtendExpr(
12138 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS,
12139 MaxValue) &&
12140 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS,
12141 MaxValue)) {
12142 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
12143 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
12144 // We cannot preserve samesign after truncation.
12145 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred.dropSameSign(),
12146 TruncFoundLHS, TruncFoundRHS, CtxI))
12147 return true;
12148 }
12149 }
12150
12151 if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy())
12152 return false;
12153 if (CmpInst::isSigned(Pred)) {
12154 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
12155 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
12156 } else {
12157 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
12158 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
12159 }
12160 } else if (getTypeSizeInBits(LHS->getType()) >
12161 getTypeSizeInBits(FoundLHS->getType())) {
12162 if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy())
12163 return false;
12164 if (CmpInst::isSigned(FoundPred)) {
12165 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
12166 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
12167 } else {
12168 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
12169 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
12170 }
12171 }
12172 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
12173 FoundRHS, CtxI);
12174}
12175
12176bool ScalarEvolution::isImpliedCondBalancedTypes(
12177 CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS, CmpPredicate FoundPred,
12178 SCEVUse FoundLHS, SCEVUse FoundRHS, const Instruction *CtxI) {
12180 getTypeSizeInBits(FoundLHS->getType()) &&
12181 "Types should be balanced!");
12182 // Canonicalize the query to match the way instcombine will have
12183 // canonicalized the comparison.
12184 if (SimplifyICmpOperands(Pred, LHS, RHS))
12185 if (LHS == RHS)
12186 return CmpInst::isTrueWhenEqual(Pred);
12187 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
12188 if (FoundLHS == FoundRHS)
12189 return CmpInst::isFalseWhenEqual(FoundPred);
12190
12191 // Check to see if we can make the LHS or RHS match.
12192 if (LHS == FoundRHS || RHS == FoundLHS) {
12193 if (isa<SCEVConstant>(RHS)) {
12194 std::swap(FoundLHS, FoundRHS);
12195 FoundPred = ICmpInst::getSwappedCmpPredicate(FoundPred);
12196 } else {
12197 std::swap(LHS, RHS);
12199 }
12200 }
12201
12202 // Check whether the found predicate is the same as the desired predicate.
12203 if (auto P = CmpPredicate::getMatching(FoundPred, Pred))
12204 return isImpliedCondOperands(*P, LHS, RHS, FoundLHS, FoundRHS, CtxI);
12205
12206 // Check whether swapping the found predicate makes it the same as the
12207 // desired predicate.
12208 if (auto P = CmpPredicate::getMatching(
12209 ICmpInst::getSwappedCmpPredicate(FoundPred), Pred)) {
12210 // We can write the implication
12211 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS
12212 // using one of the following ways:
12213 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS
12214 // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS
12215 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS
12216 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS
12217 // Forms 1. and 2. require swapping the operands of one condition. Don't
12218 // do this if it would break canonical constant/addrec ordering.
12220 return isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(*P), RHS,
12221 LHS, FoundLHS, FoundRHS, CtxI);
12222 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
12223 return isImpliedCondOperands(*P, LHS, RHS, FoundRHS, FoundLHS, CtxI);
12224
12225 // There's no clear preference between forms 3. and 4., try both. Avoid
12226 // forming getNotSCEV of pointer values as the resulting subtract is
12227 // not legal.
12228 if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() &&
12229 isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(*P),
12230 getNotSCEV(LHS), getNotSCEV(RHS), FoundLHS,
12231 FoundRHS, CtxI))
12232 return true;
12233
12234 if (!FoundLHS->getType()->isPointerTy() &&
12235 !FoundRHS->getType()->isPointerTy() &&
12236 isImpliedCondOperands(*P, LHS, RHS, getNotSCEV(FoundLHS),
12237 getNotSCEV(FoundRHS), CtxI))
12238 return true;
12239
12240 return false;
12241 }
12242
12243 auto IsSignFlippedPredicate = [](CmpInst::Predicate P1,
12245 assert(P1 != P2 && "Handled earlier!");
12246 return CmpInst::isRelational(P2) &&
12248 };
12249 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12250 // Unsigned comparison is the same as signed comparison when both the
12251 // operands are non-negative or negative.
12252 if (haveSameSign(FoundLHS, FoundRHS))
12253 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
12254 // Create local copies that we can freely swap and canonicalize our
12255 // conditions to "le/lt".
12256 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12257 const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS,
12258 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12259 if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) {
12260 CanonicalPred = ICmpInst::getSwappedCmpPredicate(CanonicalPred);
12261 CanonicalFoundPred = ICmpInst::getSwappedCmpPredicate(CanonicalFoundPred);
12262 std::swap(CanonicalLHS, CanonicalRHS);
12263 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12264 }
12265 assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) &&
12266 "Must be!");
12267 assert((ICmpInst::isLT(CanonicalFoundPred) ||
12268 ICmpInst::isLE(CanonicalFoundPred)) &&
12269 "Must be!");
12270 if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS))
12271 // Use implication:
12272 // x <u y && y >=s 0 --> x <s y.
12273 // If we can prove the left part, the right part is also proven.
12274 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12275 CanonicalRHS, CanonicalFoundLHS,
12276 CanonicalFoundRHS);
12277 if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS))
12278 // Use implication:
12279 // x <s y && y <s 0 --> x <u y.
12280 // If we can prove the left part, the right part is also proven.
12281 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12282 CanonicalRHS, CanonicalFoundLHS,
12283 CanonicalFoundRHS);
12284 }
12285
12286 // Check if we can make progress by sharpening ranges.
12287 if (FoundPred == ICmpInst::ICMP_NE &&
12288 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
12289
12290 const SCEVConstant *C = nullptr;
12291 const SCEV *V = nullptr;
12292
12293 if (isa<SCEVConstant>(FoundLHS)) {
12294 C = cast<SCEVConstant>(FoundLHS);
12295 V = FoundRHS;
12296 } else {
12297 C = cast<SCEVConstant>(FoundRHS);
12298 V = FoundLHS;
12299 }
12300
12301 // The guarding predicate tells us that C != V. If the known range
12302 // of V is [C, t), we can sharpen the range to [C + 1, t). The
12303 // range we consider has to correspond to same signedness as the
12304 // predicate we're interested in folding.
12305
12306 APInt Min = ICmpInst::isSigned(Pred) ?
12308
12309 if (Min == C->getAPInt()) {
12310 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
12311 // This is true even if (Min + 1) wraps around -- in case of
12312 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
12313
12314 APInt SharperMin = Min + 1;
12315
12316 switch (Pred) {
12317 case ICmpInst::ICMP_SGE:
12318 case ICmpInst::ICMP_UGE:
12319 // We know V `Pred` SharperMin. If this implies LHS `Pred`
12320 // RHS, we're done.
12321 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12322 CtxI))
12323 return true;
12324 [[fallthrough]];
12325
12326 case ICmpInst::ICMP_SGT:
12327 case ICmpInst::ICMP_UGT:
12328 // We know from the range information that (V `Pred` Min ||
12329 // V == Min). We know from the guarding condition that !(V
12330 // == Min). This gives us
12331 //
12332 // V `Pred` Min || V == Min && !(V == Min)
12333 // => V `Pred` Min
12334 //
12335 // If V `Pred` Min implies LHS `Pred` RHS, we're done.
12336
12337 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12338 return true;
12339 break;
12340
12341 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively.
12342 case ICmpInst::ICMP_SLE:
12343 case ICmpInst::ICMP_ULE:
12344 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12345 LHS, V, getConstant(SharperMin), CtxI))
12346 return true;
12347 [[fallthrough]];
12348
12349 case ICmpInst::ICMP_SLT:
12350 case ICmpInst::ICMP_ULT:
12351 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12352 LHS, V, getConstant(Min), CtxI))
12353 return true;
12354 break;
12355
12356 default:
12357 // No change
12358 break;
12359 }
12360 }
12361 }
12362
12363 // Check whether the actual condition is beyond sufficient.
12364 if (FoundPred == ICmpInst::ICMP_EQ)
12365 if (ICmpInst::isTrueWhenEqual(Pred))
12366 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
12367 return true;
12368 if (Pred == ICmpInst::ICMP_NE)
12369 if (!ICmpInst::isTrueWhenEqual(FoundPred))
12370 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
12371 return true;
12372
12373 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS))
12374 return true;
12375
12376 // Otherwise assume the worst.
12377 return false;
12378}
12379
12380bool ScalarEvolution::splitBinaryAdd(SCEVUse Expr, SCEVUse &L, SCEVUse &R,
12381 SCEV::NoWrapFlags &Flags) {
12382 if (!match(Expr, m_scev_Add(m_SCEV(L), m_SCEV(R))))
12383 return false;
12384
12385 Flags = cast<SCEVAddExpr>(Expr)->getNoWrapFlags();
12386 return true;
12387}
12388
12389std::optional<APInt>
12391 // We avoid subtracting expressions here because this function is usually
12392 // fairly deep in the call stack (i.e. is called many times).
12393
12394 unsigned BW = getTypeSizeInBits(More->getType());
12395 APInt Diff(BW, 0);
12396 APInt DiffMul(BW, 1);
12397 // Try various simplifications to reduce the difference to a constant. Limit
12398 // the number of allowed simplifications to keep compile-time low.
12399 for (unsigned I = 0; I < 8; ++I) {
12400 if (More == Less)
12401 return Diff;
12402
12403 // Reduce addrecs with identical steps to their start value.
12405 const auto *LAR = cast<SCEVAddRecExpr>(Less);
12406 const auto *MAR = cast<SCEVAddRecExpr>(More);
12407
12408 if (LAR->getLoop() != MAR->getLoop())
12409 return std::nullopt;
12410
12411 // We look at affine expressions only; not for correctness but to keep
12412 // getStepRecurrence cheap.
12413 if (!LAR->isAffine() || !MAR->isAffine())
12414 return std::nullopt;
12415
12416 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
12417 return std::nullopt;
12418
12419 Less = LAR->getStart();
12420 More = MAR->getStart();
12421 continue;
12422 }
12423
12424 // Try to match a common constant multiply.
12425 auto MatchConstMul =
12426 [](const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12427 const APInt *C;
12428 const SCEV *Op;
12429 if (match(S, m_scev_Mul(m_scev_APInt(C), m_SCEV(Op))))
12430 return {{Op, *C}};
12431 return std::nullopt;
12432 };
12433 if (auto MatchedMore = MatchConstMul(More)) {
12434 if (auto MatchedLess = MatchConstMul(Less)) {
12435 if (MatchedMore->second == MatchedLess->second) {
12436 More = MatchedMore->first;
12437 Less = MatchedLess->first;
12438 DiffMul *= MatchedMore->second;
12439 continue;
12440 }
12441 }
12442 }
12443
12444 // Try to cancel out common factors in two add expressions.
12446 auto Add = [&](const SCEV *S, int Mul) {
12447 if (auto *C = dyn_cast<SCEVConstant>(S)) {
12448 if (Mul == 1) {
12449 Diff += C->getAPInt() * DiffMul;
12450 } else {
12451 assert(Mul == -1);
12452 Diff -= C->getAPInt() * DiffMul;
12453 }
12454 } else
12455 Multiplicity[S] += Mul;
12456 };
12457 auto Decompose = [&](const SCEV *S, int Mul) {
12458 if (isa<SCEVAddExpr>(S)) {
12459 for (const SCEV *Op : S->operands())
12460 Add(Op, Mul);
12461 } else
12462 Add(S, Mul);
12463 };
12464 Decompose(More, 1);
12465 Decompose(Less, -1);
12466
12467 // Check whether all the non-constants cancel out, or reduce to new
12468 // More/Less values.
12469 const SCEV *NewMore = nullptr, *NewLess = nullptr;
12470 for (const auto &[S, Mul] : Multiplicity) {
12471 if (Mul == 0)
12472 continue;
12473 if (Mul == 1) {
12474 if (NewMore)
12475 return std::nullopt;
12476 NewMore = S;
12477 } else if (Mul == -1) {
12478 if (NewLess)
12479 return std::nullopt;
12480 NewLess = S;
12481 } else
12482 return std::nullopt;
12483 }
12484
12485 // Values stayed the same, no point in trying further.
12486 if (NewMore == More || NewLess == Less)
12487 return std::nullopt;
12488
12489 More = NewMore;
12490 Less = NewLess;
12491
12492 // Reduced to constant.
12493 if (!More && !Less)
12494 return Diff;
12495
12496 // Left with variable on only one side, bail out.
12497 if (!More || !Less)
12498 return std::nullopt;
12499 }
12500
12501 // Did not reduce to constant.
12502 return std::nullopt;
12503}
12504
12505bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12506 CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS,
12507 const SCEV *FoundRHS, const Instruction *CtxI) {
12508 // Try to recognize the following pattern:
12509 //
12510 // FoundRHS = ...
12511 // ...
12512 // loop:
12513 // FoundLHS = {Start,+,W}
12514 // context_bb: // Basic block from the same loop
12515 // known(Pred, FoundLHS, FoundRHS)
12516 //
12517 // If some predicate is known in the context of a loop, it is also known on
12518 // each iteration of this loop, including the first iteration. Therefore, in
12519 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
12520 // prove the original pred using this fact.
12521 if (!CtxI)
12522 return false;
12523 const BasicBlock *ContextBB = CtxI->getParent();
12524 // Make sure AR varies in the context block.
12525 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
12526 const Loop *L = AR->getLoop();
12527 const auto *Latch = L->getLoopLatch();
12528 // Make sure that context belongs to the loop and executes on 1st iteration
12529 // (if it ever executes at all).
12530 if (!L->contains(ContextBB) || !Latch || !DT.dominates(ContextBB, Latch))
12531 return false;
12532 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
12533 return false;
12534 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
12535 }
12536
12537 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
12538 const Loop *L = AR->getLoop();
12539 const auto *Latch = L->getLoopLatch();
12540 // Make sure that context belongs to the loop and executes on 1st iteration
12541 // (if it ever executes at all).
12542 if (!L->contains(ContextBB) || !Latch || !DT.dominates(ContextBB, Latch))
12543 return false;
12544 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
12545 return false;
12546 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
12547 }
12548
12549 return false;
12550}
12551
12552bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12553 const SCEV *LHS,
12554 const SCEV *RHS,
12555 const SCEV *FoundLHS,
12556 const SCEV *FoundRHS) {
12557 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
12558 return false;
12559
12560 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
12561 if (!AddRecLHS)
12562 return false;
12563
12564 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
12565 if (!AddRecFoundLHS)
12566 return false;
12567
12568 // We'd like to let SCEV reason about control dependencies, so we constrain
12569 // both the inequalities to be about add recurrences on the same loop. This
12570 // way we can use isLoopEntryGuardedByCond later.
12571
12572 const Loop *L = AddRecFoundLHS->getLoop();
12573 if (L != AddRecLHS->getLoop())
12574 return false;
12575
12576 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
12577 //
12578 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
12579 // ... (2)
12580 //
12581 // Informal proof for (2), assuming (1) [*]:
12582 //
12583 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
12584 //
12585 // Then
12586 //
12587 // FoundLHS s< FoundRHS s< INT_MIN - C
12588 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ]
12589 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
12590 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
12591 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
12592 // <=> FoundLHS + C s< FoundRHS + C
12593 //
12594 // [*]: (1) can be proved by ruling out overflow.
12595 //
12596 // [**]: This can be proved by analyzing all the four possibilities:
12597 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
12598 // (A s>= 0, B s>= 0).
12599 //
12600 // Note:
12601 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
12602 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
12603 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
12604 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
12605 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
12606 // C)".
12607
12608 std::optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
12609 if (!LDiff)
12610 return false;
12611 std::optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
12612 if (!RDiff || *LDiff != *RDiff)
12613 return false;
12614
12615 if (LDiff->isMinValue())
12616 return true;
12617
12618 APInt FoundRHSLimit;
12619
12620 if (Pred == CmpInst::ICMP_ULT) {
12621 FoundRHSLimit = -(*RDiff);
12622 } else {
12623 assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
12624 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
12625 }
12626
12627 // Try to prove (1) or (2), as needed.
12628 return isAvailableAtLoopEntry(FoundRHS, L) &&
12629 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
12630 getConstant(FoundRHSLimit));
12631}
12632
12633bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred, const SCEV *LHS,
12634 const SCEV *RHS, const SCEV *FoundLHS,
12635 const SCEV *FoundRHS, unsigned Depth) {
12636 const PHINode *LPhi = nullptr, *RPhi = nullptr;
12637
12638 llvm::scope_exit ClearOnExit([&]() {
12639 if (LPhi) {
12640 bool Erased = PendingMerges.erase(LPhi);
12641 assert(Erased && "Failed to erase LPhi!");
12642 (void)Erased;
12643 }
12644 if (RPhi) {
12645 bool Erased = PendingMerges.erase(RPhi);
12646 assert(Erased && "Failed to erase RPhi!");
12647 (void)Erased;
12648 }
12649 });
12650
12651 // Find respective Phis and check that they are not being pending.
12652 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
12653 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
12654 if (!PendingMerges.insert(Phi).second)
12655 return false;
12656 LPhi = Phi;
12657 }
12658 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
12659 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
12660 // If we detect a loop of Phi nodes being processed by this method, for
12661 // example:
12662 //
12663 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
12664 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
12665 //
12666 // we don't want to deal with a case that complex, so return conservative
12667 // answer false.
12668 if (!PendingMerges.insert(Phi).second)
12669 return false;
12670 RPhi = Phi;
12671 }
12672
12673 // If none of LHS, RHS is a Phi, nothing to do here.
12674 if (!LPhi && !RPhi)
12675 return false;
12676
12677 // If there is a SCEVUnknown Phi we are interested in, make it left.
12678 if (!LPhi) {
12679 std::swap(LHS, RHS);
12680 std::swap(FoundLHS, FoundRHS);
12681 std::swap(LPhi, RPhi);
12683 }
12684
12685 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
12686 const BasicBlock *LBB = LPhi->getParent();
12687 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
12688
12689 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
12690 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
12691 isImpliedCondOperandsViaRanges(Pred, S1, S2, Pred, FoundLHS, FoundRHS) ||
12692 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
12693 };
12694
12695 if (RPhi && RPhi->getParent() == LBB) {
12696 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
12697 // If we compare two Phis from the same block, and for each entry block
12698 // the predicate is true for incoming values from this block, then the
12699 // predicate is also true for the Phis.
12700 for (const BasicBlock *IncBB : predecessors(LBB)) {
12701 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
12702 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12703 if (!ProvedEasily(L, R))
12704 return false;
12705 }
12706 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
12707 // Case two: RHS is also a Phi from the same basic block, and it is an
12708 // AddRec. It means that there is a loop which has both AddRec and Unknown
12709 // PHIs, for it we can compare incoming values of AddRec from above the loop
12710 // and latch with their respective incoming values of LPhi.
12711 // TODO: Generalize to handle loops with many inputs in a header.
12712 if (LPhi->getNumIncomingValues() != 2) return false;
12713
12714 auto *RLoop = RAR->getLoop();
12715 auto *Predecessor = RLoop->getLoopPredecessor();
12716 assert(Predecessor && "Loop with AddRec with no predecessor?");
12717 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
12718 if (!ProvedEasily(L1, RAR->getStart()))
12719 return false;
12720 auto *Latch = RLoop->getLoopLatch();
12721 assert(Latch && "Loop with AddRec with no latch?");
12722 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
12723 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
12724 return false;
12725 } else {
12726 // In all other cases go over inputs of LHS and compare each of them to RHS,
12727 // the predicate is true for (LHS, RHS) if it is true for all such pairs.
12728 // At this point RHS is either a non-Phi, or it is a Phi from some block
12729 // different from LBB.
12730 for (const BasicBlock *IncBB : predecessors(LBB)) {
12731 // Check that RHS is available in this block.
12732 if (!dominates(RHS, IncBB))
12733 return false;
12734 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
12735 // Make sure L does not refer to a value from a potentially previous
12736 // iteration of a loop.
12737 if (!properlyDominates(L, LBB))
12738 return false;
12739 // Addrecs are considered to properly dominate their loop, so are missed
12740 // by the previous check. Discard any values that have computable
12741 // evolution in this loop.
12742 if (auto *Loop = LI.getLoopFor(LBB))
12743 if (hasComputableLoopEvolution(L, Loop))
12744 return false;
12745 if (!ProvedEasily(L, RHS))
12746 return false;
12747 }
12748 }
12749 return true;
12750}
12751
12752bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12753 const SCEV *LHS,
12754 const SCEV *RHS,
12755 const SCEV *FoundLHS,
12756 const SCEV *FoundRHS) {
12757 // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make
12758 // sure that we are dealing with same LHS.
12759 if (RHS == FoundRHS) {
12760 std::swap(LHS, RHS);
12761 std::swap(FoundLHS, FoundRHS);
12763 }
12764 if (LHS != FoundLHS)
12765 return false;
12766
12767 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
12768 if (!SUFoundRHS)
12769 return false;
12770
12771 Value *Shiftee, *ShiftValue;
12772
12773 using namespace PatternMatch;
12774 if (match(SUFoundRHS->getValue(),
12775 m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) {
12776 auto *ShifteeS = getSCEV(Shiftee);
12777 // Prove one of the following:
12778 // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS
12779 // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS
12780 // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
12781 // ---> LHS <s RHS
12782 // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
12783 // ---> LHS <=s RHS
12784 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
12785 return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS);
12786 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
12787 if (isKnownNonNegative(ShifteeS))
12788 return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS);
12789 }
12790
12791 return false;
12792}
12793
12794bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred, const SCEV *LHS,
12795 const SCEV *RHS,
12796 const SCEV *FoundLHS,
12797 const SCEV *FoundRHS,
12798 const Instruction *CtxI) {
12799 return isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS,
12800 FoundRHS) ||
12801 isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS,
12802 FoundRHS) ||
12803 isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS) ||
12804 isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
12805 CtxI) ||
12806 isImpliedCondOperandsHelper(Pred, LHS, RHS, FoundLHS, FoundRHS);
12807}
12808
12809/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
12810template <typename MinMaxExprType>
12811static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
12812 const SCEV *Candidate) {
12813 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
12814 if (!MinMaxExpr)
12815 return false;
12816
12817 return is_contained(MinMaxExpr->operands(), Candidate);
12818}
12819
12821 CmpPredicate Pred, const SCEV *LHS,
12822 const SCEV *RHS) {
12823 // If both sides are affine addrecs for the same loop, with equal
12824 // steps, and we know the recurrences don't wrap, then we only
12825 // need to check the predicate on the starting values.
12826
12827 if (!ICmpInst::isRelational(Pred))
12828 return false;
12829
12830 const SCEV *LStart, *RStart, *Step;
12831 const Loop *L;
12832 if (!match(LHS,
12833 m_scev_AffineAddRec(m_SCEV(LStart), m_SCEV(Step), m_Loop(L))) ||
12835 m_SpecificLoop(L))))
12836 return false;
12841 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
12842 return false;
12843
12844 return SE.isKnownPredicate(Pred, LStart, RStart);
12845}
12846
12847/// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max
12848/// expression?
12850 const SCEV *LHS, const SCEV *RHS) {
12851 switch (Pred) {
12852 default:
12853 return false;
12854
12855 case ICmpInst::ICMP_SGE:
12856 std::swap(LHS, RHS);
12857 [[fallthrough]];
12858 case ICmpInst::ICMP_SLE:
12859 return
12860 // min(A, ...) <= A
12862 // A <= max(A, ...)
12864
12865 case ICmpInst::ICMP_UGE:
12866 std::swap(LHS, RHS);
12867 [[fallthrough]];
12868 case ICmpInst::ICMP_ULE:
12869 return
12870 // min(A, ...) <= A
12871 // FIXME: what about umin_seq?
12873 // A <= max(A, ...)
12875 }
12876
12877 llvm_unreachable("covered switch fell through?!");
12878}
12879
12880bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred, const SCEV *LHS,
12881 const SCEV *RHS,
12882 const SCEV *FoundLHS,
12883 const SCEV *FoundRHS,
12884 unsigned Depth) {
12887 "LHS and RHS have different sizes?");
12888 assert(getTypeSizeInBits(FoundLHS->getType()) ==
12889 getTypeSizeInBits(FoundRHS->getType()) &&
12890 "FoundLHS and FoundRHS have different sizes?");
12891 // We want to avoid hurting the compile time with analysis of too big trees.
12893 return false;
12894
12895 // We only want to work with GT comparison so far.
12896 if (ICmpInst::isLT(Pred)) {
12898 std::swap(LHS, RHS);
12899 std::swap(FoundLHS, FoundRHS);
12900 }
12901
12903
12904 // For unsigned, try to reduce it to corresponding signed comparison.
12905 if (P == ICmpInst::ICMP_UGT)
12906 // We can replace unsigned predicate with its signed counterpart if all
12907 // involved values are non-negative.
12908 // TODO: We could have better support for unsigned.
12909 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
12910 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
12911 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
12912 // use this fact to prove that LHS and RHS are non-negative.
12913 const SCEV *MinusOne = getMinusOne(LHS->getType());
12914 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
12915 FoundRHS) &&
12916 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
12917 FoundRHS))
12919 }
12920
12921 if (P != ICmpInst::ICMP_SGT)
12922 return false;
12923
12924 auto GetOpFromSExt = [&](const SCEV *S) -> const SCEV * {
12925 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
12926 return Ext->getOperand();
12927 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
12928 // the constant in some cases.
12929 return S;
12930 };
12931
12932 // Acquire values from extensions.
12933 auto *OrigLHS = LHS;
12934 auto *OrigFoundLHS = FoundLHS;
12935 LHS = GetOpFromSExt(LHS);
12936 FoundLHS = GetOpFromSExt(FoundLHS);
12937
12938 // Is the SGT predicate can be proved trivially or using the found context.
12939 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
12940 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
12941 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
12942 FoundRHS, Depth + 1);
12943 };
12944
12945 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
12946 // We want to avoid creation of any new non-constant SCEV. Since we are
12947 // going to compare the operands to RHS, we should be certain that we don't
12948 // need any size extensions for this. So let's decline all cases when the
12949 // sizes of types of LHS and RHS do not match.
12950 // TODO: Maybe try to get RHS from sext to catch more cases?
12952 return false;
12953
12954 // Should not overflow.
12955 if (!LHSAddExpr->hasNoSignedWrap())
12956 return false;
12957
12958 SCEVUse LL = LHSAddExpr->getOperand(0);
12959 SCEVUse LR = LHSAddExpr->getOperand(1);
12960 auto *MinusOne = getMinusOne(RHS->getType());
12961
12962 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
12963 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
12964 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
12965 };
12966 // Try to prove the following rule:
12967 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
12968 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
12969 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12970 return true;
12971 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
12972 Value *LL, *LR;
12973 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
12974
12975 using namespace llvm::PatternMatch;
12976
12977 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
12978 // Rules for division.
12979 // We are going to perform some comparisons with Denominator and its
12980 // derivative expressions. In general case, creating a SCEV for it may
12981 // lead to a complex analysis of the entire graph, and in particular it
12982 // can request trip count recalculation for the same loop. This would
12983 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
12984 // this, we only want to create SCEVs that are constants in this section.
12985 // So we bail if Denominator is not a constant.
12986 if (!isa<ConstantInt>(LR))
12987 return false;
12988
12989 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
12990
12991 // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
12992 // then a SCEV for the numerator already exists and matches with FoundLHS.
12993 auto *Numerator = getExistingSCEV(LL);
12994 if (!Numerator || Numerator->getType() != FoundLHS->getType())
12995 return false;
12996
12997 // Make sure that the numerator matches with FoundLHS and the denominator
12998 // is positive.
12999 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
13000 return false;
13001
13002 auto *DTy = Denominator->getType();
13003 auto *FRHSTy = FoundRHS->getType();
13004 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
13005 // One of types is a pointer and another one is not. We cannot extend
13006 // them properly to a wider type, so let us just reject this case.
13007 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
13008 // to avoid this check.
13009 return false;
13010
13011 // Given that:
13012 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
13013 auto *WTy = getWiderType(DTy, FRHSTy);
13014 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
13015 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
13016
13017 // Try to prove the following rule:
13018 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
13019 // For example, given that FoundLHS > 2. It means that FoundLHS is at
13020 // least 3. If we divide it by Denominator < 4, we will have at least 1.
13021 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
13022 if (isKnownNonPositive(RHS) &&
13023 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
13024 return true;
13025
13026 // Try to prove the following rule:
13027 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
13028 // For example, given that FoundLHS > -3. Then FoundLHS is at least -2.
13029 // If we divide it by Denominator > 2, then:
13030 // 1. If FoundLHS is negative, then the result is 0.
13031 // 2. If FoundLHS is non-negative, then the result is non-negative.
13032 // Anyways, the result is non-negative.
13033 auto *MinusOne = getMinusOne(WTy);
13034 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
13035 if (isKnownNegative(RHS) &&
13036 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
13037 return true;
13038 }
13039 }
13040
13041 // If our expression contained SCEVUnknown Phis, and we split it down and now
13042 // need to prove something for them, try to prove the predicate for every
13043 // possible incoming values of those Phis.
13044 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
13045 return true;
13046
13047 return false;
13048}
13049
13051 const SCEV *RHS) {
13052 // zext x u<= sext x, sext x s<= zext x
13053 const SCEV *Op;
13054 switch (Pred) {
13055 case ICmpInst::ICMP_SGE:
13056 std::swap(LHS, RHS);
13057 [[fallthrough]];
13058 case ICmpInst::ICMP_SLE: {
13059 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
13060 return match(LHS, m_scev_SExt(m_SCEV(Op))) &&
13062 }
13063 case ICmpInst::ICMP_UGE:
13064 std::swap(LHS, RHS);
13065 [[fallthrough]];
13066 case ICmpInst::ICMP_ULE: {
13067 // If operand >=u 0 then ZExt == SExt. If operand <u 0 then ZExt <u SExt.
13068 return match(LHS, m_scev_ZExt(m_SCEV(Op))) &&
13070 }
13071 default:
13072 return false;
13073 };
13074 llvm_unreachable("unhandled case");
13075}
13076
13077bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
13078 SCEVUse LHS,
13079 SCEVUse RHS) {
13080 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
13081 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
13082 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
13083 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
13084 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
13085}
13086
13087bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
13088 const SCEV *LHS,
13089 const SCEV *RHS,
13090 const SCEV *FoundLHS,
13091 const SCEV *FoundRHS) {
13092 switch (Pred) {
13093 default:
13094 llvm_unreachable("Unexpected CmpPredicate value!");
13095 case ICmpInst::ICMP_EQ:
13096 case ICmpInst::ICMP_NE:
13097 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
13098 return true;
13099 break;
13100 case ICmpInst::ICMP_SLT:
13101 case ICmpInst::ICMP_SLE:
13102 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
13103 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
13104 return true;
13105 break;
13106 case ICmpInst::ICMP_SGT:
13107 case ICmpInst::ICMP_SGE:
13108 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
13109 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
13110 return true;
13111 break;
13112 case ICmpInst::ICMP_ULT:
13113 case ICmpInst::ICMP_ULE:
13114 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
13115 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
13116 return true;
13117 break;
13118 case ICmpInst::ICMP_UGT:
13119 case ICmpInst::ICMP_UGE:
13120 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
13121 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
13122 return true;
13123 break;
13124 }
13125
13126 // Maybe it can be proved via operations?
13127 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
13128 return true;
13129
13130 return false;
13131}
13132
13133bool ScalarEvolution::isImpliedCondOperandsViaRanges(
13134 CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, CmpPredicate FoundPred,
13135 const SCEV *FoundLHS, const SCEV *FoundRHS) {
13136 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
13137 // The restriction on `FoundRHS` be lifted easily -- it exists only to
13138 // reduce the compile time impact of this optimization.
13139 return false;
13140
13141 std::optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
13142 if (!Addend)
13143 return false;
13144
13145 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
13146
13147 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
13148 // antecedent "`FoundLHS` `FoundPred` `FoundRHS`".
13149 ConstantRange FoundLHSRange =
13150 ConstantRange::makeExactICmpRegion(FoundPred, ConstFoundRHS);
13151
13152 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
13153 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
13154
13155 // We can also compute the range of values for `LHS` that satisfy the
13156 // consequent, "`LHS` `Pred` `RHS`":
13157 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
13158 // The antecedent implies the consequent if every value of `LHS` that
13159 // satisfies the antecedent also satisfies the consequent.
13160 return LHSRange.icmp(Pred, ConstRHS);
13161}
13162
13163bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
13164 bool IsSigned) {
13165 assert(isKnownPositive(Stride) && "Positive stride expected!");
13166
13167 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
13168 const SCEV *One = getOne(Stride->getType());
13169
13170 if (IsSigned) {
13171 APInt MaxRHS = getSignedRangeMax(RHS);
13172 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
13173 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
13174
13175 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
13176 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13177 }
13178
13179 APInt MaxRHS = getUnsignedRangeMax(RHS);
13180 APInt MaxValue = APInt::getMaxValue(BitWidth);
13181 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
13182
13183 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
13184 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13185}
13186
13187bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
13188 bool IsSigned) {
13189
13190 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
13191 const SCEV *One = getOne(Stride->getType());
13192
13193 if (IsSigned) {
13194 APInt MinRHS = getSignedRangeMin(RHS);
13195 APInt MinValue = APInt::getSignedMinValue(BitWidth);
13196 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
13197
13198 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
13199 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13200 }
13201
13202 APInt MinRHS = getUnsignedRangeMin(RHS);
13203 APInt MinValue = APInt::getMinValue(BitWidth);
13204 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
13205
13206 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
13207 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13208}
13209
  // Build ceil(N / D) as: umin(N, 1) + floor((N - umin(N, 1)) / D).
  // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
  // expression fixes the case of N=0: both addends are then zero.
  // NOTE(review): the defining signature line of this function is not
  // visible in this view; only its body is shown here.
  const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
  const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
  return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
}
13218
13219const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
13220 const SCEV *Stride,
13221 const SCEV *End,
13222 unsigned BitWidth,
13223 bool IsSigned) {
13224 // The logic in this function assumes we can represent a positive stride.
13225 // If we can't, the backedge-taken count must be zero.
13226 if (IsSigned && BitWidth == 1)
13227 return getZero(Stride->getType());
13228
13229 // This code below only been closely audited for negative strides in the
13230 // unsigned comparison case, it may be correct for signed comparison, but
13231 // that needs to be established.
13232 if (IsSigned && isKnownNegative(Stride))
13233 return getCouldNotCompute();
13234
13235 // Calculate the maximum backedge count based on the range of values
13236 // permitted by Start, End, and Stride.
13237 APInt MinStart =
13238 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
13239
13240 APInt MinStride =
13241 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
13242
13243 // We assume either the stride is positive, or the backedge-taken count
13244 // is zero. So force StrideForMaxBECount to be at least one.
13245 APInt One(BitWidth, 1);
13246 APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
13247 : APIntOps::umax(One, MinStride);
13248
13249 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
13250 : APInt::getMaxValue(BitWidth);
13251 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13252
13253 // Although End can be a MAX expression we estimate MaxEnd considering only
13254 // the case End = RHS of the loop termination condition. This is safe because
13255 // in the other case (End - Start) is zero, leading to a zero maximum backedge
13256 // taken count.
13257 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
13258 : APIntOps::umin(getUnsignedRangeMax(End), Limit);
13259
13260 // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
13261 MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
13262 : APIntOps::umax(MaxEnd, MinStart);
13263
13264 return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
13265 getConstant(StrideForMaxBECount) /* Step */);
13266}
13267
// Compute the exit limit for a loop whose exit test is "LHS < RHS" (signed
// or unsigned).  LHS is normally an affine add-recurrence in L; RHS may be
// loop-invariant or (in a limited case) an add-recurrence with negative step.
// NOTE(review): several lines of this definition (the return type, the
// declarations of Predicates/IV/Cond, and a few statements) are not visible
// in this view; the visible code is preserved verbatim.
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsOnlyExit, bool AllowPredicates) {

  bool PredicatedIV = false;
  if (!IV) {
    if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
      if (AR && AR->getLoop() == L && AR->isAffine()) {
        auto canProveNUW = [&]() {
          // We can use the comparison to infer no-wrap flags only if it fully
          // controls the loop exit.
          if (!ControlsOnlyExit)
            return false;

          if (!isLoopInvariant(RHS, L))
            return false;

          if (!isKnownNonZero(AR->getStepRecurrence(*this)))
            // We need the sequence defined by AR to strictly increase in the
            // unsigned integer domain for the logic below to hold.
            return false;

          const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType());
          const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType());
          // If RHS <=u Limit, then there must exist a value V in the sequence
          // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and
          // V <=u UINT_MAX.  Thus, we must exit the loop before unsigned
          // overflow occurs.  This limit also implies that a signed comparison
          // (in the wide bitwidth) is equivalent to an unsigned comparison as
          // the high bits on both sides must be zero.
          APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this));
          APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1);
          Limit = Limit.zext(OuterBitWidth);
          return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit);
        };
        auto Flags = AR->getNoWrapFlags();
        if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW())
          Flags = setFlags(Flags, SCEV::FlagNUW);

        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
        if (AR->hasNoUnsignedWrap()) {
          // Emulate what getZeroExtendExpr would have done during construction
          // if we'd been able to infer the fact just above at that time.
          const SCEV *Step = AR->getStepRecurrence(*this);
          Type *Ty = ZExt->getType();
          // NOTE(review): part of this getAddRecExpr call (the extended start
          // operand) and the subsequent use of S are not visible in this view.
          auto *S = getAddRecExpr(
              getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
        }
      }
    }
  }


  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  // A precondition of this method is that the condition being analyzed
  // reaches an exiting branch which dominates the latch.  Given that, we can
  // assume that an increment which violates the nowrap specification and
  // produces poison must cause undefined behavior when the resulting poison
  // value is branched upon and thus we can conclude that the backedge is
  // taken no more often than would be required to produce that poison value.
  // Note that a well defined loop can exit on the iteration which violates
  // the nowrap specification if there is another exit (either explicit or
  // implicit/exceptional) which causes the loop to execute before the
  // exiting instruction we're analyzing would trigger UB.
  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsOnlyExit && any(IV->getNoWrapFlags(WrapType));

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects.  Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula is as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
    //    no side effects within the loop)
    // c) loop has a single static exit (with no abnormal exits)
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop.  The backedge taken count formula reduces to zero in this
    // case.
    //
    // Precondition b) and c) combine to imply that if rhs is invariant in L,
    // then a zero stride means the backedge can't be taken without executing
    // undefined behavior.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // NOTE(review): the trailing operand(s) of this disjunction are not
    // visible in this view.
    if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
      return getCouldNotCompute();

    if (!isKnownNonZero(Stride)) {
      // If we have a step of zero, and RHS isn't invariant in L, we don't know
      // if it might eventually be greater than start and if so, on which
      // iteration.  We can't even produce a useful upper bound.
      if (!isLoopInvariant(RHS, L))
        return getCouldNotCompute();

      // We allow a potentially zero stride, but we need to divide by stride
      // below.  Since the loop can't be infinite and this check must control
      // the sole exit, we can infer the exit must be taken on the first
      // iteration (e.g. backedge count = 0) if the stride is zero.  Given
      // that, we know the numerator in the divides below must be zero, so we
      // can pick an arbitrary non-zero value for the denominator (e.g.
      // stride) and produce the right result.
      // FIXME: Handle the case where Stride is poison?
      auto wouldZeroStrideBeUB = [&]() {
        // Proof by contradiction.  Suppose the stride were zero.  If we can
        // prove that the backedge *is* taken on the first iteration, then
        // since we know this condition controls the sole exit, we must have
        // an infinite loop.  We can't have a (well defined) infinite loop per
        // check just above.
        // Note: The (Start - Stride) term is used to get the start' term from
        // (start' + stride,+,stride).  Remember that we only care about the
        // result of this expression when stride == 0 at runtime.
        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
      };
      if (!wouldZeroStrideBeUB()) {
        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
      }
    }
  } else if (!NoWrap) {
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned))
      return getCouldNotCompute();
  }

  // On all paths just preceding, we established the following invariant:
  // IV can be assumed not to overflow up to and including the exiting
  // iteration.  We proved this in one of two ways:
  // 1) We can show overflow doesn't occur before the exiting iteration
  //    1a) canIVOverflowOnLT, and b) step of one
  // 2) We can show that if overflow occurs, the loop must execute UB
  //    before any possible exit.
  // Note that we have not yet proved RHS invariant (in general).

  const SCEV *Start = IV->getStart();

  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
  // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
  // Use integer-typed versions for actual computation; we can't subtract
  // pointers in general.
  const SCEV *OrigStart = Start;
  const SCEV *OrigRHS = RHS;
  if (Start->getType()->isPointerTy()) {
    // NOTE(review): the pointer-to-integer conversion of Start is not
    // visible in this view.
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (RHS->getType()->isPointerTy()) {
    // NOTE(review): the conversion and check preceding this return are not
    // visible in this view.
      return RHS;
  }

  const SCEV *End = nullptr, *BECount = nullptr,
             *BECountIfBackedgeTaken = nullptr;
  if (!isLoopInvariant(RHS, L)) {
    const auto *RHSAddRec = dyn_cast<SCEVAddRecExpr>(RHS);
    if (PositiveStride && RHSAddRec != nullptr && RHSAddRec->getLoop() == L &&
        any(RHSAddRec->getNoWrapFlags())) {
      // The structure of loop we are trying to calculate backedge count of:
      //
      // left = left_start
      // right = right_start
      //
      // while(left < right){
      //   ... do something here ...
      //   left += s1; // stride of left is s1 (s1 > 0)
      //   right += s2; // stride of right is s2 (s2 < 0)
      // }
      //

      const SCEV *RHSStart = RHSAddRec->getStart();
      const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*this);

      // If Stride - RHSStride is positive and does not overflow, we can write
      // backedge count as ->
      //   ceil((End - Start) /u (Stride - RHSStride))
      //   Where, End = max(RHSStart, Start)

      // Check if RHSStride < 0 and Stride - RHSStride will not overflow.
      if (isKnownNegative(RHSStride) &&
          willNotOverflow(Instruction::Sub, /*Signed=*/true, Stride,
                          RHSStride)) {

        const SCEV *Denominator = getMinusSCEV(Stride, RHSStride);
        if (isKnownPositive(Denominator)) {
          End = IsSigned ? getSMaxExpr(RHSStart, Start)
                         : getUMaxExpr(RHSStart, Start);

          // We can do this because End >= Start, as End = max(RHSStart, Start)
          const SCEV *Delta = getMinusSCEV(End, Start);

          BECount = getUDivCeilSCEV(Delta, Denominator);
          BECountIfBackedgeTaken =
              getUDivCeilSCEV(getMinusSCEV(RHSStart, Start), Denominator);
        }
      }
    }
    if (BECount == nullptr) {
      // If we cannot calculate ExactBECount, we can calculate the MaxBECount,
      // given the start, stride and max value for the end bound of the
      // loop (RHS), and the fact that IV does not overflow (which is
      // checked above).
      const SCEV *MaxBECount = computeMaxBECountForLT(
          Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
      return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                       MaxBECount, false /*MaxOrZero*/, Predicates);
    }
  } else {
    // We use the expression (max(End,Start)-Start)/Stride to describe the
    // backedge count, as if the backedge is taken at least once
    // max(End,Start) is End and so the result is as above, and if not
    // max(End,Start) is Start so we get a backedge count of zero.
    auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
    assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
    assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
    assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
    // Can we prove (max(RHS,Start) > Start - Stride?
    if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
        isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
      // In this case, we can use a refined formula for computing backedge
      // taken count.  The general formula remains:
      //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
      // We want to use the alternate formula:
      //   "((End - 1) - (Start - Stride)) /u Stride"
      // Let's do a quick case analysis to show these are equivalent under
      // our precondition that max(RHS,Start) > Start - Stride.
      // * For RHS <= Start, the backedge-taken count must be zero.
      //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
      //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
      //   "Stride - 1 /u Stride" which is indeed zero for all non-zero values
      //   of Stride.  For 0 stride, we've used umax(Stride, 1) above,
      //   reducing this to the stride of 1 case.
      // * For RHS >= Start, the backedge count must be "RHS-Start /uceil
      //   Stride".
      //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
      //   "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
      //   "((RHS - (Start - Stride) - 1) /u Stride".
      //   Our preconditions trivially imply no overflow in that form.
      const SCEV *MinusOne = getMinusOne(Stride->getType());
      const SCEV *Numerator =
          getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
      BECount = getUDivExpr(Numerator, Stride);
    }

    if (!BECount) {
      auto canProveRHSGreaterThanEqualStart = [&]() {
        auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
        const SCEV *GuardedRHS = applyLoopGuards(OrigRHS, L);
        const SCEV *GuardedStart = applyLoopGuards(OrigStart, L);

        if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart) ||
            isKnownPredicate(CondGE, GuardedRHS, GuardedStart))
          return true;

        // (RHS > Start - 1) implies RHS >= Start.
        // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
        //   "Start - 1" doesn't overflow.
        // * For signed comparison, if Start - 1 does overflow, it's equal
        //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
        // * For unsigned comparison, if Start - 1 does overflow, it's equal
        //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
        //
        // FIXME: Should isLoopEntryGuardedByCond do this for us?
        auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
        auto *StartMinusOne =
            getAddExpr(OrigStart, getMinusOne(OrigStart->getType()));
        return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
      };

      // If we know that RHS >= Start in the context of loop, then we know
      // that max(RHS, Start) = RHS at this point.
      if (canProveRHSGreaterThanEqualStart()) {
        End = RHS;
      } else {
        // If RHS < Start, the backedge will be taken zero times.  So in
        // general, we can write the backedge-taken count as:
        //
        //     RHS >= Start ? ceil(RHS - Start) / Stride : 0
        //
        // We convert it to the following to make it more convenient for SCEV:
        //
        //     ceil(max(RHS, Start) - Start) / Stride
        End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);

        // See what would happen if we assume the backedge is taken.  This is
        // used to compute MaxBECount.
        BECountIfBackedgeTaken =
            getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
      }

      // At this point, we know:
      //
      // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
      // 2. The index variable doesn't overflow.
      //
      // Therefore, we know N exists such that
      // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
      // doesn't overflow.
      //
      // Using this information, try to prove whether the addition in
      // "(Start - End) + (Stride - 1)" has unsigned overflow.
      const SCEV *One = getOne(Stride->getType());
      bool MayAddOverflow = [&] {
        if (isKnownToBeAPowerOfTwo(Stride)) {
          // Suppose Stride is a power of two, and Start/End are unsigned
          // integers.  Let UMAX be the largest representable unsigned
          // integer.
          //
          // By the preconditions of this function, we know
          // "(Start + Stride * N) >= End", and this doesn't overflow.
          // As a formula:
          //
          //   End <= (Start + Stride * N) <= UMAX
          //
          // Subtracting Start from all the terms:
          //
          //   End - Start <= Stride * N <= UMAX - Start
          //
          // Since Start is unsigned, UMAX - Start <= UMAX.  Therefore:
          //
          //   End - Start <= Stride * N <= UMAX
          //
          // Stride * N is a multiple of Stride. Therefore,
          //
          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
          //
          // Since Stride is a power of two, UMAX + 1 is divisible by
          // Stride.  Therefore, UMAX mod Stride == Stride - 1.  So we can
          // write:
          //
          //   End - Start <= Stride * N <= UMAX - Stride - 1
          //
          // Dropping the middle term:
          //
          //   End - Start <= UMAX - Stride - 1
          //
          // Adding Stride - 1 to both sides:
          //
          //   (End - Start) + (Stride - 1) <= UMAX
          //
          // In other words, the addition doesn't have unsigned overflow.
          //
          // A similar proof works if we treat Start/End as signed values.
          // Just rewrite steps before "End - Start <= Stride * N <= UMAX"
          // to use signed max instead of unsigned max.  Note that we're
          // trying to prove a lack of unsigned overflow in either case.
          return false;
        }
        if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
          // If Start is equal to Stride, (End - Start) + (Stride - 1) == End
          // - 1.  If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1
          // <u End.  If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End -
          // 1 <s End.
          //
          // If Start is equal to Stride - 1, (End - Start) + Stride - 1 ==
          // End.
          return false;
        }
        return true;
      }();

      const SCEV *Delta = getMinusSCEV(End, Start);
      if (!MayAddOverflow) {
        // floor((D + (S - 1)) / S)
        // We prefer this formulation if it's legal because it's fewer
        // operations.
        BECount =
            getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
      } else {
        BECount = getUDivCeilSCEV(Delta, Stride);
      }
    }
  }

  const SCEV *ConstantMaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount)) {
    ConstantMaxBECount = BECount;
  } else if (BECountIfBackedgeTaken &&
             isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    ConstantMaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    ConstantMaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    ConstantMaxBECount = getConstant(getUnsignedRangeMax(BECount));

  const SCEV *SymbolicMaxBECount =
      isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
                   Predicates);
}
13708
// Compute the exit limit for a loop whose exit test is "LHS > RHS" (signed
// or unsigned), where LHS is a decreasing affine add-recurrence in L and RHS
// is loop-invariant.
// NOTE(review): a few lines of this definition (the Predicates declaration,
// one isLoopEntryGuardedByCond call head, the Start pointer conversion, and
// part of the MaxStart ternary) are not visible in this view; visible code
// is preserved verbatim.
ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
    const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned,
    bool ControlsOnlyExit, bool AllowPredicates) {
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsOnlyExit && any(IV->getNoWrapFlags(WrapType));

  // The IV counts down, so work with the positive magnitude of its step.
  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow.  Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing to optimize in presence of
  // undefined behaviors like the case of C language.
  if (!Stride->isOne() && !NoWrap)
    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
      return getCouldNotCompute();

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    // NOTE(review): the head of this guarded-by-cond call is not visible in
    // this view.
        L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  if (Start->getType()->isPointerTy()) {
    // NOTE(review): the pointer-to-integer conversion of Start is not
    // visible in this view.
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (End->getType()->isPointerTy()) {
    End = getLosslessPtrToIntExpr(End);
    if (isa<SCEVCouldNotCompute>(End))
      return End;
  }

  // Compute ((Start - End) + (Stride - 1)) / Stride.
  // FIXME: This can overflow.  Holding off on fixing this for now;
  // howManyGreaterThans will hopefully be gone soon.
  const SCEV *One = getOne(Stride->getType());
  const SCEV *BECount = getUDivExpr(
      getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  // Smallest End value the IV can step past without wrapping below the
  // type's minimum.
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS.  This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *ConstantMaxBECount =
      isa<SCEVConstant>(BECount)
          ? BECount
          : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
                            getConstant(MinStride));

  if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
    ConstantMaxBECount = BECount;
  const SCEV *SymbolicMaxBECount =
      isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;

  return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, false,
                   Predicates);
}
13807
                                                    // Given this add-rec and a
                                                    // constant Range, return
                                                    // the number of iterations
                                                    // after which the value
                                                    // first leaves Range, or
                                                    // CouldNotCompute.
                                                    // NOTE(review): the first
                                                    // line of the signature
                                                    // and two statements are
                                                    // not visible in this
                                                    // view.
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      // NOTE(review): the Operands vector declaration is not visible in this
      // view.
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.
    // NOTE(review): part of this assert's argument list is not visible in
    // this view.
    assert(Range.contains(
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(*S);
  }

  return SE.getCouldNotCompute();
}
13878
// Return the post-increment form of this add-rec, i.e. the recurrence whose
// value on iteration i equals this recurrence's value on iteration i+1.
// NOTE(review): the second line of the signature, the Ops vector
// declaration, and the final return statement are not visible in this view.
const SCEVAddRecExpr *
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}).  For example, it
  // may happen if we reach arithmetic depth limit while simplifying.  So we
  // construct the returned value explicitly.
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier).  This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrency with zero step?");
  Ops.push_back(Last);
}
13903
// Return true when S contains at least an undef value.
// Walks every sub-expression of S looking for an undef-or-poison operand.
// NOTE(review): the function signature line is not visible in this view.
  return SCEVExprContains(
      S, [](const SCEV *S) { return match(S, m_scev_UndefOrPoison()); });
}
13909
// Return true when S contains a value that is a nullptr.
// A SCEVUnknown's Value becomes null when the underlying IR value was
// erased, so this detects stale expressions.
// NOTE(review): the function signature line is not visible in this view.
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return SU->getValue() == nullptr;
    return false;
  });
}
13918
/// Return the size of an element read or written by Inst, as a SCEV sizeof
/// expression, or null when Inst is neither a load nor a store.
/// NOTE(review): the function signature line and the declaration of ETy are
/// not visible in this view.
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();  // size of the stored value
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();  // size of the loaded value
  else
    return nullptr;

  return getSizeOfExpr(ETy, Ty);
}
13932
13933//===----------------------------------------------------------------------===//
13934// SCEVCallbackVH Class Implementation
13935//===----------------------------------------------------------------------===//
13936
  // Callback fired when the tracked Value is deleted: purge cached results
  // that refer to it so ScalarEvolution never touches a freed Value.
  // NOTE(review): the function signature line is not visible in this view.
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}
13944
13945void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
13946 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
13947
13948 // Forget all the expressions associated with users of the old value,
13949 // so that future queries will recompute the expressions using the new
13950 // value.
13951 SE->forgetValue(getValPtr());
13952 // this now dangles!
13953}
13954
/// Construct a callback handle that watches \p V on behalf of the owning
/// ScalarEvolution analysis \p se (may be null only in transient states).
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}
13957
13958//===----------------------------------------------------------------------===//
13959// ScalarEvolution Class Implementation
13960//===----------------------------------------------------------------------===//
13961
                                 // Primary constructor: wires up the analyses
                                 // ScalarEvolution depends on and sizes the
                                 // main caches.
                                 // NOTE(review): the first signature lines
                                 // (Function/TLI/AC/DT parameters) are not
                                 // visible in this view.
                                 LoopInfo &LI)
    : F(F), DL(F.getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators.  Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this
  // beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards.  For now we prefer to
  // be efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = Intrinsic::getDeclarationIfExists(
      F.getParent(), Intrinsic::experimental_guard);
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}
13982
    : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
      DT(Arg.DT), LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingMerges(std::move(Arg.PendingMerges)),
      ConstantMultipleCache(std::move(Arg.ConstantMultipleCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      BECountUsers(std::move(Arg.BECountUsers)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      SCEVUsers(std::move(Arg.SCEVUsers)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  // Null out Arg's SCEVUnknown list head so Arg's destructor does not destroy
  // the SCEVUnknowns now owned by this object (see the destructor below).
  Arg.FirstUnknown = nullptr;
}
14012
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    // Advance before destroying: the Next link lives inside the node.
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();

  // Verify that no in-progress query state leaked past its scope.
  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}
14034
14038
14039/// When printing a top-level SCEV for trip counts, it's helpful to include
14040/// a type for constants which are otherwise hard to disambiguate.
14041static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV* S) {
14042 if (isa<SCEVConstant>(S))
14043 OS << *S->getType() << " ";
14044 OS << *S;
14045}
14046
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (ExitingBlocks.size() != 1)
    OS << "<multiple exits> ";

  // Exact backedge-taken count, when computable.
  auto *BTC = SE->getBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(BTC)) {
    OS << "backedge-taken count is ";
    PrintSCEVWithTypeHint(OS, BTC);
  } else
    OS << "Unpredictable backedge-taken count.";
  OS << "\n";

  // Per-exit counts; if an exit is not computable, retry under predicates.
  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << " exit count for " << ExitingBlock->getName() << ": ";
      const SCEV *EC = SE->getExitCount(L, ExitingBlock);
      PrintSCEVWithTypeHint(OS, EC);
      if (isa<SCEVCouldNotCompute>(EC)) {
        // Retry with predicates.
        EC = SE->getPredicatedExitCount(L, ExitingBlock, &Predicates);
        if (!isa<SCEVCouldNotCompute>(EC)) {
          OS << "\n predicated exit count for " << ExitingBlock->getName()
             << ": ";
          PrintSCEVWithTypeHint(OS, EC);
          OS << "\n Predicates:\n";
          for (const auto *P : Predicates)
            P->print(OS, 4);
        }
      }
      OS << "\n";
    }

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  // Constant max backedge-taken count.
  auto *ConstantBTC = SE->getConstantMaxBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(ConstantBTC)) {
    OS << "constant max backedge-taken count is ";
    PrintSCEVWithTypeHint(OS, ConstantBTC);
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable constant max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  // Symbolic max backedge-taken count.
  auto *SymbolicBTC = SE->getSymbolicMaxBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) {
    OS << "symbolic max backedge-taken count is ";
    PrintSCEVWithTypeHint(OS, SymbolicBTC);
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable symbolic max backedge-taken count. ";
  }
  OS << "\n";

  // Per-exit symbolic max counts, with predicated retry as above.
  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << " symbolic max exit count for " << ExitingBlock->getName() << ": ";
      auto *ExitBTC = SE->getExitCount(L, ExitingBlock,
      PrintSCEVWithTypeHint(OS, ExitBTC);
      if (isa<SCEVCouldNotCompute>(ExitBTC)) {
        // Retry with predicates.
        ExitBTC = SE->getPredicatedExitCount(L, ExitingBlock, &Predicates,
        if (!isa<SCEVCouldNotCompute>(ExitBTC)) {
          OS << "\n predicated symbolic max exit count for "
             << ExitingBlock->getName() << ": ";
          PrintSCEVWithTypeHint(OS, ExitBTC);
          OS << "\n Predicates:\n";
          for (const auto *P : Predicates)
            P->print(OS, 4);
        }
      }
      OS << "\n";
    }

  // Predicated counts are only printed when they differ from the
  // unpredicated result above.
  auto *PBT = SE->getPredicatedBackedgeTakenCount(L, Preds);
  if (PBT != BTC) {
    assert(!Preds.empty() && "Different predicated BTC, but no predicates");
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    if (!isa<SCEVCouldNotCompute>(PBT)) {
      OS << "Predicated backedge-taken count is ";
      PrintSCEVWithTypeHint(OS, PBT);
    } else
      OS << "Unpredictable predicated backedge-taken count.";
    OS << "\n";
    OS << " Predicates:\n";
    for (const auto *P : Preds)
      P->print(OS, 4);
  }
  Preds.clear();

  auto *PredConstantMax =
  if (PredConstantMax != ConstantBTC) {
    assert(!Preds.empty() &&
           "different predicated constant max BTC but no predicates");
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    if (!isa<SCEVCouldNotCompute>(PredConstantMax)) {
      OS << "Predicated constant max backedge-taken count is ";
      PrintSCEVWithTypeHint(OS, PredConstantMax);
    } else
      OS << "Unpredictable predicated constant max backedge-taken count.";
    OS << "\n";
    OS << " Predicates:\n";
    for (const auto *P : Preds)
      P->print(OS, 4);
  }
  Preds.clear();

  auto *PredSymbolicMax =
  if (SymbolicBTC != PredSymbolicMax) {
    assert(!Preds.empty() &&
           "Different predicated symbolic max BTC, but no predicates");
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    if (!isa<SCEVCouldNotCompute>(PredSymbolicMax)) {
      OS << "Predicated symbolic max backedge-taken count is ";
      PrintSCEVWithTypeHint(OS, PredSymbolicMax);
    } else
      OS << "Unpredictable predicated symbolic max backedge-taken count.";
    OS << "\n";
    OS << " Predicates:\n";
    for (const auto *P : Preds)
      P->print(OS, 4);
  }

    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}
14209
namespace llvm {
// Note: these overloaded operators need to be in the llvm namespace for them
// to be resolved correctly. If we put them outside the llvm namespace, the
//
//   OS << ": " << SE.getLoopDisposition(SV, InnerL);
//
// code below "breaks" and starts printing raw enum values as opposed to the
// string values.
  // Printer for ScalarEvolution::LoopDisposition.
  switch (LD) {
    OS << "Variant";
    break;
    OS << "Invariant";
    break;
    OS << "Computable";
    break;
  }
  return OS;
}

  // Printer for ScalarEvolution::BlockDisposition.
  switch (BD) {
    OS << "DoesNotDominate";
    break;
    OS << "Dominates";
    break;
    OS << "ProperlyDominates";
    break;
  }
  return OS;
}
} // namespace llvm
14250
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  if (ClassifyExpressions) {
    OS << "Classifying expressions for: ";
    F.printAsOperand(OS, /*PrintType=*/false);
    OS << "\n";
    for (Instruction &I : instructions(F))
      if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
        OS << I << '\n';
        OS << " --> ";
        const SCEV *SV = SE.getSCEV(&I);
        SV->print(OS);
        // Ranges are only meaningful for computable expressions.
        if (!isa<SCEVCouldNotCompute>(SV)) {
          OS << " U: ";
          SE.getUnsignedRange(SV).print(OS);
          OS << " S: ";
          SE.getSignedRange(SV).print(OS);
        }

        const Loop *L = LI.getLoopFor(I.getParent());

        // Also print the expression evaluated at the enclosing loop's scope,
        // when that differs from the plain expression.
        const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
        if (AtUse != SV) {
          OS << " --> ";
          AtUse->print(OS);
          if (!isa<SCEVCouldNotCompute>(AtUse)) {
            OS << " U: ";
            SE.getUnsignedRange(AtUse).print(OS);
            OS << " S: ";
            SE.getSignedRange(AtUse).print(OS);
          }
        }

        if (L) {
          // Exit value: the expression at the parent loop's scope, if it is
          // invariant in L.
          OS << "\t\t" "Exits: ";
          const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
          if (!SE.isLoopInvariant(ExitValue, L)) {
            OS << "<<Unknown>>";
          } else {
            OS << *ExitValue;
          }

          // Dispositions for L and all its ancestors...
          ListSeparator LS(", ", "\t\tLoopDispositions: { ");
          for (const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
            OS << LS;
            Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
            OS << ": " << SE.getLoopDisposition(SV, Iter);
          }

          // ...and for all loops nested inside L.
          for (const auto *InnerL : depth_first(L)) {
            if (InnerL == L)
              continue;
            OS << LS;
            InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
            OS << ": " << SE.getLoopDisposition(SV, InnerL);
          }

          OS << " }";
        }

        OS << "\n";
      }
  }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}
14328
  // Fast path: return a previously computed disposition for (S, L).
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  // Seed a provisional LoopVariant entry before computing; presumably this
  // guards against recursive queries on S -- TODO confirm.
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  // Re-acquire the bucket: computeLoopDisposition may have added entries and
  // invalidated the earlier reference. Scan in reverse to find our entry.
  auto &Values2 = LoopDispositions[S];
  for (auto &V : llvm::reverse(Values2)) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
14347
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
    return LoopInvariant;
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (SCEVUse Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToAddr:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    // Any variant operand makes the whole expression variant; a mix of
    // invariant and computable operands yields a computable result.
    bool HasVarying = false;
    for (SCEVUse Op : S->operands()) {
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
14420
  // S is invariant in L iff its cached/computed disposition says so.
  return getLoopDisposition(S, L) == LoopInvariant;
}

  // S has a computable evolution in L iff its disposition is LoopComputable.
  return getLoopDisposition(S, L) == LoopComputable;
}
14428
  // Fast path: return a previously computed disposition for (S, BB).
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  // Seed a provisional DoesNotDominateBlock entry before computing;
  // presumably this guards against recursive queries on S -- TODO confirm.
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  // Re-acquire the bucket: computeBlockDisposition may have added entries and
  // invalidated the earlier reference. Scan in reverse to find our entry.
  auto &Values2 = BlockDispositions[S];
  for (auto &V : llvm::reverse(Values2)) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
14447
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    [[fallthrough]];
  }
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToAddr:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    // The expression properly dominates BB only if every operand does; a
    // merely-dominating operand demotes the result to DominatesBlock.
    bool Proper = true;
    for (const SCEV *NAryOp : S->operands()) {
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
      return DoesNotDominateBlock;
    }
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
14504
14505bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
14506 return getBlockDisposition(S, BB) >= DominatesBlock;
14507}
14508
14511}
14512
14513bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
14514 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
14515}
14516
14517void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
14518 bool Predicated) {
14519 auto &BECounts =
14520 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14521 auto It = BECounts.find(L);
14522 if (It != BECounts.end()) {
14523 for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14524 for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14525 if (!isa<SCEVConstant>(S)) {
14526 auto UserIt = BECountUsers.find(S);
14527 assert(UserIt != BECountUsers.end());
14528 UserIt->second.erase({L, Predicated});
14529 }
14530 }
14531 }
14532 BECounts.erase(It);
14533 }
14534}
14535
14536void ScalarEvolution::forgetMemoizedResults(ArrayRef<SCEVUse> SCEVs) {
14537 SmallPtrSet<const SCEV *, 8> ToForget(llvm::from_range, SCEVs);
14538 SmallVector<SCEVUse, 8> Worklist(ToForget.begin(), ToForget.end());
14539
14540 while (!Worklist.empty()) {
14541 const SCEV *Curr = Worklist.pop_back_val();
14542 auto Users = SCEVUsers.find(Curr);
14543 if (Users != SCEVUsers.end())
14544 for (const auto *User : Users->second)
14545 if (ToForget.insert(User).second)
14546 Worklist.push_back(User);
14547 }
14548
14549 for (const auto *S : ToForget)
14550 forgetMemoizedResultsImpl(S);
14551
14552 for (auto I = PredicatedSCEVRewrites.begin();
14553 I != PredicatedSCEVRewrites.end();) {
14554 std::pair<const SCEV *, const Loop *> Entry = I->first;
14555 if (ToForget.count(Entry.first))
14556 PredicatedSCEVRewrites.erase(I++);
14557 else
14558 ++I;
14559 }
14560}
14561
void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
  // Drop all per-expression analysis caches for S.
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  HasRecMap.erase(S);
  ConstantMultipleCache.erase(S);

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    UnsignedWrapViaInductionTried.erase(AR);
    SignedWrapViaInductionTried.erase(AR);
  }

  // Remove S and all values mapping to it from the bidirectional
  // value<->expression maps.
  auto ExprIt = ExprValueMap.find(S);
  if (ExprIt != ExprValueMap.end()) {
    for (Value *V : ExprIt->second) {
      auto ValueIt = ValueExprMap.find_as(V);
      if (ValueIt != ValueExprMap.end())
        ValueExprMap.erase(ValueIt);
    }
    ExprValueMap.erase(ExprIt);
  }

  // Unlink S from the values-at-scope cache in both directions.
  auto ScopeIt = ValuesAtScopes.find(S);
  if (ScopeIt != ValuesAtScopes.end()) {
    for (const auto &Pair : ScopeIt->second)
      if (!isa_and_nonnull<SCEVConstant>(Pair.second))
        llvm::erase(ValuesAtScopesUsers[Pair.second],
                    std::make_pair(Pair.first, S));
    ValuesAtScopes.erase(ScopeIt);
  }

  auto ScopeUserIt = ValuesAtScopesUsers.find(S);
  if (ScopeUserIt != ValuesAtScopesUsers.end()) {
    for (const auto &Pair : ScopeUserIt->second)
      llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
    ValuesAtScopesUsers.erase(ScopeUserIt);
  }

  // Invalidate any cached backedge-taken counts built on top of S.
  auto BEUsersIt = BECountUsers.find(S);
  if (BEUsersIt != BECountUsers.end()) {
    // Work on a copy, as forgetBackedgeTakenCounts() will modify the original.
    auto Copy = BEUsersIt->second;
    for (const auto &Pair : Copy)
      forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
    BECountUsers.erase(BEUsersIt);
  }

  // Drop fold-cache entries that produce S.
  auto FoldUser = FoldCacheUser.find(S);
  if (FoldUser != FoldCacheUser.end())
    for (auto &KV : FoldUser->second)
      FoldCache.erase(KV);
  FoldCacheUser.erase(S);
}
14616
14617void
14618ScalarEvolution::getUsedLoops(const SCEV *S,
14619 SmallPtrSetImpl<const Loop *> &LoopsUsed) {
14620 struct FindUsedLoops {
14621 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14622 : LoopsUsed(LoopsUsed) {}
14623 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14624 bool follow(const SCEV *S) {
14625 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
14626 LoopsUsed.insert(AR->getLoop());
14627 return true;
14628 }
14629
14630 bool isDone() const { return false; }
14631 };
14632
14633 FindUsedLoops F(LoopsUsed);
14634 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14635}
14636
void ScalarEvolution::getReachableBlocks(
  Worklist.push_back(&F.getEntryBlock());
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();
    // Skip blocks we have already marked reachable.
    if (!Reachable.insert(BB).second)
      continue;

    Value *Cond;
    BasicBlock *TrueBB, *FalseBB;
    if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB),
                                        m_BasicBlock(FalseBB)))) {
      // A constant branch condition makes only one successor reachable.
      if (auto *C = dyn_cast<ConstantInt>(Cond)) {
        Worklist.push_back(C->isOne() ? TrueBB : FalseBB);
        continue;
      }

      // Try to decide the branch via SCEV constant-range facts (for the
      // comparison or its inverse), pruning the dead successor.
      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        const SCEV *L = getSCEV(Cmp->getOperand(0));
        const SCEV *R = getSCEV(Cmp->getOperand(1));
        if (isKnownPredicateViaConstantRanges(Cmp->getCmpPredicate(), L, R)) {
          Worklist.push_back(TrueBB);
          continue;
        }
        if (isKnownPredicateViaConstantRanges(Cmp->getInverseCmpPredicate(), L,
                                              R)) {
          Worklist.push_back(FalseBB);
          continue;
        }
      }
    }

    // Otherwise conservatively treat all successors as reachable.
    append_range(Worklist, successors(BB));
  }
}
14673
  // Cross-check this (possibly stale) analysis against a freshly computed
  // ScalarEvolution (SE2) and abort on any mismatch.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);
  SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
  SE2.getReachableBlocks(ReachableBlocks, F);

  // Returns the difference Old - New, or nullptr when the difference should
  // not (or cannot meaningfully) be checked.
  auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * {
    if (containsUndefs(Old) || containsUndefs(New)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes a value go from "undef"
      // to "undef+1" (say). The transform is fine, since in both cases the
      // result is "undef", but SCEV thinks the value increased by 1.
      return nullptr;
    }

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    const SCEV *Delta = SE2.getMinusSCEV(Old, New);
    if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta))
      return nullptr;

    return Delta;
  };

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    // Only verify BECounts in reachable loops. For an unreachable loop,
    // any BECount is legal.
    if (!ReachableBlocks.contains(L->getHeader()))
      continue;

    // Only verify cached BECounts. Computing new BECounts may change the
    // results of subsequent SCEV uses.
    auto It = BackedgeTakenCounts.find(L);
    if (It == BackedgeTakenCounts.end())
      continue;

    auto *CurBECount =
        SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this)));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    // Zero-extend the narrower count so both are compared at a common width.
    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = GetDelta(CurBECount, NewBECount);
    if (Delta && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.insert(L).second)
      Worklist.append(L->begin(), L->end());
  }
  for (const auto &KV : ValueExprMap) {
#ifndef NDEBUG
    // Check for SCEV expressions referencing invalid/deleted loops.
    if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
      assert(ValidLoops.contains(AR->getLoop()) &&
             "AddRec references invalid loop");
    }
#endif

    // Check that the value is also part of the reverse map.
    auto It = ExprValueMap.find(KV.second);
    if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
      dbgs() << "Value " << *KV.first
             << " is in ValueExprMap but not in ExprValueMap\n";
      std::abort();
    }

    // For reachable instructions, re-derive the SCEV and compare.
    if (auto *I = dyn_cast<Instruction>(&*KV.first)) {
      if (!ReachableBlocks.contains(I->getParent()))
        continue;
      const SCEV *OldSCEV = SCM.visit(KV.second);
      const SCEV *NewSCEV = SE2.getSCEV(I);
      const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
      if (Delta && !Delta->isZero()) {
        dbgs() << "SCEV for value " << *I << " changed!\n"
               << "Old: " << *OldSCEV << "\n"
               << "New: " << *NewSCEV << "\n"
               << "Delta: " << *Delta << "\n";
        std::abort();
      }
    }
  }

  // Check the reverse map against the forward map.
  for (const auto &KV : ExprValueMap) {
    for (Value *V : KV.second) {
      const SCEV *S = ValueExprMap.lookup(V);
      if (!S) {
        dbgs() << "Value " << *V
               << " is in ExprValueMap but not in ValueExprMap\n";
        std::abort();
      }
      if (S != KV.first) {
        dbgs() << "Value " << *V << " mapped to " << *S << " rather than "
               << *KV.first << "\n";
        std::abort();
      }
    }
  }

  // Verify integrity of SCEV users.
  for (const auto &S : UniqueSCEVs) {
    for (SCEVUse Op : S.operands()) {
      // We do not store dependencies of constants.
      if (isa<SCEVConstant>(Op))
        continue;
      auto It = SCEVUsers.find(Op);
      if (It != SCEVUsers.end() && It->second.count(&S))
        continue;
      dbgs() << "Use of operand " << *Op << " by user " << S
             << " is not being tracked!\n";
      std::abort();
    }
  }

  // Verify integrity of ValuesAtScopes users.
  for (const auto &ValueAndVec : ValuesAtScopes) {
    const SCEV *Value = ValueAndVec.first;
    for (const auto &LoopAndValueAtScope : ValueAndVec.second) {
      const Loop *L = LoopAndValueAtScope.first;
      const SCEV *ValueAtScope = LoopAndValueAtScope.second;
      if (!isa<SCEVConstant>(ValueAtScope)) {
        auto It = ValuesAtScopesUsers.find(ValueAtScope);
        if (It != ValuesAtScopesUsers.end() &&
            is_contained(It->second, std::make_pair(L, Value)))
          continue;
        dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
               << *ValueAtScope << " missing in ValuesAtScopesUsers\n";
        std::abort();
      }
    }
  }

  // ...and the reverse direction of the same cache.
  for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
    const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
    for (const auto &LoopAndValue : ValueAtScopeAndVec.second) {
      const Loop *L = LoopAndValue.first;
      const SCEV *Value = LoopAndValue.second;
      auto It = ValuesAtScopes.find(Value);
      if (It != ValuesAtScopes.end() &&
          is_contained(It->second, std::make_pair(L, ValueAtScope)))
        continue;
      dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
             << *ValueAtScope << " missing in ValuesAtScopes\n";
      std::abort();
    }
  }

  // Verify integrity of BECountUsers.
  auto VerifyBECountUsers = [&](bool Predicated) {
    auto &BECounts =
        Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
    for (const auto &LoopAndBEInfo : BECounts) {
      for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
        for (const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
          if (!isa<SCEVConstant>(S)) {
            auto UserIt = BECountUsers.find(S);
            if (UserIt != BECountUsers.end() &&
                UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
              continue;
            dbgs() << "Value " << *S << " for loop " << *LoopAndBEInfo.first
                   << " missing from BECountUsers\n";
            std::abort();
          }
        }
      }
    }
  };
  VerifyBECountUsers(/* Predicated */ false);
  VerifyBECountUsers(/* Predicated */ true);

  // Verify integrity of loop disposition cache.
  for (auto &[S, Values] : LoopDispositions) {
    for (auto [Loop, CachedDisposition] : Values) {
      const auto RecomputedDisposition = SE2.getLoopDisposition(S, Loop);
      if (CachedDisposition != RecomputedDisposition) {
        dbgs() << "Cached disposition of " << *S << " for loop " << *Loop
               << " is incorrect: cached " << CachedDisposition << ", actual "
               << RecomputedDisposition << "\n";
        std::abort();
      }
    }
  }

  // Verify integrity of the block disposition cache.
  for (auto &[S, Values] : BlockDispositions) {
    for (auto [BB, CachedDisposition] : Values) {
      const auto RecomputedDisposition = SE2.getBlockDisposition(S, BB);
      if (CachedDisposition != RecomputedDisposition) {
        dbgs() << "Cached disposition of " << *S << " for block %"
               << BB->getName() << " is incorrect: cached " << CachedDisposition
               << ", actual " << RecomputedDisposition << "\n";
        std::abort();
      }
    }
  }

  // Verify FoldCache/FoldCacheUser caches.
  for (auto [FoldID, Expr] : FoldCache) {
    auto I = FoldCacheUser.find(Expr);
    if (I == FoldCacheUser.end()) {
      dbgs() << "Missing entry in FoldCacheUser for cached expression " << *Expr
             << "!\n";
      std::abort();
    }
    if (!is_contained(I->second, FoldID)) {
      dbgs() << "Missing FoldID in cached users of " << *Expr << "!\n";
      std::abort();
    }
  }
  for (auto [Expr, IDs] : FoldCacheUser) {
    for (auto &FoldID : IDs) {
      const SCEV *S = FoldCache.lookup(FoldID);
      if (!S) {
        dbgs() << "Missing entry in FoldCache for expression " << *Expr
               << "!\n";
        std::abort();
      }
      if (S != Expr) {
        dbgs() << "Entry in FoldCache doesn't match FoldCacheUser: " << *S
               << " != " << *Expr << "!\n";
        std::abort();
      }
    }
  }

  // Verify that ConstantMultipleCache computations are correct. We check that
  // cached multiples and recomputed multiples are multiples of each other to
  // verify correctness. It is possible that a recomputed multiple is different
  // from the cached multiple due to strengthened no wrap flags or changes in
  // KnownBits computations.
  for (auto [S, Multiple] : ConstantMultipleCache) {
    APInt RecomputedMultiple = SE2.getConstantMultiple(S);
    if ((Multiple != 0 && RecomputedMultiple != 0 &&
         Multiple.urem(RecomputedMultiple) != 0 &&
         RecomputedMultiple.urem(Multiple) != 0)) {
      dbgs() << "Incorrect cached computation in ConstantMultipleCache for "
             << *S << " : Computed " << RecomputedMultiple
             << " but cache contains " << Multiple << "!\n";
      std::abort();
    }
  }
}
14966
14968 Function &F, const PreservedAnalyses &PA,
14969 FunctionAnalysisManager::Invalidator &Inv) {
14970 // Invalidate the ScalarEvolution object whenever it isn't preserved or one
14971 // of its dependencies is invalidated.
14972 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
14973 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
14974 Inv.invalidate<AssumptionAnalysis>(F, PA) ||
14975 Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
14976 Inv.invalidate<LoopAnalysis>(F, PA);
14977}
14978
14979AnalysisKey ScalarEvolutionAnalysis::Key;
14980
14983 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
14984 auto &AC = AM.getResult<AssumptionAnalysis>(F);
14985 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
14986 auto &LI = AM.getResult<LoopAnalysis>(F);
14987 return ScalarEvolution(F, TLI, AC, DT, LI);
14988}
14989
14995
14998 // For compatibility with opt's -analyze feature under legacy pass manager
14999 // which was not ported to NPM. This keeps tests using
15000 // update_analyze_test_checks.py working.
15001 OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
15002 << F.getName() << "':\n";
15004 return PreservedAnalyses::all();
15005}
15006
15008 "Scalar Evolution Analysis", false, true)
15014 "Scalar Evolution Analysis", false, true)
15015
15017
15019
15021 SE.reset(new ScalarEvolution(
15023 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
15025 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
15026 return false;
15027}
15028
15030
15032 SE->print(OS);
15033}
15034
15036 if (!VerifySCEV)
15037 return;
15038
15039 SE->verify();
15040}
15041
15049
15051 const SCEV *RHS) {
15052 return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS);
15053}
15054
15055const SCEVPredicate *
15057 const SCEV *LHS, const SCEV *RHS) {
15059 assert(LHS->getType() == RHS->getType() &&
15060 "Type mismatch between LHS and RHS");
15061 // Unique this node based on the arguments
15062 ID.AddInteger(SCEVPredicate::P_Compare);
15063 ID.AddInteger(Pred);
15064 ID.AddPointer(LHS);
15065 ID.AddPointer(RHS);
15066 void *IP = nullptr;
15067 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
15068 return S;
15069 SCEVComparePredicate *Eq = new (SCEVAllocator)
15070 SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS);
15071 UniquePreds.InsertNode(Eq, IP);
15072 return Eq;
15073}
15074
15076 const SCEVAddRecExpr *AR,
15079 // Unique this node based on the arguments
15080 ID.AddInteger(SCEVPredicate::P_Wrap);
15081 ID.AddPointer(AR);
15082 ID.AddInteger(AddedFlags);
15083 void *IP = nullptr;
15084 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
15085 return S;
15086 auto *OF = new (SCEVAllocator)
15087 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
15088 UniquePreds.InsertNode(OF, IP);
15089 return OF;
15090}
15091
15092namespace {
15093
15094class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
15095public:
15096
15097 /// Rewrites \p S in the context of a loop L and the SCEV predication
15098 /// infrastructure.
15099 ///
15100 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
15101 /// equivalences present in \p Pred.
15102 ///
15103 /// If \p NewPreds is non-null, rewrite is free to add further predicates to
15104 /// \p NewPreds such that the result will be an AddRecExpr.
15105 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
15107 const SCEVPredicate *Pred) {
15108 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
15109 return Rewriter.visit(S);
15110 }
15111
15112 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
15113 if (Pred) {
15114 if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
15115 for (const auto *Pred : U->getPredicates())
15116 if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
15117 if (IPred->getLHS() == Expr &&
15118 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15119 return IPred->getRHS();
15120 } else if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) {
15121 if (IPred->getLHS() == Expr &&
15122 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15123 return IPred->getRHS();
15124 }
15125 }
15126 return convertToAddRecWithPreds(Expr);
15127 }
15128
15129 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
15130 const SCEV *Operand = visit(Expr->getOperand());
15131 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
15132 if (AR && AR->getLoop() == L && AR->isAffine()) {
15133 // This couldn't be folded because the operand didn't have the nuw
15134 // flag. Add the nusw flag as an assumption that we could make.
15135 const SCEV *Step = AR->getStepRecurrence(SE);
15136 Type *Ty = Expr->getType();
15137 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
15138 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
15139 SE.getSignExtendExpr(Step, Ty), L,
15140 AR->getNoWrapFlags());
15141 }
15142 return SE.getZeroExtendExpr(Operand, Expr->getType());
15143 }
15144
15145 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
15146 const SCEV *Operand = visit(Expr->getOperand());
15147 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
15148 if (AR && AR->getLoop() == L && AR->isAffine()) {
15149 // This couldn't be folded because the operand didn't have the nsw
15150 // flag. Add the nssw flag as an assumption that we could make.
15151 const SCEV *Step = AR->getStepRecurrence(SE);
15152 Type *Ty = Expr->getType();
15153 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
15154 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
15155 SE.getSignExtendExpr(Step, Ty), L,
15156 AR->getNoWrapFlags());
15157 }
15158 return SE.getSignExtendExpr(Operand, Expr->getType());
15159 }
15160
15161private:
15162 explicit SCEVPredicateRewriter(
15163 const Loop *L, ScalarEvolution &SE,
15164 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15165 const SCEVPredicate *Pred)
15166 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
15167
15168 bool addOverflowAssumption(const SCEVPredicate *P) {
15169 if (!NewPreds) {
15170 // Check if we've already made this assumption.
15171 return Pred && Pred->implies(P, SE);
15172 }
15173 NewPreds->push_back(P);
15174 return true;
15175 }
15176
15177 bool addOverflowAssumption(const SCEVAddRecExpr *AR,
15179 auto *A = SE.getWrapPredicate(AR, AddedFlags);
15180 return addOverflowAssumption(A);
15181 }
15182
15183 // If \p Expr represents a PHINode, we try to see if it can be represented
15184 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
15185 // to add this predicate as a runtime overflow check, we return the AddRec.
15186 // If \p Expr does not meet these conditions (is not a PHI node, or we
15187 // couldn't create an AddRec for it, or couldn't add the predicate), we just
15188 // return \p Expr.
15189 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
15190 if (!isa<PHINode>(Expr->getValue()))
15191 return Expr;
15192 std::optional<
15193 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15194 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
15195 if (!PredicatedRewrite)
15196 return Expr;
15197 for (const auto *P : PredicatedRewrite->second){
15198 // Wrap predicates from outer loops are not supported.
15199 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
15200 if (L != WP->getExpr()->getLoop())
15201 return Expr;
15202 }
15203 if (!addOverflowAssumption(P))
15204 return Expr;
15205 }
15206 return PredicatedRewrite->first;
15207 }
15208
15209 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15210 const SCEVPredicate *Pred;
15211 const Loop *L;
15212};
15213
15214} // end anonymous namespace
15215
15216const SCEV *
15218 const SCEVPredicate &Preds) {
15219 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
15220}
15221
15223 const SCEV *S, const Loop *L,
15226 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
15227 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
15228
15229 if (!AddRec)
15230 return nullptr;
15231
15232 // Check if any of the transformed predicates is known to be false. In that
15233 // case, it doesn't make sense to convert to a predicated AddRec, as the
15234 // versioned loop will never execute.
15235 for (const SCEVPredicate *Pred : TransformPreds) {
15236 auto *WrapPred = dyn_cast<SCEVWrapPredicate>(Pred);
15237 if (!WrapPred || WrapPred->getFlags() != SCEVWrapPredicate::IncrementNSSW)
15238 continue;
15239
15240 const SCEVAddRecExpr *AddRecToCheck = WrapPred->getExpr();
15241 const SCEV *ExitCount = getBackedgeTakenCount(AddRecToCheck->getLoop());
15242 if (isa<SCEVCouldNotCompute>(ExitCount))
15243 continue;
15244
15245 const SCEV *Step = AddRecToCheck->getStepRecurrence(*this);
15246 if (!Step->isOne())
15247 continue;
15248
15249 ExitCount = getTruncateOrSignExtend(ExitCount, Step->getType());
15250 const SCEV *Add = getAddExpr(AddRecToCheck->getStart(), ExitCount);
15251 if (isKnownPredicate(CmpInst::ICMP_SLT, Add, AddRecToCheck->getStart()))
15252 return nullptr;
15253 }
15254
15255 // Since the transformation was successful, we can now transfer the SCEV
15256 // predicates.
15257 Preds.append(TransformPreds.begin(), TransformPreds.end());
15258
15259 return AddRec;
15260}
15261
15262/// SCEV predicates
15266
15268 const ICmpInst::Predicate Pred,
15269 const SCEV *LHS, const SCEV *RHS)
15270 : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
15271 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
15272 assert(LHS != RHS && "LHS and RHS are the same SCEV");
15273}
15274
15276 ScalarEvolution &SE) const {
15277 const auto *Op = dyn_cast<SCEVComparePredicate>(N);
15278
15279 if (!Op)
15280 return false;
15281
15282 if (Pred != ICmpInst::ICMP_EQ)
15283 return false;
15284
15285 return Op->LHS == LHS && Op->RHS == RHS;
15286}
15287
// A compare predicate is never trivially (always) true; it always represents
// a check that must actually be validated.
15288bool SCEVComparePredicate::isAlwaysTrue() const { return false; }
15289
15291 if (Pred == ICmpInst::ICMP_EQ)
15292 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
15293 else
15294 OS.indent(Depth) << "Compare predicate: " << *LHS << " " << Pred << ") "
15295 << *RHS << "\n";
15296
15297}
15298
15300 const SCEVAddRecExpr *AR,
15301 IncrementWrapFlags Flags)
15302 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
15303
// Return the AddRec expression this wrap predicate is attached to.
15304const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; }
15305
15307 ScalarEvolution &SE) const {
15308 const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
15309 if (!Op || setFlags(Flags, Op->Flags) != Flags)
15310 return false;
15311
15312 if (Op->AR == AR)
15313 return true;
15314
15315 if (Flags != SCEVWrapPredicate::IncrementNSSW &&
15317 return false;
15318
15319 const SCEV *Start = AR->getStart();
15320 const SCEV *OpStart = Op->AR->getStart();
15321 if (Start->getType()->isPointerTy() != OpStart->getType()->isPointerTy())
15322 return false;
15323
15324 // Reject pointers to different address spaces.
15325 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->getType())
15326 return false;
15327
15328 // NUSW/NSSW on a wider-type AddRec does not imply the same on a
15329 // narrower-type AddRec.
15330 if (SE.getTypeSizeInBits(AR->getType()) >
15331 SE.getTypeSizeInBits(Op->AR->getType()))
15332 return false;
15333
15334 const SCEV *Step = AR->getStepRecurrence(SE);
15335 const SCEV *OpStep = Op->AR->getStepRecurrence(SE);
15336 if (!SE.isKnownPositive(Step) || !SE.isKnownPositive(OpStep))
15337 return false;
15338
15339 // If both steps are positive, this implies N, if N's start and step are
15340 // ULE/SLE (for NSUW/NSSW) than this'.
15341 Type *WiderTy = SE.getWiderType(Step->getType(), OpStep->getType());
15342 Step = SE.getNoopOrZeroExtend(Step, WiderTy);
15343 OpStep = SE.getNoopOrZeroExtend(OpStep, WiderTy);
15344
15345 bool IsNUW = Flags == SCEVWrapPredicate::IncrementNUSW;
15346 OpStart = IsNUW ? SE.getNoopOrZeroExtend(OpStart, WiderTy)
15347 : SE.getNoopOrSignExtend(OpStart, WiderTy);
15348 Start = IsNUW ? SE.getNoopOrZeroExtend(Start, WiderTy)
15349 : SE.getNoopOrSignExtend(Start, WiderTy);
15351 return SE.isKnownPredicate(Pred, OpStep, Step) &&
15352 SE.isKnownPredicate(Pred, OpStart, Start);
15353}
15354
15356 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
15357 IncrementWrapFlags IFlags = Flags;
15358
15359 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
15360 IFlags = clearFlags(IFlags, IncrementNSSW);
15361
15362 return IFlags == IncrementAnyWrap;
15363}
15364
15365void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
15366 OS.indent(Depth) << *getExpr() << " Added Flags: ";
15368 OS << "<nusw>";
15370 OS << "<nssw>";
15371 OS << "\n";
15372}
15373
15376 ScalarEvolution &SE) {
15377 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
15378 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();
15379
15380 // We can safely transfer the NSW flag as NSSW.
15381 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
15382 ImpliedFlags = IncrementNSSW;
15383
15384 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
15385 // If the increment is positive, the SCEV NUW flag will also imply the
15386 // WrapPredicate NUSW flag.
15387 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
15388 if (Step->getValue()->getValue().isNonNegative())
15389 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
15390 }
15391
15392 return ImpliedFlags;
15393}
15394
15395/// Union predicates don't get cached so create a dummy set ID for it.
15397 ScalarEvolution &SE)
15398 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {
15399 for (const auto *P : Preds)
15400 add(P, SE);
15401}
15402
15404 return all_of(Preds,
15405 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
15406}
15407
15409 ScalarEvolution &SE) const {
15410 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
15411 return all_of(Set->Preds, [this, &SE](const SCEVPredicate *I) {
15412 return this->implies(I, SE);
15413 });
15414
15415 return any_of(Preds,
15416 [N, &SE](const SCEVPredicate *I) { return I->implies(N, SE); });
15417}
15418
15420 for (const auto *Pred : Preds)
15421 Pred->print(OS, Depth);
15422}
15423
15424void SCEVUnionPredicate::add(const SCEVPredicate *N, ScalarEvolution &SE) {
15425 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
15426 for (const auto *Pred : Set->Preds)
15427 add(Pred, SE);
15428 return;
15429 }
15430
15431 // Implication checks are quadratic in the number of predicates. Stop doing
15432 // them if there are many predicates, as they should be too expensive to use
15433 // anyway at that point.
15434 bool CheckImplies = Preds.size() < 16;
15435
15436 // Only add predicate if it is not already implied by this union predicate.
15437 if (CheckImplies && implies(N, SE))
15438 return;
15439
15440 // Build a new vector containing the current predicates, except the ones that
15441 // are implied by the new predicate N.
15443 for (auto *P : Preds) {
15444 if (CheckImplies && N->implies(P, SE))
15445 continue;
15446 PrunedPreds.push_back(P);
15447 }
15448 Preds = std::move(PrunedPreds);
15449 Preds.push_back(N);
15450}
15451
15453 Loop &L)
15454 : SE(SE), L(L) {
15456 Preds = std::make_unique<SCEVUnionPredicate>(Empty, SE);
15457}
15458
15461 for (const auto *Op : Ops)
15462 // We do not expect that forgetting cached data for SCEVConstants will ever
15463 // open any prospects for sharpening or introduce any correctness issues,
15464 // so we don't bother storing their dependencies.
15465 if (!isa<SCEVConstant>(Op))
15466 SCEVUsers[Op].insert(User);
15467}
15468
15470 for (const SCEV *Op : Ops)
15471 // We do not expect that forgetting cached data for SCEVConstants will ever
15472 // open any prospects for sharpening or introduce any correctness issues,
15473 // so we don't bother storing their dependencies.
15474 if (!isa<SCEVConstant>(Op))
15475 SCEVUsers[Op].insert(User);
15476}
15477
15479 const SCEV *Expr = SE.getSCEV(V);
15480 return getPredicatedSCEV(Expr);
15481}
15482
15484 RewriteEntry &Entry = RewriteMap[Expr];
15485
15486 // If we already have an entry and the version matches, return it.
15487 if (Entry.second && Generation == Entry.first)
15488 return Entry.second;
15489
15490 // We found an entry but it's stale. Rewrite the stale entry
15491 // according to the current predicate.
15492 if (Entry.second)
15493 Expr = Entry.second;
15494
15495 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15496 Entry = {Generation, NewSCEV};
15497
15498 return NewSCEV;
15499}
15500
15502 if (!BackedgeCount) {
15504 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15505 for (const auto *P : Preds)
15506 addPredicate(*P);
15507 }
15508 return BackedgeCount;
15509}
15510
15512 if (!SymbolicMaxBackedgeCount) {
15514 SymbolicMaxBackedgeCount =
15515 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15516 for (const auto *P : Preds)
15517 addPredicate(*P);
15518 }
15519 return SymbolicMaxBackedgeCount;
15520}
15521
15523 if (!SmallConstantMaxTripCount) {
15525 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15526 for (const auto *P : Preds)
15527 addPredicate(*P);
15528 }
15529 return *SmallConstantMaxTripCount;
15530}
15531
15533 if (Preds->implies(&Pred, SE))
15534 return;
15535
15536 SmallVector<const SCEVPredicate *, 4> NewPreds(Preds->getPredicates());
15537 NewPreds.push_back(&Pred);
15538 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15539 updateGeneration();
15540}
15541
15543 return *Preds;
15544}
15545
15546void PredicatedScalarEvolution::updateGeneration() {
15547 // If the generation number wrapped recompute everything.
15548 if (++Generation == 0) {
15549 for (auto &II : RewriteMap) {
15550 const SCEV *Rewritten = II.second.second;
15551 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)};
15552 }
15553 }
15554}
15555
15558 const SCEV *Expr = getSCEV(V);
15559 const auto *AR = cast<SCEVAddRecExpr>(Expr);
15560
15561 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);
15562
15563 // Clear the statically implied flags.
15564 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
15565 addPredicate(*SE.getWrapPredicate(AR, Flags));
15566
15567 auto II = FlagsMap.insert({V, Flags});
15568 if (!II.second)
15569 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
15570}
15571
15574 const SCEV *Expr = getSCEV(V);
15575 const auto *AR = cast<SCEVAddRecExpr>(Expr);
15576
15578 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));
15579
15580 auto II = FlagsMap.find(V);
15581
15582 if (II != FlagsMap.end())
15583 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);
15584
15586}
15587
15589 const SCEV *Expr = this->getSCEV(V);
15591 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15592
15593 if (!New)
15594 return nullptr;
15595
15596 for (const auto *P : NewPreds)
15597 addPredicate(*P);
15598
15599 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15600 return New;
15601}
15602
15605 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L),
15606 Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates(),
15607 SE)),
15608 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
15609 for (auto I : Init.FlagsMap)
15610 FlagsMap.insert(I);
15611}
15612
15614 // For each block.
15615 for (auto *BB : L.getBlocks())
15616 for (auto &I : *BB) {
15617 if (!SE.isSCEVable(I.getType()))
15618 continue;
15619
15620 auto *Expr = SE.getSCEV(&I);
15621 auto II = RewriteMap.find(Expr);
15622
15623 if (II == RewriteMap.end())
15624 continue;
15625
15626 // Don't print things that are not interesting.
15627 if (II->second.second == Expr)
15628 continue;
15629
15630 OS.indent(Depth) << "[PSE]" << I << ":\n";
15631 OS.indent(Depth + 2) << *Expr << "\n";
15632 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
15633 }
15634}
15635
15638 BasicBlock *Header = L->getHeader();
15639 BasicBlock *Pred = L->getLoopPredecessor();
15640 LoopGuards Guards(SE);
15641 if (!Pred)
15642 return Guards;
15644 collectFromBlock(SE, Guards, Header, Pred, VisitedBlocks);
15645 return Guards;
15646}
15647
15648void ScalarEvolution::LoopGuards::collectFromPHI(
15652 unsigned Depth) {
15653 if (!SE.isSCEVable(Phi.getType()))
15654 return;
15655
15656 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15657 auto GetMinMaxConst = [&](unsigned IncomingIdx) -> MinMaxPattern {
15658 const BasicBlock *InBlock = Phi.getIncomingBlock(IncomingIdx);
15659 if (!VisitedBlocks.insert(InBlock).second)
15660 return {nullptr, scCouldNotCompute};
15661
15662 // Avoid analyzing unreachable blocks so that we don't get trapped
15663 // traversing cycles with ill-formed dominance or infinite cycles
15664 if (!SE.DT.isReachableFromEntry(InBlock))
15665 return {nullptr, scCouldNotCompute};
15666
15667 auto [G, Inserted] = IncomingGuards.try_emplace(InBlock, LoopGuards(SE));
15668 if (Inserted)
15669 collectFromBlock(SE, G->second, Phi.getParent(), InBlock, VisitedBlocks,
15670 Depth + 1);
15671 auto &RewriteMap = G->second.RewriteMap;
15672 if (RewriteMap.empty())
15673 return {nullptr, scCouldNotCompute};
15674 auto S = RewriteMap.find(SE.getSCEV(Phi.getIncomingValue(IncomingIdx)));
15675 if (S == RewriteMap.end())
15676 return {nullptr, scCouldNotCompute};
15677 auto *SM = dyn_cast_if_present<SCEVMinMaxExpr>(S->second);
15678 if (!SM)
15679 return {nullptr, scCouldNotCompute};
15680 if (const SCEVConstant *C0 = dyn_cast<SCEVConstant>(SM->getOperand(0)))
15681 return {C0, SM->getSCEVType()};
15682 return {nullptr, scCouldNotCompute};
15683 };
15684 auto MergeMinMaxConst = [](MinMaxPattern P1,
15685 MinMaxPattern P2) -> MinMaxPattern {
15686 auto [C1, T1] = P1;
15687 auto [C2, T2] = P2;
15688 if (!C1 || !C2 || T1 != T2)
15689 return {nullptr, scCouldNotCompute};
15690 switch (T1) {
15691 case scUMaxExpr:
15692 return {C1->getAPInt().ult(C2->getAPInt()) ? C1 : C2, T1};
15693 case scSMaxExpr:
15694 return {C1->getAPInt().slt(C2->getAPInt()) ? C1 : C2, T1};
15695 case scUMinExpr:
15696 return {C1->getAPInt().ugt(C2->getAPInt()) ? C1 : C2, T1};
15697 case scSMinExpr:
15698 return {C1->getAPInt().sgt(C2->getAPInt()) ? C1 : C2, T1};
15699 default:
15700 llvm_unreachable("Trying to merge non-MinMaxExpr SCEVs.");
15701 }
15702 };
15703 auto P = GetMinMaxConst(0);
15704 for (unsigned int In = 1; In < Phi.getNumIncomingValues(); In++) {
15705 if (!P.first)
15706 break;
15707 P = MergeMinMaxConst(P, GetMinMaxConst(In));
15708 }
15709 if (P.first) {
15710 const SCEV *LHS = SE.getSCEV(const_cast<PHINode *>(&Phi));
15711 SmallVector<SCEVUse, 2> Ops({P.first, LHS});
15712 const SCEV *RHS = SE.getMinMaxExpr(P.second, Ops);
15713 Guards.RewriteMap.insert({LHS, RHS});
15714 }
15715}
15716
15717// Return a new SCEV that rounds \p Expr down to the closest value that is
15718// divisible by \p Divisor and less than or equal to \p Expr. For now, only
15719// constant Expr is handled.
15721 const APInt &DivisorVal,
15722 ScalarEvolution &SE) {
15723 const APInt *ExprVal;
15724 if (!match(Expr, m_scev_APInt(ExprVal)) || ExprVal->isNegative() ||
15725 DivisorVal.isNonPositive())
15726 return Expr;
15727 APInt Rem = ExprVal->urem(DivisorVal);
15728 // return the SCEV: Expr - Expr % Divisor
15729 return SE.getConstant(*ExprVal - Rem);
15730}
15731
15732// Return a new SCEV that modifies \p Expr to the closest number divides by
15733// \p Divisor and greater or equal than Expr. For now, only handle constant
15734// Expr.
15735static const SCEV *getNextSCEVDivisibleByDivisor(const SCEV *Expr,
15736 const APInt &DivisorVal,
15737 ScalarEvolution &SE) {
15738 const APInt *ExprVal;
15739 if (!match(Expr, m_scev_APInt(ExprVal)) || ExprVal->isNegative() ||
15740 DivisorVal.isNonPositive())
15741 return Expr;
15742 APInt Rem = ExprVal->urem(DivisorVal);
15743 if (Rem.isZero())
15744 return Expr;
15745 // return the SCEV: Expr + Divisor - Expr % Divisor
15746 return SE.getConstant(*ExprVal + DivisorVal - Rem);
15747}
15748
15750 ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS,
15753 // If we have LHS == 0, check if LHS is computing a property of some unknown
15754 // SCEV %v which we can rewrite %v to express explicitly.
15756 return false;
15757 // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
15758 // explicitly express that.
15759 const SCEVUnknown *URemLHS = nullptr;
15760 const SCEV *URemRHS = nullptr;
15761 if (!match(LHS, m_scev_URem(m_SCEVUnknown(URemLHS), m_SCEV(URemRHS), SE)))
15762 return false;
15763
15764 const SCEV *Multiple =
15765 SE.getMulExpr(SE.getUDivExpr(URemLHS, URemRHS), URemRHS);
15766 DivInfo[URemLHS] = Multiple;
15767 if (auto *C = dyn_cast<SCEVConstant>(URemRHS))
15768 Multiples[URemLHS] = C->getAPInt();
15769 return true;
15770}
15771
15772// Check if the condition is a divisibility guard (A % B == 0).
15773static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS,
15774 ScalarEvolution &SE) {
15775 const SCEV *X, *Y;
15776 return match(LHS, m_scev_URem(m_SCEV(X), m_SCEV(Y), SE)) && RHS->isZero();
15777}
15778
15779// Apply divisibility by \p Divisor on MinMaxExpr with constant values,
15780// recursively. This is done by aligning up/down the constant value to the
15781// Divisor.
15782static const SCEV *applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr,
15783 APInt Divisor,
15784 ScalarEvolution &SE) {
15785 // Return true if \p Expr is a MinMax SCEV expression with a non-negative
15786 // constant operand. If so, return in \p SCTy the SCEV type and in \p RHS
15787 // the non-constant operand and in \p LHS the constant operand.
15788 auto IsMinMaxSCEVWithNonNegativeConstant =
15789 [&](const SCEV *Expr, SCEVTypes &SCTy, const SCEV *&LHS,
15790 const SCEV *&RHS) {
15791 if (auto *MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
15792 if (MinMax->getNumOperands() != 2)
15793 return false;
15794 if (auto *C = dyn_cast<SCEVConstant>(MinMax->getOperand(0))) {
15795 if (C->getAPInt().isNegative())
15796 return false;
15797 SCTy = MinMax->getSCEVType();
15798 LHS = MinMax->getOperand(0);
15799 RHS = MinMax->getOperand(1);
15800 return true;
15801 }
15802 }
15803 return false;
15804 };
15805
15806 const SCEV *MinMaxLHS = nullptr, *MinMaxRHS = nullptr;
15807 SCEVTypes SCTy;
15808 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15809 MinMaxRHS))
15810 return MinMaxExpr;
15811 auto IsMin = isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
15812 assert(SE.isKnownNonNegative(MinMaxLHS) && "Expected non-negative operand!");
15813 auto *DivisibleExpr =
15814 IsMin ? getPreviousSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE)
15815 : getNextSCEVDivisibleByDivisor(MinMaxLHS, Divisor, SE);
15817 applyDivisibilityOnMinMaxExpr(MinMaxRHS, Divisor, SE), DivisibleExpr};
15818 return SE.getMinMaxExpr(SCTy, Ops);
15819}
15820
15821void ScalarEvolution::LoopGuards::collectFromBlock(
15822 ScalarEvolution &SE, ScalarEvolution::LoopGuards &Guards,
15823 const BasicBlock *Block, const BasicBlock *Pred,
15824 SmallPtrSetImpl<const BasicBlock *> &VisitedBlocks, unsigned Depth) {
15825
15827
15828 SmallVector<SCEVUse> ExprsToRewrite;
15829 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
15830 const SCEV *RHS,
15831 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15832 const LoopGuards &DivGuards) {
15833 // WARNING: It is generally unsound to apply any wrap flags to the proposed
15834 // replacement SCEV which isn't directly implied by the structure of that
15835 // SCEV. In particular, using contextual facts to imply flags is *NOT*
15836 // legal. See the scoping rules for flags in the header to understand why.
15837
15838 // Check for a condition of the form (-C1 + X < C2). InstCombine will
15839 // create this form when combining two checks of the form (X u< C2 + C1) and
15840 // (X >=u C1).
15841 auto MatchRangeCheckIdiom = [&SE, Predicate, LHS, RHS, &RewriteMap,
15842 &ExprsToRewrite]() {
15843 const SCEVConstant *C1;
15844 const SCEVUnknown *LHSUnknown;
15845 auto *C2 = dyn_cast<SCEVConstant>(RHS);
15846 if (!match(LHS,
15847 m_scev_Add(m_SCEVConstant(C1), m_SCEVUnknown(LHSUnknown))) ||
15848 !C2)
15849 return false;
15850
15851 auto ExactRegion =
15852 ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
15853 .sub(C1->getAPInt());
15854
15855 // Bail out, unless we have a non-wrapping, monotonic range.
15856 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15857 return false;
15858 auto [I, Inserted] = RewriteMap.try_emplace(LHSUnknown);
15859 const SCEV *RewrittenLHS = Inserted ? LHSUnknown : I->second;
15860 I->second = SE.getUMaxExpr(
15861 SE.getConstant(ExactRegion.getUnsignedMin()),
15862 SE.getUMinExpr(RewrittenLHS,
15863 SE.getConstant(ExactRegion.getUnsignedMax())));
15864 ExprsToRewrite.push_back(LHSUnknown);
15865 return true;
15866 };
15867 if (MatchRangeCheckIdiom())
15868 return;
15869
15870 // Do not apply information for constants or if RHS contains an AddRec.
15872 return;
15873
15874 // If RHS is SCEVUnknown, make sure the information is applied to it.
15876 std::swap(LHS, RHS);
15878 }
15879
15880 // Puts rewrite rule \p From -> \p To into the rewrite map. Also if \p From
15881 // and \p FromRewritten are the same (i.e. there has been no rewrite
15882 // registered for \p From), then puts this value in the list of rewritten
15883 // expressions.
15884 auto AddRewrite = [&](const SCEV *From, const SCEV *FromRewritten,
15885 const SCEV *To) {
15886 if (From == FromRewritten)
15887 ExprsToRewrite.push_back(From);
15888 RewriteMap[From] = To;
15889 };
15890
15891 // Checks whether \p S has already been rewritten. In that case returns the
15892 // existing rewrite because we want to chain further rewrites onto the
15893 // already rewritten value. Otherwise returns \p S.
15894 auto GetMaybeRewritten = [&](const SCEV *S) {
15895 return RewriteMap.lookup_or(S, S);
15896 };
15897
15898 const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
15899 // Apply divisibility information when computing the constant multiple.
15900 const APInt &DividesBy =
15901 SE.getConstantMultiple(DivGuards.rewrite(RewrittenLHS));
15902
15903 // Collect rewrites for LHS and its transitive operands based on the
15904 // condition.
15905 // For min/max expressions, also apply the guard to its operands:
15906 // 'min(a, b) >= c' -> '(a >= c) and (b >= c)',
15907 // 'min(a, b) > c' -> '(a > c) and (b > c)',
15908 // 'max(a, b) <= c' -> '(a <= c) and (b <= c)',
15909 // 'max(a, b) < c' -> '(a < c) and (b < c)'.
15910
15911 // We cannot express strict predicates in SCEV, so instead we replace them
15912 // with non-strict ones against plus or minus one of RHS depending on the
15913 // predicate.
15914 const SCEV *One = SE.getOne(RHS->getType());
15915 switch (Predicate) {
15916 case CmpInst::ICMP_ULT:
15917 if (RHS->getType()->isPointerTy())
15918 return;
15919 RHS = SE.getUMaxExpr(RHS, One);
15920 [[fallthrough]];
15921 case CmpInst::ICMP_SLT: {
15922 RHS = SE.getMinusSCEV(RHS, One);
15923 RHS = getPreviousSCEVDivisibleByDivisor(RHS, DividesBy, SE);
15924 break;
15925 }
15926 case CmpInst::ICMP_UGT:
15927 case CmpInst::ICMP_SGT:
15928 RHS = SE.getAddExpr(RHS, One);
15929 RHS = getNextSCEVDivisibleByDivisor(RHS, DividesBy, SE);
15930 break;
15931 case CmpInst::ICMP_ULE:
15932 case CmpInst::ICMP_SLE:
15933 RHS = getPreviousSCEVDivisibleByDivisor(RHS, DividesBy, SE);
15934 break;
15935 case CmpInst::ICMP_UGE:
15936 case CmpInst::ICMP_SGE:
15937 RHS = getNextSCEVDivisibleByDivisor(RHS, DividesBy, SE);
15938 break;
15939 default:
15940 break;
15941 }
15942
15943 SmallVector<SCEVUse, 16> Worklist(1, LHS);
15944 SmallPtrSet<const SCEV *, 16> Visited;
15945
15946 auto EnqueueOperands = [&Worklist](const SCEVNAryExpr *S) {
15947 append_range(Worklist, S->operands());
15948 };
15949
15950 while (!Worklist.empty()) {
15951 const SCEV *From = Worklist.pop_back_val();
15952 if (isa<SCEVConstant>(From))
15953 continue;
15954 if (!Visited.insert(From).second)
15955 continue;
15956 const SCEV *FromRewritten = GetMaybeRewritten(From);
15957 const SCEV *To = nullptr;
15958
15959 switch (Predicate) {
15960 case CmpInst::ICMP_ULT:
15961 case CmpInst::ICMP_ULE:
15962 To = SE.getUMinExpr(FromRewritten, RHS);
15963 if (auto *UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
15964 EnqueueOperands(UMax);
15965 break;
15966 case CmpInst::ICMP_SLT:
15967 case CmpInst::ICMP_SLE:
15968 To = SE.getSMinExpr(FromRewritten, RHS);
15969 if (auto *SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
15970 EnqueueOperands(SMax);
15971 break;
15972 case CmpInst::ICMP_UGT:
15973 case CmpInst::ICMP_UGE:
15974 To = SE.getUMaxExpr(FromRewritten, RHS);
15975 if (auto *UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
15976 EnqueueOperands(UMin);
15977 break;
15978 case CmpInst::ICMP_SGT:
15979 case CmpInst::ICMP_SGE:
15980 To = SE.getSMaxExpr(FromRewritten, RHS);
15981 if (auto *SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
15982 EnqueueOperands(SMin);
15983 break;
15984 case CmpInst::ICMP_EQ:
15986 To = RHS;
15987 break;
15988 case CmpInst::ICMP_NE:
15989 if (match(RHS, m_scev_Zero())) {
15990 const SCEV *OneAlignedUp =
15991 getNextSCEVDivisibleByDivisor(One, DividesBy, SE);
15992 To = SE.getUMaxExpr(FromRewritten, OneAlignedUp);
15993 } else {
15994 // LHS != RHS can be rewritten as (LHS - RHS) = UMax(1, LHS - RHS),
15995 // but creating the subtraction eagerly is expensive. Track the
15996 // inequalities in a separate map, and materialize the rewrite lazily
15997 // when encountering a suitable subtraction while re-writing.
15998 if (LHS->getType()->isPointerTy()) {
16002 break;
16003 }
16004 const SCEVConstant *C;
16005 const SCEV *A, *B;
16008 RHS = A;
16009 LHS = B;
16010 }
16011 if (LHS > RHS)
16012 std::swap(LHS, RHS);
16013 Guards.NotEqual.insert({LHS, RHS});
16014 continue;
16015 }
16016 break;
16017 default:
16018 break;
16019 }
16020
16021 if (To)
16022 AddRewrite(From, FromRewritten, To);
16023 }
16024 };
16025
16027 // First, collect information from assumptions dominating the loop.
16028 for (auto &AssumeVH : SE.AC.assumptions()) {
16029 if (!AssumeVH)
16030 continue;
16031 auto *AssumeI = cast<CallInst>(AssumeVH);
16032 if (!SE.DT.dominates(AssumeI, Block))
16033 continue;
16034 Terms.emplace_back(AssumeI->getOperand(0), true);
16035 }
16036
16037 // Second, collect information from llvm.experimental.guards dominating the loop.
16038 auto *GuardDecl = Intrinsic::getDeclarationIfExists(
16039 SE.F.getParent(), Intrinsic::experimental_guard);
16040 if (GuardDecl)
16041 for (const auto *GU : GuardDecl->users())
16042 if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
16043 if (Guard->getFunction() == Block->getParent() &&
16044 SE.DT.dominates(Guard, Block))
16045 Terms.emplace_back(Guard->getArgOperand(0), true);
16046
16047 // Third, collect conditions from dominating branches. Starting at the loop
16048 // predecessor, climb up the predecessor chain, as long as there are
16049 // predecessors that can be found that have unique successors leading to the
16050 // original header.
16051 // TODO: share this logic with isLoopEntryGuardedByCond.
16052 unsigned NumCollectedConditions = 0;
16054 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred, Block);
16055 for (; Pair.first;
16056 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
16057 VisitedBlocks.insert(Pair.second);
16058 const CondBrInst *LoopEntryPredicate =
16059 dyn_cast<CondBrInst>(Pair.first->getTerminator());
16060 if (!LoopEntryPredicate)
16061 continue;
16062
16063 Terms.emplace_back(LoopEntryPredicate->getCondition(),
16064 LoopEntryPredicate->getSuccessor(0) == Pair.second);
16065 NumCollectedConditions++;
16066
16067 // If we are recursively collecting guards stop after 2
16068 // conditions to limit compile-time impact for now.
16069 if (Depth > 0 && NumCollectedConditions == 2)
16070 break;
16071 }
16072 // Finally, if we stopped climbing the predecessor chain because
16073 // there wasn't a unique one to continue, try to collect conditions
16074 // for PHINodes by recursively following all of their incoming
16075 // blocks and try to merge the found conditions to build a new one
16076 // for the Phi.
16077 if (Pair.second->hasNPredecessorsOrMore(2) &&
16079 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
16080 for (auto &Phi : Pair.second->phis())
16081 collectFromPHI(SE, Guards, Phi, VisitedBlocks, IncomingGuards, Depth);
16082 }
16083
16084 // Now apply the information from the collected conditions to
16085 // Guards.RewriteMap. Conditions are processed in reverse order, so the
16086 // earliest conditions is processed first, except guards with divisibility
16087 // information, which are moved to the back. This ensures the SCEVs with the
16088 // shortest dependency chains are constructed first.
16090 GuardsToProcess;
16091 for (auto [Term, EnterIfTrue] : reverse(Terms)) {
16092 SmallVector<Value *, 8> Worklist;
16093 SmallPtrSet<Value *, 8> Visited;
16094 Worklist.push_back(Term);
16095 while (!Worklist.empty()) {
16096 Value *Cond = Worklist.pop_back_val();
16097 if (!Visited.insert(Cond).second)
16098 continue;
16099
16100 if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
16101 auto Predicate =
16102 EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
16103 const auto *LHS = SE.getSCEV(Cmp->getOperand(0));
16104 const auto *RHS = SE.getSCEV(Cmp->getOperand(1));
16105 // If LHS is a constant, apply information to the other expression.
16106 // TODO: If LHS is not a constant, check if using CompareSCEVComplexity
16107 // can improve results.
16108 if (isa<SCEVConstant>(LHS)) {
16109 std::swap(LHS, RHS);
16111 }
16112 GuardsToProcess.emplace_back(Predicate, LHS, RHS);
16113 continue;
16114 }
16115
16116 Value *L, *R;
16117 if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
16118 : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
16119 Worklist.push_back(L);
16120 Worklist.push_back(R);
16121 }
16122 }
16123 }
16124
16125 // Process divisibility guards in reverse order to populate DivGuards early.
16126 DenseMap<const SCEV *, APInt> Multiples;
16127 LoopGuards DivGuards(SE);
16128 for (const auto &[Predicate, LHS, RHS] : GuardsToProcess) {
16129 if (!isDivisibilityGuard(LHS, RHS, SE))
16130 continue;
16131 collectDivisibilityInformation(Predicate, LHS, RHS, DivGuards.RewriteMap,
16132 Multiples, SE);
16133 }
16134
16135 for (const auto &[Predicate, LHS, RHS] : GuardsToProcess)
16136 CollectCondition(Predicate, LHS, RHS, Guards.RewriteMap, DivGuards);
16137
16138 // Apply divisibility information last. This ensures it is applied to the
16139 // outermost expression after other rewrites for the given value.
16140 for (const auto &[K, Divisor] : Multiples) {
16141 const SCEV *DivisorSCEV = SE.getConstant(Divisor);
16142 Guards.RewriteMap[K] =
16144 Guards.rewrite(K), Divisor, SE),
16145 DivisorSCEV),
16146 DivisorSCEV);
16147 ExprsToRewrite.push_back(K);
16148 }
16149
16150 // Let the rewriter preserve NUW/NSW flags if the unsigned/signed ranges of
16151 // the replacement expressions are contained in the ranges of the replaced
16152 // expressions.
16153 Guards.PreserveNUW = true;
16154 Guards.PreserveNSW = true;
16155 for (const SCEV *Expr : ExprsToRewrite) {
16156 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16157 Guards.PreserveNUW &=
16158 SE.getUnsignedRange(Expr).contains(SE.getUnsignedRange(RewriteTo));
16159 Guards.PreserveNSW &=
16160 SE.getSignedRange(Expr).contains(SE.getSignedRange(RewriteTo));
16161 }
16162
 16163 // Now that all rewrite information is collected, rewrite the collected
 16164 // expressions with the information in the map. This applies information to
 16165 // sub-expressions.
16166 if (ExprsToRewrite.size() > 1) {
16167 for (const SCEV *Expr : ExprsToRewrite) {
16168 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16169 Guards.RewriteMap.erase(Expr);
16170 Guards.RewriteMap.insert({Expr, Guards.rewrite(RewriteTo)});
16171 }
16172 }
16173}
16174
16176 /// A rewriter to replace SCEV expressions in Map with the corresponding entry
16177 /// in the map. It skips AddRecExpr because we cannot guarantee that the
16178 /// replacement is loop invariant in the loop of the AddRec.
16179 class SCEVLoopGuardRewriter
16180 : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
16183
16185
16186 public:
16187 SCEVLoopGuardRewriter(ScalarEvolution &SE,
16188 const ScalarEvolution::LoopGuards &Guards)
16189 : SCEVRewriteVisitor(SE), Map(Guards.RewriteMap),
16190 NotEqual(Guards.NotEqual) {
16191 if (Guards.PreserveNUW)
16192 FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNUW);
16193 if (Guards.PreserveNSW)
16194 FlagMask = ScalarEvolution::setFlags(FlagMask, SCEV::FlagNSW);
16195 }
16196
16197 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
16198
16199 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
16200 return Map.lookup_or(Expr, Expr);
16201 }
16202
16203 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
16204 if (const SCEV *S = Map.lookup(Expr))
16205 return S;
16206
 16207 // If we didn't find the exact ZExt expr in the map, check if there's
 16208 // an entry for a smaller ZExt we can use instead.
16209 Type *Ty = Expr->getType();
16210 const SCEV *Op = Expr->getOperand(0);
16211 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16212 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16213 Bitwidth > Op->getType()->getScalarSizeInBits()) {
16214 Type *NarrowTy = IntegerType::get(SE.getContext(), Bitwidth);
16215 auto *NarrowExt = SE.getZeroExtendExpr(Op, NarrowTy);
16216 if (const SCEV *S = Map.lookup(NarrowExt))
16217 return SE.getZeroExtendExpr(S, Ty);
16218 Bitwidth = Bitwidth / 2;
16219 }
16220
16222 Expr);
16223 }
16224
16225 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
16226 if (const SCEV *S = Map.lookup(Expr))
16227 return S;
16229 Expr);
16230 }
16231
16232 const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
16233 if (const SCEV *S = Map.lookup(Expr))
16234 return S;
16236 }
16237
16238 const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
16239 if (const SCEV *S = Map.lookup(Expr))
16240 return S;
16242 }
16243
16244 const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
16245 // Helper to check if S is a subtraction (A - B) where A != B, and if so,
16246 // return UMax(S, 1).
16247 auto RewriteSubtraction = [&](const SCEV *S) -> const SCEV * {
16248 SCEVUse LHS, RHS;
16249 if (MatchBinarySub(S, LHS, RHS)) {
16250 if (LHS > RHS)
16251 std::swap(LHS, RHS);
16252 if (NotEqual.contains({LHS, RHS})) {
16253 const SCEV *OneAlignedUp = getNextSCEVDivisibleByDivisor(
16254 SE.getOne(S->getType()), SE.getConstantMultiple(S), SE);
16255 return SE.getUMaxExpr(OneAlignedUp, S);
16256 }
16257 }
16258 return nullptr;
16259 };
16260
16261 // Check if Expr itself is a subtraction pattern with guard info.
16262 if (const SCEV *Rewritten = RewriteSubtraction(Expr))
16263 return Rewritten;
16264
16265 // Trip count expressions sometimes consist of adding 3 operands, i.e.
16266 // (Const + A + B). There may be guard info for A + B, and if so, apply
16267 // it.
16268 // TODO: Could more generally apply guards to Add sub-expressions.
16269 if (isa<SCEVConstant>(Expr->getOperand(0)) &&
16270 Expr->getNumOperands() == 3) {
16271 const SCEV *Add =
16272 SE.getAddExpr(Expr->getOperand(1), Expr->getOperand(2));
16273 if (const SCEV *Rewritten = RewriteSubtraction(Add))
16274 return SE.getAddExpr(
16275 Expr->getOperand(0), Rewritten,
16276 ScalarEvolution::maskFlags(Expr->getNoWrapFlags(), FlagMask));
16277 if (const SCEV *S = Map.lookup(Add))
16278 return SE.getAddExpr(Expr->getOperand(0), S);
16279 }
16280 SmallVector<SCEVUse, 2> Operands;
16281 bool Changed = false;
16282 for (SCEVUse Op : Expr->operands()) {
16283 Operands.push_back(
16285 Changed |= Op != Operands.back();
16286 }
16287 // We are only replacing operands with equivalent values, so transfer the
16288 // flags from the original expression.
16289 return !Changed ? Expr
16290 : SE.getAddExpr(Operands,
16292 Expr->getNoWrapFlags(), FlagMask));
16293 }
16294
16295 const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
16296 SmallVector<SCEVUse, 2> Operands;
16297 bool Changed = false;
16298 for (SCEVUse Op : Expr->operands()) {
16299 Operands.push_back(
16301 Changed |= Op != Operands.back();
16302 }
16303 // We are only replacing operands with equivalent values, so transfer the
16304 // flags from the original expression.
16305 return !Changed ? Expr
16306 : SE.getMulExpr(Operands,
16308 Expr->getNoWrapFlags(), FlagMask));
16309 }
16310 };
16311
16312 if (RewriteMap.empty() && NotEqual.empty())
16313 return Expr;
16314
16315 SCEVLoopGuardRewriter Rewriter(SE, *this);
16316 return Rewriter.visit(Expr);
16317}
16318
16319const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
16320 return applyLoopGuards(Expr, LoopGuards::collect(L, *this));
16321}
16322
16324 const LoopGuards &Guards) {
16325 return Guards.rewrite(Expr);
16326}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:661
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool isSigned(unsigned Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
#define op(i)
Hexagon Common GEP
Value * getPointer(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static constexpr Value * getValue(Ty &ValueOrUse)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
#define T
#define T1
static constexpr unsigned SM(unsigned Version)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
R600 Clause Merge
const SmallVectorImpl< MachineOperand > & Cond
static DominatorTree getDomTree(Function &F)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool hasHugeExpression(ArrayRef< SCEVUse > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool RangeRefPHIAllowedOperands(DominatorTree &DT, PHINode *PHI)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static BinaryOperator * getCommonInstForPHI(PHINode *PN)
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< SCEVUse > Ops, SCEV::NoWrapFlags Flags)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool CollectAddOperandsWithScales(SmallDenseMap< SCEVUse, APInt, 16 > &M, SmallVectorImpl< SCEVUse > &NewOps, APInt &AccumulatedConstant, ArrayRef< SCEVUse > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< SCEVUse > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static void GroupByComplexity(SmallVectorImpl< SCEVUse > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static bool getOperandsForSelectLikePHI(DominatorTree &DT, PHINode *PN, Value *&Cond, Value *&LHS, Value *&RHS)
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static bool BrPHIToSelect(DominatorTree &DT, CondBrInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:119
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
Value * RHS
Value * LHS
BinaryOperator * Mul
static const uint32_t IV[8]
Definition blake3_impl.h:83
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:2023
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1055
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1414
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:640
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:968
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
APInt abs() const
Get the absolute value.
Definition APInt.h:1818
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1189
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
Definition APInt.h:467
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1709
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
Definition APInt.h:217
bool isNegative() const
Determine sign of this APInt.
Definition APInt.h:330
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition APInt.h:1173
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition APInt.h:362
unsigned countTrailingZeros() const
Definition APInt.h:1670
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
unsigned logBase2() const
Definition APInt.h:1784
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt multiplicativeInverse() const
Definition APInt.cpp:1317
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1028
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:880
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
Definition APInt.h:342
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1137
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition Analysis.h:50
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:130
size_t size() const
Get the array size.
Definition ArrayRef.h:141
iterator begin() const
Definition ArrayRef.h:129
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:484
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Definition InstrTypes.h:374
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
void setValPtr(Value *P)
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
bool isUnsigned() const
Definition InstrTypes.h:936
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:926
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
Conditional Branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * getNot(Constant *C)
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
Definition Constants.h:1478
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:791
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:254
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition DenseMap.h:191
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void swap(DerivedT &RHS)
Definition DenseMap.h:368
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:239
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:314
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This class describes a reference to an interned FoldingSetNodeID, which can be a useful to store node...
Definition FoldingSet.h:171
This class is used to gather all the unique data bits of a node.
Definition FoldingSet.h:208
FunctionPass(char &pid)
Definition Pass.h:316
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:587
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Definition LoopInfo.h:612
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:67
Metadata node.
Definition Metadata.h:1080
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition Operator.h:43
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition Analysis.h:275
constexpr bool isValid() const
Definition Register.h:112
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This is the base class for unary cast operator classes.
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
ArrayRef< SCEVUse > operands() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
SCEVUse getOperand(unsigned i) const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
SCEVPredicateKind Kind
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
unsigned short getExpressionSize() const
SCEVNoWrapFlags NoWrapFlags
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
static constexpr auto FlagNUW
LLVM_ABI void computeAndSetCanonical(ScalarEvolution &SE)
Compute and set the canonical SCEV, by constructing a SCEV with the same operands,...
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
const SCEV * CanonicalSCEV
Pointer to the canonical version of the SCEV, i.e.
static constexpr auto FlagAnyWrap
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
static constexpr auto FlagNSW
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
static constexpr auto FlagNW
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getSMinExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Check whether the operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, SCEVUse &LHS, SCEVUse &RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getUMaxExpr(SCEVUse LHS, SCEVUse RHS)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags Mask)
Convenient NoWrapFlags manipulation.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getSMaxExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getUDivExactExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< SCEVUse > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential=false)
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition DenseSet.h:291
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition DataLayout.h:743
TypeSize getElementOffset(unsigned Idx) const
Definition DataLayout.h:774
TypeSize getSizeInBits() const
Definition DataLayout.h:754
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
Use & Op()
Definition User.h:171
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
unsigned getValueID() const
Return an ID for the concrete type of this object.
Definition Value.h:543
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition APInt.h:2277
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition APInt.h:2282
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2287
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
Definition APInt.cpp:2864
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition APInt.h:2292
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
Definition APInt.cpp:830
constexpr bool any(E Val)
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_bind< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
match_bind< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
match_bind< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
constexpr double e
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
Definition STLExtras.h:2115
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
void * PointerTy
LLVM_ABI bool VerifySCEV
auto uninitialized_copy(R &&Src, IterTy Dst)
Definition STLExtras.h:2110
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
Definition Dominators.h:94
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
Definition STLExtras.h:2199
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition iterator.h:341
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned short computeExpressionSize(ArrayRef< SCEVUse > Args)
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:370
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
Definition STLExtras.h:2011
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:2087
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2018
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition MathExtras.h:248
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:874
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
#define NC
Definition regutils.h:42
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition KnownBits.h:315
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:106
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
Definition KnownBits.h:200
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:146
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:130
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:103
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.