LLVM 23.0.0git
AggressiveInstCombine.cpp
//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace llvm {
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
}

STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
STATISTIC(NumGuardedRotates,
          "Number of guarded rotates transformed into funnel shifts");
STATISTIC(NumGuardedFunnelShifts,
          "Number of guarded funnel shifts transformed into funnel shifts");
STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");
STATISTIC(NumSelectCTTZFolded,
          "Number of select-based split cttz patterns folded");
STATISTIC(NumSelectCTLZFolded,
          "Number of select-based split ctlz patterns folded");

62 "aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden,
63 cl::desc("Max number of instructions to scan for aggressive instcombine."));
64
66 "strncmp-inline-threshold", cl::init(3), cl::Hidden,
67 cl::desc("The maximum length of a constant string for a builtin string cmp "
68 "call eligible for inlining. The default value is 3."));
69
71 MemChrInlineThreshold("memchr-inline-threshold", cl::init(3), cl::Hidden,
72 cl::desc("The maximum length of a constant string to "
73 "inline a memchr call."));
74
/// Try to fold a select-based split cttz pattern into a single full-width
/// cttz:
///
/// %lo = trunc iN %val to i(N/2)
/// %cmp = icmp eq i(N/2) %lo, 0
/// %shr = lshr iN %val, N/2
/// %hi = trunc iN %shr to i(N/2)
/// %cttz_hi = call i(N/2) @llvm.cttz.i(N/2)(i(N/2) %hi, ...)
/// %hi_plus = add/or_disjoint i(N/2) %cttz_hi, N/2
/// %cttz_lo = call i(N/2) @llvm.cttz.i(N/2)(i(N/2) %lo, ...)
/// %result = select i1 %cmp, i(N/2) %hi_plus, i(N/2) %cttz_lo
/// -->
/// %cttz_wide = call iN @llvm.cttz.iN(iN %val, i1 false)
/// %result = trunc iN %cttz_wide to i(N/2)
/// Alive proof (for i64/i32): https://alive2.llvm.org/ce/z/-s14-s
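///
/// For example, with N = 64 the guarded form computes cttz on the two i32
/// halves of an i64 value; the fold replaces the compare, both narrow cttz
/// calls, the add and the select with a single @llvm.cttz.i64 plus a trunc.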
static bool foldSelectSplitCTTZ(Instruction &I) {
  Value *Cond, *TrueVal, *FalseVal;
  if (!match(&I, m_Select(m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal))))
    return false;

  Type *HalfTy = I.getType();
  if (!HalfTy->isIntegerTy())
    return false;
  unsigned HalfWidth = HalfTy->getIntegerBitWidth();

  // Bail out on very small types (i1, i2): the full-width cttz can return
  // values not representable in the half type (e.g., cttz.i4 can return 4,
  // which doesn't fit in i2).
  if (HalfWidth <= 2)
    return false;

  unsigned FullWidth = HalfWidth * 2;

  // select (icmp eq (trunc SrcVal to i(N/2)), 0), HiResult, LoResult
  // Or select (icmp ne ...), LoResult, HiResult
  Value *LoTrunc;
  Value *HiResult, *LoResult;
  if (match(Cond,
            m_SpecificICmp(CmpInst::ICMP_EQ, m_Value(LoTrunc), m_ZeroInt()))) {
    HiResult = TrueVal;
    LoResult = FalseVal;
  } else if (match(Cond, m_SpecificICmp(CmpInst::ICMP_NE, m_Value(LoTrunc),
                                        m_ZeroInt()))) {
    HiResult = FalseVal;
    LoResult = TrueVal;
  } else {
    return false;
  }

  // LoTrunc: trunc iN SrcVal to i(N/2)
  Value *SrcVal;
  if (!match(LoTrunc, m_Trunc(m_Value(SrcVal))))
    return false;
  if (!SrcVal->getType()->isIntegerTy(FullWidth))
    return false;

  // LoResult: cttz(trunc(SrcVal), _), must use same truncated value
  if (!match(LoResult, m_OneUse(m_Cttz(m_Specific(LoTrunc), m_Value()))))
    return false;

  // HiResult: add/or_disjoint(cttz(trunc(lshr(SrcVal, N/2)), _), N/2)
  Value *CttzHiCall;
  if (!match(HiResult, m_OneUse(m_AddLike(m_Value(CttzHiCall),
                                          m_SpecificInt(HalfWidth)))))
    return false;

  Value *HiCttzArg;
  if (!match(CttzHiCall, m_OneUse(m_Cttz(m_Value(HiCttzArg), m_Value()))))
    return false;

  if (!match(HiCttzArg,
             m_Trunc(m_LShr(m_Specific(SrcVal), m_SpecificInt(HalfWidth)))))
    return false;

  // Match successful.
  IRBuilder<> Builder(&I);
  Value *CttzWide = Builder.CreateIntrinsic(
      Intrinsic::cttz, {SrcVal->getType()}, {SrcVal, Builder.getFalse()});
  Value *Trunc = Builder.CreateTrunc(CttzWide, HalfTy);

  I.replaceAllUsesWith(Trunc);
  ++NumSelectCTTZFolded;
  return true;
}

/// Same as foldSelectSplitCTTZ but for leading zeros (ctlz).
///
/// %shr = lshr iN %val, N/2
/// %hi = trunc iN %shr to i(N/2)
/// %cmp = icmp eq i(N/2) %hi, 0 (or icmp eq iN %shr, 0)
/// %lo = trunc iN %val to i(N/2)
/// %ctlz_lo = call i(N/2) @llvm.ctlz.i(N/2)(i(N/2) %lo, ...)
/// %lo_plus = add/or_disjoint i(N/2) %ctlz_lo, N/2
/// %ctlz_hi = call i(N/2) @llvm.ctlz.i(N/2)(i(N/2) %hi, ...)
/// %result = select i1 %cmp, i(N/2) %lo_plus, i(N/2) %ctlz_hi
/// -->
/// %ctlz_wide = call iN @llvm.ctlz.iN(iN %val, i1 false)
/// %result = trunc iN %ctlz_wide to i(N/2)
///
/// Alive proof (for i64/i32): https://alive2.llvm.org/ce/z/WfQepH
static bool foldSelectSplitCTLZ(Instruction &I) {
  Value *Cond, *TrueVal, *FalseVal;
  if (!match(&I, m_Select(m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal))))
    return false;

  Type *HalfTy = I.getType();
  if (!HalfTy->isIntegerTy())
    return false;
  unsigned HalfWidth = HalfTy->getIntegerBitWidth();

  // Bail out on very small types (i1, i2): the full-width ctlz can return
  // values not representable in the half type (e.g., ctlz.i4 can return 4,
  // which doesn't fit in i2).
  if (HalfWidth <= 2)
    return false;

  unsigned FullWidth = HalfWidth * 2;

  // select (icmp eq HiPart, 0), LoResult, HiResult
  // HiPart could be (trunc (lshr SrcVal, N/2) to i(N/2)) or (lshr SrcVal, N/2)
  Value *HiPart;
  Value *LoResult, *HiResult;
  if (match(Cond,
            m_SpecificICmp(CmpInst::ICMP_EQ, m_Value(HiPart), m_ZeroInt()))) {
    LoResult = TrueVal;  // upper is zero: count in lower + N/2
    HiResult = FalseVal; // upper non-zero: count in upper
  } else if (match(Cond, m_SpecificICmp(CmpInst::ICMP_NE, m_Value(HiPart),
                                        m_ZeroInt()))) {
    LoResult = FalseVal;
    HiResult = TrueVal;
  } else {
    return false;
  }

  // Extract SrcVal from HiPart: either trunc(lshr(SrcVal, N/2)) or
  // lshr(SrcVal, N/2)
  Value *SrcVal;
  if (match(HiPart,
            m_Trunc(m_LShr(m_Value(SrcVal), m_SpecificInt(HalfWidth))))) {
    // HiPart is trunc(lshr(SrcVal, N/2))
  } else if (match(HiPart, m_LShr(m_Value(SrcVal), m_SpecificInt(HalfWidth)))) {
    // HiPart is lshr(SrcVal, N/2)
  } else {
    return false;
  }
  if (!SrcVal->getType()->isIntegerTy(FullWidth))
    return false;

  // HiResult: ctlz(trunc(lshr(SrcVal, N/2)), _)
  Value *HiCtlzArg;
  if (!match(HiResult, m_OneUse(m_Ctlz(m_Value(HiCtlzArg), m_Value()))))
    return false;

  if (!match(HiCtlzArg,
             m_Trunc(m_LShr(m_Specific(SrcVal), m_SpecificInt(HalfWidth)))))
    return false;

  // LoResult: add/or_disjoint(ctlz(trunc(SrcVal), _), N/2)
  Value *CtlzLoCall;
  if (!match(LoResult, m_OneUse(m_AddLike(m_Value(CtlzLoCall),
                                          m_SpecificInt(HalfWidth)))))
    return false;

  Value *LoCtlzArg;
  if (!match(CtlzLoCall, m_OneUse(m_Ctlz(m_Value(LoCtlzArg), m_Value()))))
    return false;

  if (!match(LoCtlzArg, m_Trunc(m_Specific(SrcVal))))
    return false;

  // Match successful.
  IRBuilder<> Builder(&I);
  Value *CtlzWide = Builder.CreateIntrinsic(
      Intrinsic::ctlz, {SrcVal->getType()}, {SrcVal, Builder.getFalse()});
  Value *Trunc = Builder.CreateTrunc(CtlzWide, HalfTy);

  I.replaceAllUsesWith(Trunc);
  ++NumSelectCTLZFolded;
  return true;
}

/// Match a pattern for a bitwise funnel/rotate operation that partially guards
/// against undefined behavior by branching around the funnel-shift/rotation
/// when the shift amount is 0.
static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a funnel/rotate instruction (where the funnel shift
  // would be expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operands and
  // shift amount.
  auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
                             Value *&ShAmt) {
    unsigned Width = V->getType()->getScalarSizeInBits();

    // fshl(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt))
    if (match(V, m_OneUse(m_c_Or(
                     m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
                     m_LShr(m_Value(ShVal1), m_Sub(m_SpecificInt(Width),
                                                   m_Deferred(ShAmt))))))) {
      return Intrinsic::fshl;
    }

    // fshr(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << (Width - ShAmt)) | (ShVal1 >> ShAmt)
    if (match(V,
              m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
                                                           m_Value(ShAmt))),
                              m_LShr(m_Value(ShVal1), m_Deferred(ShAmt)))))) {
      return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a funnel/rotate operation, and the other phi
  // operand must be the source value of that funnel/rotate operation:
  // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
  // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
  // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  unsigned FunnelOp = 0, GuardOp = 1;
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *ShVal0, *ShVal1, *ShAmt;
  Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
  if (IID == Intrinsic::not_intrinsic ||
      (IID == Intrinsic::fshl && ShVal0 != P1) ||
      (IID == Intrinsic::fshr && ShVal1 != P1)) {
    IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
    if (IID == Intrinsic::not_intrinsic ||
        (IID == Intrinsic::fshl && ShVal0 != P0) ||
        (IID == Intrinsic::fshr && ShVal1 != P0))
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
    std::swap(FunnelOp, GuardOp);
  }

  // The incoming block with our source operand must be the "guard" block.
  // That must contain a cmp+branch to avoid the funnel/rotate when the shift
  // amount is equal to 0. The other incoming block is the block with the
  // funnel/rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
  BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
  Instruction *TermI = GuardBB->getTerminator();

  // Ensure that the shift values dominate each block.
  if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
    return false;

  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_SpecificICmp(CmpInst::ICMP_EQ, m_Specific(ShAmt),
                                        m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
    return false;

  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());

  if (ShVal0 == ShVal1)
    ++NumGuardedRotates;
  else
    ++NumGuardedFunnelShifts;

  // If this is not a rotate then the select was blocking poison from the
  // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
  bool IsFshl = IID == Intrinsic::fshl;
  if (ShVal0 != ShVal1) {
    if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
      ShVal1 = Builder.CreateFreeze(ShVal1);
    else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
      ShVal0 = Builder.CreateFreeze(ShVal0);
  }

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %ShAmt, 0
  //   br i1 %cmp, label %PhiBB, label %FunnelBB
  // FunnelBB:
  //   %sub = sub i32 32, %ShAmt
  //   %shr = lshr i32 %ShVal1, %sub
  //   %shl = shl i32 %ShVal0, %ShAmt
  //   %fsh = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
  Phi.replaceAllUsesWith(
      Builder.CreateIntrinsic(IID, Phi.getType(), {ShVal0, ShVal1, ShAmt}));
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
namespace {
struct MaskOps {
  Value *Root = nullptr;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1 = false;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {}
};
} // namespace

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common source
/// value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  const APInt *BitIndex = nullptr;
  if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // If the shift constant is out-of-range, this code hasn't been simplified.
  if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
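///
/// A concrete instance for i8:
/// and (or (lshr X, 1), (lshr X, 4)), 1 --> (X & 0x12) != 0
/// (bits 1 and 4 form the mask 0x12; the i1 compare is then zext'd back).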
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // final "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  bool MatchTrunc;
  Value *X;
  if (I.getType()->isIntOrIntVectorTy(1)) {
    if (match(&I, m_Trunc(m_OneUse(m_And(m_Value(), m_Value())))))
      MatchAllBitsSet = true;
    else if (match(&I, m_Trunc(m_OneUse(m_Or(m_Value(), m_Value())))))
      MatchAllBitsSet = false;
    else
      return false;
    MatchTrunc = true;
    X = I.getOperand(0);
  } else {
    if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value()))) {
      X = &I;
      MatchAllBitsSet = true;
    } else if (match(&I,
                     m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One()))) {
      X = I.getOperand(0);
      MatchAllBitsSet = false;
    } else
      return false;
    MatchTrunc = false;
  }
  Type *Ty = X->getType();

  MaskOps MOps(Ty->getScalarSizeInBits(), MatchAllBitsSet);
  if (!matchAndOrChain(X, MOps) ||
      (MatchAllBitsSet && !MatchTrunc && !MOps.FoundAnd1))
    return false;

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(Ty, MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = MatchTrunc ? Cmp : Builder.CreateZExt(Cmp, Ty);
  I.replaceAllUsesWith(Zext);
  ++NumAnyOrAllBitsSet;
  return true;
}

/// Helper function to replace an instruction with a popcount intrinsic.
/// This creates the ctpop intrinsic and replaces all uses of the instruction.
static void replaceWithPopCount(Instruction &I, Value *Root) {
  LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
  IRBuilder<> Builder(&I);
  I.replaceAllUsesWith(
      Builder.CreateIntrinsic(Intrinsic::ctpop, I.getType(), {Root}));
  ++NumPopCountRecognized;
}

// Try to recognize below function as popcount intrinsic.
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// Also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
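//
// Worked example for i = 0x0F (popcount 4):
//   0x0F - ((0x0F >> 1) & 0x55555555)                = 0x0F - 0x05 = 0x0A
//   (0x0A & 0x33333333) + ((0x0A >> 2) & 0x33333333) = 0x02 + 0x02 = 0x04
//   (0x04 + (0x04 >> 4)) & 0x0F0F0F0F                = 0x04
//   (0x04 * 0x01010101) >> 24                        = 0x04040404 >> 24 = 4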
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
      match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                                    m_Deferred(ShiftOp0)),
                            m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        const APInt *AndMask;
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_APInt(AndMask)))) {
          auto CheckAndMask = [&]() {
            if (*AndMask == Mask55)
              return true;

            // Exact match failed, see if any bits are known to be 0 where we
            // expect a 1 in the mask.
            if (!AndMask->isSubsetOf(Mask55))
              return false;

            APInt NeededMask = Mask55 & ~*AndMask;
            return MaskedValueIsZero(cast<Instruction>(SubOp1)->getOperand(0),
                                     NeededMask,
                                     SimplifyQuery(I.getDataLayout()));
          };

          if (CheckAndMask()) {
            replaceWithPopCount(I, Root);
            return true;
          }
        }
      }
    }
  }

  return false;
}

// Try to recognize below function as popcount intrinsic.
// Ref. Hacker's Delight
// int popcount32(unsigned int i) {
//   uWord = (uWord & 0x55555555) + ((uWord>>1) & 0x55555555);
//   uWord = (uWord & 0x33333333) + ((uWord>>2) & 0x33333333);
//   uWord = (uWord & 0x0F0F0F0F) + ((uWord>>4) & 0x0F0F0F0F);
//   uWord = (uWord & 0x00FF00FF) + ((uWord>>8) & 0x00FF00FF);
//   return (uWord & 0x0000FFFF) + (uWord>>16);
// }
// int popcount64(unsigned long i) {
//   uWord = (uWord & 0x5555555555555555) + ((uWord>>1) & 0x5555555555555555);
//   uWord = (uWord & 0x3333333333333333) + ((uWord>>2) & 0x3333333333333333);
//   uWord = (uWord & 0x0F0F0F0F0F0F0F0F) + ((uWord>>4) & 0x0F0F0F0F0F0F0F0F);
//   uWord = (uWord & 0x00FF00FF00FF00FF) + ((uWord>>8) & 0x00FF00FF00FF00FF);
//   uWord = (uWord & 0x0000FFFF0000FFFF) + ((uWord>>16) & 0x0000FFFF0000FFFF);
//   return (uWord & 0x00000000FFFFFFFF) + ((uWord>>32) & 0x00000000FFFFFFFF);
// }
//
// InstCombine may narrow AND masks when it can prove the removed bits are
// known zero (e.g. 0x0F0F0F0F -> 0x07070707). We accept such narrowed masks
// by checking they are subsets of the expected masks and verifying the missing
// bits are known zero via MaskedValueIsZero.
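//
// E.g. in the final step of popcount32, each 16-bit partial count is at most
// 16 and so fits in 5 bits; InstCombine may therefore present the 0x0000FFFF
// mask as 0x0000001F, and that narrowed form is still accepted here as long
// as MaskedValueIsZero proves the dropped mask bits cannot be set.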
static bool tryToRecognizePopCountAdd(Instruction &I) {
  if (I.getOpcode() != Instruction::Add)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  if (Len > 64 || Len <= 8 || Len % 8 != 0)
    return false;

  // Len should be a power of 2 for the loop to work correctly
  if (!isPowerOf2_32(Len))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));

  SimplifyQuery SQ(I.getDataLayout());

  // Check if CapturedMask is a valid (possibly narrowed) version of
  // ExpectedMask for the given Operand. Returns true if the masks match
  // exactly, or if CapturedMask is a subset and the missing bits are
  // known zero in the Operand.
  auto isValidNarrowedMask = [&](const APInt &CapturedMask,
                                 const APInt &ExpectedMask,
                                 Value *Operand) -> bool {
    if (CapturedMask == ExpectedMask)
      return true;
    if (!CapturedMask.isSubsetOf(ExpectedMask))
      return false;
    APInt NeededMask = ExpectedMask & ~CapturedMask;
    return MaskedValueIsZero(Operand, NeededMask, SQ);
  };

  // For "(x & M) + ((x >> S) & M)" patterns, both AND masks may be narrowed.
  // Require subsets of BaseMask and prove any implied missing bits are zero.
  auto narrowAddPairMasksOk = [&](const APInt &BaseMask, unsigned ShiftAmt,
                                  Value *Val, const APInt &AndMask1,
                                  const APInt &AndMask2) -> bool {
    if (!AndMask1.isSubsetOf(BaseMask) || !AndMask2.isSubsetOf(BaseMask))
      return false;
    APInt NeededShifted = (BaseMask & ~AndMask1).shl(ShiftAmt);
    APInt NeededUnshifted = BaseMask & ~AndMask2;
    APInt AllNeeded = NeededShifted | NeededUnshifted;
    return AllNeeded.isZero() || MaskedValueIsZero(Val, AllNeeded, SQ);
  };

  Value *ShiftOp;
  Value *Start = &I;
  for (unsigned I = Len; I >= 8; I = I / 2) {
    APInt Mask = APInt::getSplat(Len, APInt::getLowBitsSet(I, I / 2));
    const APInt *AndMask1 = nullptr, *AndMask2 = nullptr;

    // Matching "(uWord & Mask) + ((uWord>>I/2) & Mask)".
    // Both masks might have been narrowed by InstCombine.
    if (match(Start,
              m_c_Add(m_And(m_LShr(m_Value(ShiftOp), m_SpecificInt(I / 2)),
                            m_APInt(AndMask1)),
                      m_And(m_Deferred(ShiftOp), m_APInt(AndMask2))))) {
      if (!narrowAddPairMasksOk(Mask, I / 2, ShiftOp, *AndMask1, *AndMask2))
        return false;
    }
    // Matching "(uWord & Mask) + (uWord>>I/2)".
    // The mask might have been narrowed by InstCombine.
    else if (match(Start,
                   m_c_Add(m_LShr(m_Value(ShiftOp), m_SpecificInt(I / 2)),
                           m_And(m_Deferred(ShiftOp), m_APInt(AndMask1))))) {
      if (!isValidNarrowedMask(*AndMask1, Mask, ShiftOp))
        return false;
    } else
      return false;
    Start = ShiftOp;
  }

  // Matching "uWord = (uWord & Mask33) + ((uWord>>2) & Mask33)".
  const APInt *AndMask1 = nullptr, *AndMask2 = nullptr;
  if (!match(Start, m_c_Add(m_And(m_LShr(m_Value(ShiftOp), m_SpecificInt(2)),
                                  m_APInt(AndMask1)),
                            m_And(m_Deferred(ShiftOp), m_APInt(AndMask2)))))
    return false;
  if (!narrowAddPairMasksOk(Mask33, 2, ShiftOp, *AndMask1, *AndMask2))
    return false;

  Start = ShiftOp;
  Value *Root;
  // Matching "uWord = (uWord & Mask55) + ((uWord>>1) & Mask55)".
  AndMask1 = nullptr;
  AndMask2 = nullptr;
  if (!match(Start, m_c_Add(m_And(m_LShr(m_Value(Root), m_SpecificInt(1)),
                                  m_APInt(AndMask1)),
                            m_And(m_Deferred(Root), m_APInt(AndMask2)))))
    return false;
  if (!narrowAddPairMasksOk(Mask55, 1, Root, *AndMask1, *AndMask2))
    return false;

  replaceWithPopCount(I, Root);
  return true;
}

// Try to recognize below function as popcount intrinsic.
// Ref. Hacker's Delight
// int popcnt(unsigned x) {
//   x = x - ((x >> 1) & 0x55555555);
//   x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
//   x = (x + (x >> 4)) & 0x0F0F0F0F;
//   x = x + (x >> 8);
//   x = x + (x >> 16);
//   return x & 0x0000003F;
// }
//
// int popcnt(unsigned x) {
//   x = x - ((x >> 1) & 0x55555555);
//   x = x - 3*((x >> 2) & 0x33333333);
//   x = (x + (x >> 4)) & 0x0F0F0F0F;
//   x = x + (x >> 8);
//   x = x + (x >> 16);
//   return x & 0x0000003F;
// }
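//
// The two variants are equivalent per 4-bit group: writing x = 4*hi + lo with
// hi = (x >> 2) & 3, the pairwise sum hi + lo equals x - 3*hi, which is why
// the subtract form of the 0x33333333 step is matched below as well.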

static bool tryToRecognizePopCountShiftAdd(Instruction &I) {
  if (I.getOpcode() != Instruction::And)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();

  if (Len > 64 || Len <= 8 || Len % 8 != 0)
    return false;

  // Len should be a power of 2 for the loop to work correctly
  if (!isPowerOf2_32(Len))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));

  Value *Add1;
  const APInt *MaskRes;
  if (!match(&I, m_And(m_Value(Add1), m_APInt(MaskRes))))
    return false;
  // Number of bits needed to represent Len.
  unsigned NumLenBits = Log2_32(Len) + 1;
  // The "mask" here really only needs to fulfill two conditions:
  // (1) All ones for the lower NumLenBits bits.
  // (2) Zeros from bit 8 and onward.
  // Condition (1) is straightforward. The reason behind condition (2) is that
  // we don't care about any 8-bit chunk except the lowest one in the original
  // divide-and-conquer algorithm.
  if (MaskRes->countTrailingOnes() < NumLenBits || MaskRes->getActiveBits() > 8)
    return false;

  Value *Add2;
  for (unsigned I = Len; I >= 16; I = I / 2) {
    // Matching "x = x + (x >> I/2)" for I-bit.
    if (!match(Add1, m_c_Add(m_LShr(m_Value(Add2), m_SpecificInt(I / 2)),
                             m_Deferred(Add2))))
      return false;
    Add1 = Add2;
  }

  Value *And1 = Add1;
  // Matching "x = (x + (x >> 4)) & 0x0F0F0F0F".
  if (!match(And1, m_And(m_c_Add(m_LShr(m_Value(Add2), m_SpecificInt(4)),
                                 m_Deferred(Add2)),
                         m_SpecificInt(Mask0F))))
    return false;

  Value *Sub1;
  llvm::APInt NegThree(/*BitWidth=*/Len, /*Value=*/-3,
                       /*isSigned=*/true);
  // Matching "x = (x & 0x33333333) + ((x >> 2) & 0x33333333)".
  if (!match(Add2, m_c_Add(m_And(m_LShr(m_Value(Sub1), m_SpecificInt(2)),
                                 m_SpecificInt(Mask33)),
                           m_And(m_Deferred(Sub1), m_SpecificInt(Mask33)))) &&
      // Matching "x = x - 3*((x >> 2) & 0x33333333)".
      !match(Add2, m_c_Add(m_Mul(m_And(m_LShr(m_Value(Sub1), m_SpecificInt(2)),
                                       m_SpecificInt(Mask33)),
                                 m_SpecificInt(NegThree)),
                           m_Deferred(Sub1))))
    return false;

  Value *Root;
  // Matching "x = x - ((x >> 1) & 0x55555555)".
  if (!match(Sub1, m_Sub(m_Value(Root),
                         m_And(m_LShr(m_Deferred(Root), m_SpecificInt(1)),
                               m_SpecificInt(Mask55)))))
    return false;

  replaceWithPopCount(I, Root);
  return true;
}

/// Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), providing C1 and
/// C2 saturate the value of the fp conversion. The transform is not reversible
/// as the fptosi.sat is more defined than the input - all values produce a
/// valid value for the fptosi.sat, whereas some inputs that were out of range
/// of the integer conversion produce poison for the original. The reversed
/// pattern may use fmax and fmin instead. As we cannot directly reverse the
/// transform, and it is not always profitable, we make it conditional on the
/// cost being reported as lower by TTI.
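///
/// For example (when TTI reports the saturating form as cheaper):
///   smin(smax(fptosi float %x to i32, -128), 127)
///     --> sext (call i8 @llvm.fptosi.sat.i8.f32(float %x)) to i32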
static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
  // Look for smin(smax(fptosi X)) patterns, converting them to fptosi_sat.
  Value *In;
  const APInt *MinC, *MaxC;
  if (!match(&I, m_SMax(m_OneUse(m_SMin(m_OneUse(m_FPToSI(m_Value(In))),
                                        m_APInt(MinC))),
                        m_APInt(MaxC))) &&
      !match(&I, m_SMin(m_OneUse(m_SMax(m_OneUse(m_FPToSI(m_Value(In))),
                                        m_APInt(MaxC))),
                        m_APInt(MinC))))
    return false;

  // Check that the constants clamp a saturate.
  if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1)
    return false;

  Type *IntTy = I.getType();
  Type *FpTy = In->getType();
  Type *SatTy =
      IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1);
  if (auto *VecTy = dyn_cast<VectorType>(IntTy))
    SatTy = VectorType::get(SatTy, VecTy->getElementCount());

  // Get the cost of the intrinsic, and check that against the cost of
  // fptosi+smin+smax
  InstructionCost SatCost = TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::fptosi_sat, SatTy, {In}, {FpTy}),
      TTI::TCK_RecipThroughput);
  SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy,
                                  TTI::CastContextHint::None,
                                  TTI::TCK_RecipThroughput);

  InstructionCost MinMaxCost = TTI.getCastInstrCost(
      Instruction::FPToSI, IntTy, FpTy, TTI::CastContextHint::None,
      TTI::TCK_RecipThroughput);
  MinMaxCost += TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::smin, IntTy, {IntTy}),
      TTI::TCK_RecipThroughput);
  MinMaxCost += TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::smax, IntTy, {IntTy}),
      TTI::TCK_RecipThroughput);

  if (SatCost >= MinMaxCost)
    return false;

  IRBuilder<> Builder(&I);
  Value *Sat =
      Builder.CreateIntrinsic(Intrinsic::fptosi_sat, {SatTy, FpTy}, In);
  I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
  return true;
}

/// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids
/// pessimistic codegen that has to account for setting errno and can enable
/// vectorization.
static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
                     TargetLibraryInfo &TLI, AssumptionCache &AC,
                     DominatorTree &DT) {
  // If (1) this is a sqrt libcall, (2) we can assume that NAN is not created
  // (because NNAN or the operand arg must not be less than -0.0), and (3) we
  // would not end up lowering to a libcall anyway (which could change the
  // value of errno), then:
  // (1) errno won't be set.
  // (2) it is safe to convert this to an intrinsic call.
  Type *Ty = Call->getType();
  Value *Arg = Call->getArgOperand(0);
  if (TTI.haveFastSqrt(Ty) &&
      (Call->hasNoNaNs() ||
       cannotBeOrderedLessThanZero(
           Arg, SimplifyQuery(Call->getDataLayout(), &TLI, &DT, &AC, Call)))) {
    IRBuilder<> Builder(Call);
    Value *NewSqrt =
        Builder.CreateIntrinsic(Intrinsic::sqrt, Ty, Arg, Call, "sqrt");
    Call->replaceAllUsesWith(NewSqrt);

    // Explicitly erase the old call because a call with side effects is not
    // trivially dead.
    Call->eraseFromParent();
    return true;
  }

  return false;
}

// Check if this array of constants represents a cttz table.
// Iterate over the elements from \p Table by trying to find/match all
// the numbers from 0 to \p InputBits that should represent cttz results.
static bool isCTTZTable(Constant *Table, const APInt &Mul, const APInt &Shift,
                        const APInt &AndMask, Type *AccessTy,
                        unsigned InputBits, const APInt &GEPIdxFactor,
                        const DataLayout &DL) {
  for (unsigned Idx = 0; Idx < InputBits; Idx++) {
    APInt Index =
        (APInt::getOneBitSet(InputBits, Idx) * Mul).lshr(Shift) & AndMask;
    ConstantInt *C = dyn_cast_or_null<ConstantInt>(
        ConstantFoldLoadFromConst(Table, AccessTy, Index * GEPIdxFactor, DL));
    if (!C || C->getValue() != Idx)
      return false;
  }

  return true;
}

// Try to recognize table-based ctz implementation.
// E.g., an example in C (for more cases please see the llvm/tests):
// int f(unsigned x) {
//   static const char table[32] =
//     {0, 1, 28, 2, 29, 14, 24, 3, 30,
//      22, 20, 15, 25, 17, 4, 8, 31, 27,
//      13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
//   return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27];
// }
// this can be lowered to `cttz` instruction.
// There is also a special case when the element is 0.
//
// The (x & -x) sets the lowest non-zero bit to 1. The multiply is a de-bruijn
// sequence that contains each pattern of bits in it. The shift extracts
// the top bits after the multiply, and that index into the table should
// represent the number of trailing zeros in the original number.
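//
// Worked example for x = 8 in the C code above: x & -x = 8, and
// (8 * 0x077CB531) >> 27 = 0x3BE5A988 >> 27 = 7, with table[7] = 3 = cttz(8).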
//
// Here are some examples of LLVM IR for a 64-bit target:
//
// CASE 1:
// %sub = sub i32 0, %x
// %and = and i32 %sub, %x
// %mul = mul i32 %and, 125613361
// %shr = lshr i32 %mul, 27
// %idxprom = zext i32 %shr to i64
// %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0,
//     i64 %idxprom
// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
//
// CASE 2:
// %sub = sub i32 0, %x
// %and = and i32 %sub, %x
// %mul = mul i32 %and, 72416175
// %shr = lshr i32 %mul, 26
// %idxprom = zext i32 %shr to i64
// %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table,
//     i64 0, i64 %idxprom
// %0 = load i16, i16* %arrayidx, align 2, !tbaa !8
//
// CASE 3:
// %sub = sub i32 0, %x
// %and = and i32 %sub, %x
// %mul = mul i32 %and, 81224991
// %shr = lshr i32 %mul, 27
// %idxprom = zext i32 %shr to i64
// %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table,
//     i64 0, i64 %idxprom
// %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
//
// CASE 4:
// %sub = sub i64 0, %x
// %and = and i64 %sub, %x
// %mul = mul i64 %and, 283881067100198605
// %shr = lshr i64 %mul, 58
// %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @table, i64 0,
//     i64 %shr
// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
//
// All these can be lowered to @llvm.cttz.i32/64 intrinsics.
968// All these can be lowered to @llvm.cttz.i32/64 intrinsics.
971 if (!LI)
972 return false;
973
974 Type *AccessType = LI->getType();
975 if (!AccessType->isIntegerTy())
976 return false;
977
979 if (!GEP || !GEP->hasNoUnsignedSignedWrap())
980 return false;
981
982 GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
983 if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
984 return false;
985
986 unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
987 APInt ModOffset(BW, 0);
989 if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||
990 VarOffsets.size() != 1 || ModOffset != 0)
991 return false;
992 auto [GepIdx, GEPScale] = VarOffsets.front();
993
994 Value *X1;
995 const APInt *MulConst, *ShiftConst, *AndCst = nullptr;
996 // Check that the gep variable index is ((x & -x) * MulConst) >> ShiftConst.
997 // This might be extended to the pointer index type, and if the gep index type
998 // has been replaced with an i8 then a new And (and different ShiftConst) will
999 // be present.
1000 auto MatchInner = m_LShr(
1001 m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)), m_APInt(MulConst)),
1002 m_APInt(ShiftConst));
1003 if (!match(GepIdx, m_CastOrSelf(MatchInner)) &&
1004 !match(GepIdx, m_CastOrSelf(m_And(MatchInner, m_APInt(AndCst)))))
1005 return false;
1006
1007 unsigned InputBits = X1->getType()->getScalarSizeInBits();
1008 if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)
1009 return false;
1010
1011 if (!GEPScale.isIntN(InputBits) ||
1012 !isCTTZTable(GVTable->getInitializer(), *MulConst, *ShiftConst,
1013 AndCst ? *AndCst : APInt::getAllOnes(InputBits), AccessType,
1014 InputBits, GEPScale.zextOrTrunc(InputBits), DL))
1015 return false;
1016
1017 ConstantInt *ZeroTableElem = cast<ConstantInt>(
1018 ConstantFoldLoadFromConst(GVTable->getInitializer(), AccessType, DL));
1019 bool DefinedForZero = ZeroTableElem->getZExtValue() == InputBits;
1020
1021 IRBuilder<> B(LI);
1022 ConstantInt *BoolConst = B.getInt1(!DefinedForZero);
1023 Type *XType = X1->getType();
1024 auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst});
1025 Value *ZExtOrTrunc = nullptr;
1026
1027 if (DefinedForZero) {
1028 ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, AccessType);
1029 } else {
1030 // If the value in elem 0 isn't the same as InputBits, we still want to
1031 // produce the value from the table.
1032 auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));
1033 auto Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Cttz);
1034
1035 // The true branch of select handles the cttz(0) case, which is rare.
1038 SelectI->setMetadata(
1039 LLVMContext::MD_prof,
1040 MDBuilder(SelectI->getContext()).createUnlikelyBranchWeights());
1041 }
1042
1043 // NOTE: If the table[0] is 0, but the cttz(0) is defined by the Target
1044 // it should be handled as: `cttz(x) & (typeSize - 1)`.
1045
1046 ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);
1047 }
1048
1049 LI->replaceAllUsesWith(ZExtOrTrunc);
1050
1051 return true;
1052}
1053
// Check if this array of constants represents a log2 table.
// Iterate over the elements from \p Table by trying to find/match all
// the numbers from 0 to \p InputBits that should represent log2 results.
static bool isLog2Table(Constant *Table, const APInt &Mul, const APInt &Shift,
                        Type *AccessTy, unsigned InputBits,
                        const APInt &GEPIdxFactor, const DataLayout &DL) {
  for (unsigned Idx = 0; Idx < InputBits; Idx++) {
    APInt Index = (APInt::getLowBitsSet(InputBits, Idx + 1) * Mul).lshr(Shift);
    ConstantInt *C = dyn_cast_or_null<ConstantInt>(
        ConstantFoldLoadFromConst(Table, AccessTy, Index * GEPIdxFactor, DL));
    if (!C || C->getValue() != Idx)
      return false;
  }

  // Verify that an input of zero will select table index 0.
  APInt ZeroIndex = Mul.lshr(Shift);
  if (!ZeroIndex.isZero())
    return false;

  return true;
}

// Try to recognize table-based log2 implementation.
// E.g., an example in C (for more cases please see the llvm/tests):
// int f(unsigned v) {
//   static const char table[32] =
//     {0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
//      8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31};
//
//   v |= v >> 1; // first round down to one less than a power of 2
//   v |= v >> 2;
//   v |= v >> 4;
//   v |= v >> 8;
//   v |= v >> 16;
//
//   return table[(unsigned)(v * 0x07C4ACDDU) >> 27];
// }
// this can be lowered to `ctlz` instruction.
// There is also a special case when the element is 0.
//
// The >> and |= sequence sets all bits below the most significant set bit. The
// multiply is a de-bruijn sequence that contains each pattern of bits in it.
// The shift extracts the top bits after the multiply, and that index into the
// table should represent the floor log base 2 of the original number.
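//
// Worked example for v = 8 in the C code above: the shift/or chain produces
// v = 15, and (15 * 0x07C4ACDD) >> 27 = 14, with table[14] = 3 = log2(8).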
//
// Here are some examples of LLVM IR for a 64-bit target.
//
// CASE 1:
// %shr = lshr i32 %v, 1
// %or = or i32 %shr, %v
// %shr1 = lshr i32 %or, 2
// %or2 = or i32 %shr1, %or
// %shr3 = lshr i32 %or2, 4
// %or4 = or i32 %shr3, %or2
// %shr5 = lshr i32 %or4, 8
// %or6 = or i32 %shr5, %or4
// %shr7 = lshr i32 %or6, 16
// %or8 = or i32 %shr7, %or6
// %mul = mul i32 %or8, 130329821
// %shr9 = lshr i32 %mul, 27
// %idxprom = zext nneg i32 %shr9 to i64
// %arrayidx = getelementptr inbounds i8, ptr @table, i64 %idxprom
// %0 = load i8, ptr %arrayidx, align 1
//
// CASE 2:
// %shr = lshr i64 %v, 1
// %or = or i64 %shr, %v
// %shr1 = lshr i64 %or, 2
// %or2 = or i64 %shr1, %or
// %shr3 = lshr i64 %or2, 4
// %or4 = or i64 %shr3, %or2
// %shr5 = lshr i64 %or4, 8
// %or6 = or i64 %shr5, %or4
// %shr7 = lshr i64 %or6, 16
// %or8 = or i64 %shr7, %or6
// %shr9 = lshr i64 %or8, 32
// %or10 = or i64 %shr9, %or8
// %mul = mul i64 %or10, 285870213051386505
// %shr11 = lshr i64 %mul, 58
// %arrayidx = getelementptr inbounds i8, ptr @table, i64 %shr11
// %0 = load i8, ptr %arrayidx, align 1
//
// All these can be lowered to @llvm.ctlz.i32/64 intrinsics and a subtract.
static bool tryToRecognizeTableBasedLog2(Instruction &I,
                                         TargetTransformInfo &TTI,
                                         const DataLayout &DL) {
  LoadInst *LI = dyn_cast<LoadInst>(&I);
  if (!LI)
    return false;

  Type *AccessType = LI->getType();
  if (!AccessType->isIntegerTy())
    return false;

  auto *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
  if (!GEP || !GEP->hasNoUnsignedSignedWrap())
    return false;

  GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
  if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
    return false;

  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt ModOffset(BW, 0);
  SmallMapVector<Value *, APInt, 4> VarOffsets;
  if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||
      VarOffsets.size() != 1 || ModOffset != 0)
    return false;
  auto [GepIdx, GEPScale] = VarOffsets.front();

  Value *X;
  const APInt *MulConst, *ShiftConst;
  // Check that the gep variable index is (x * MulConst) >> ShiftConst.
  auto MatchInner =
      m_LShr(m_Mul(m_Value(X), m_APInt(MulConst)), m_APInt(ShiftConst));
  if (!match(GepIdx, m_CastOrSelf(MatchInner)))
    return false;

  unsigned InputBits = X->getType()->getScalarSizeInBits();
  if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)
    return false;

  // Verify shift amount.
  // TODO: Allow other shift amounts when we have proper test coverage.
  if (*ShiftConst != InputBits - Log2_32(InputBits))
    return false;

  // Match the sequence of OR operations with right shifts by powers of 2.
  for (unsigned ShiftAmt = InputBits / 2; ShiftAmt != 0; ShiftAmt /= 2) {
    Value *Y;
    if (!match(X, m_c_Or(m_LShr(m_Value(Y), m_SpecificInt(ShiftAmt)),
                         m_Deferred(Y))))
      return false;
    X = Y;
  }

  if (!GEPScale.isIntN(InputBits) ||
      !isLog2Table(GVTable->getInitializer(), *MulConst, *ShiftConst,
                   AccessType, InputBits, GEPScale.zextOrTrunc(InputBits), DL))
    return false;

  ConstantInt *ZeroTableElem = cast<ConstantInt>(
      ConstantFoldLoadFromConst(GVTable->getInitializer(), AccessType, DL));

  // Use InputBits - 1 - ctlz(X) to compute log2(X).
  IRBuilder<> B(LI);
  ConstantInt *BoolConst = B.getTrue();
  Type *XType = X->getType();

  // Check that the backend has an efficient ctlz instruction.
  // FIXME: Teach the backend to emit the original code when ctlz isn't
  // supported like we do for cttz.
  IntrinsicCostAttributes Attrs(
      Intrinsic::ctlz, XType,
      {PoisonValue::get(XType), /*is_zero_poison=*/BoolConst});
  InstructionCost Cost =
      TTI.getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency);
  if (Cost > TTI::TCC_Basic)
    return false;

  Value *Ctlz = B.CreateIntrinsic(Intrinsic::ctlz, {XType}, {X, BoolConst});

  Constant *InputBitsM1 = ConstantInt::get(XType, InputBits - 1);
  Value *Sub = B.CreateSub(InputBitsM1, Ctlz);

  // The table won't produce a sensible result for 0.
  Value *Cmp = B.CreateICmpEQ(X, ConstantInt::get(XType, 0));
  Value *Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Sub);

  // The true branch of select handles the log2(0) case, which is rare.
  if (auto *SelectI = dyn_cast<Instruction>(Select);
      SelectI && !ProfcheckDisableMetadataFixes) {
    SelectI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(SelectI->getContext()).createUnlikelyBranchWeights());
  }

  Value *ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);

  LI->replaceAllUsesWith(ZExtOrTrunc);

  return true;
}

/// This is used by foldLoadsRecursive() to capture a Root Load node which is
/// of type or(load, load) and recursively build the wide load. Also capture the
/// shift amount, zero extend type and loadSize.
struct LoadOps {
  LoadInst *Root = nullptr;
  LoadInst *RootInsert = nullptr;
  bool FoundRoot = false;
  uint64_t LoadSize = 0;
  uint64_t Shift = 0;
  Type *ZextType;
  AAMDNodes AATags;
};

// Identify and Merge consecutive loads recursively which is of the form
// (ZExt(L1) << shift1) | (ZExt(L2) << shift2) -> ZExt(L3) << shift1
// (ZExt(L1) << shift1) | ZExt(L2) -> ZExt(L3)
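// E.g. on a little-endian target:
// (ZExt(load i8, p+1) << 8) | ZExt(load i8, p) -> ZExt(load i16, p)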
static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL,
                               AliasAnalysis &AA, bool IsRoot = false) {
  uint64_t ShAmt2;
  Value *X;
  Instruction *L1, *L2;

  // For the root instruction, allow multiple uses since the final result
  // may legitimately be used in multiple places. For intermediate values,
  // require single use to avoid creating duplicate loads.
  if (!IsRoot && !V->hasOneUse())
    return false;

  if (!match(V, m_c_Or(m_Value(X),
                       m_OneUse(m_ShlOrSelf(m_OneUse(m_ZExt(m_Instruction(L2))),
                                            ShAmt2)))))
    return false;

  if (!foldLoadsRecursive(X, LOps, DL, AA, /*IsRoot=*/false) && LOps.FoundRoot)
    // Avoid Partial chain merge.
    return false;

  // Check if the pattern has loads
  LoadInst *LI1 = LOps.Root;
  uint64_t ShAmt1 = LOps.Shift;
  if (LOps.FoundRoot == false &&
      match(X, m_OneUse(
                   m_ShlOrSelf(m_OneUse(m_ZExt(m_Instruction(L1))), ShAmt1)))) {
    LI1 = dyn_cast<LoadInst>(L1);
  }
  LoadInst *LI2 = dyn_cast<LoadInst>(L2);

  // Check if loads are same, atomic, volatile and having same address space.
  if (LI1 == LI2 || !LI1 || !LI2 || !LI1->isSimple() || !LI2->isSimple() ||
      LI1->getPointerAddressSpace() != LI2->getPointerAddressSpace())
    return false;

  // Check if Loads come from same BB.
  if (LI1->getParent() != LI2->getParent())
    return false;

  // Find the data layout
  bool IsBigEndian = DL.isBigEndian();

  // Check if loads are consecutive and same size.
  Value *Load1Ptr = LI1->getPointerOperand();
  APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
  Load1Ptr =
      Load1Ptr->stripAndAccumulateConstantOffsets(DL, Offset1,
                                                  /* AllowNonInbounds */ true);

  Value *Load2Ptr = LI2->getPointerOperand();
  APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);
  Load2Ptr =
      Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2,
                                                  /* AllowNonInbounds */ true);

  // Verify if both loads have same base pointers
  uint64_t LoadSize1 = LI1->getType()->getPrimitiveSizeInBits();
  uint64_t LoadSize2 = LI2->getType()->getPrimitiveSizeInBits();
  if (Load1Ptr != Load2Ptr)
    return false;

  // Make sure that there are no padding bits.
  if (!DL.typeSizeEqualsStoreSize(LI1->getType()) ||
      !DL.typeSizeEqualsStoreSize(LI2->getType()))
    return false;

  // Alias Analysis to check for stores b/w the loads.
  LoadInst *Start = LOps.FoundRoot ? LOps.RootInsert : LI1, *End = LI2;
  MemoryLocation Loc;
  if (!Start->comesBefore(End)) {
    std::swap(Start, End);
    // If LOps.RootInsert comes after LI2, since we use LI2 as the new insert
    // point, we should make sure whether the memory region accessed by LOps
    // isn't modified.
    if (LOps.FoundRoot)
      Loc = MemoryLocation(
          LOps.Root->getPointerOperand(),
          LocationSize::precise(DL.getTypeStoreSize(
              IntegerType::get(LI1->getContext(), LOps.LoadSize))),
          LOps.AATags);
    else
      Loc = MemoryLocation::get(End);
  } else
    Loc = MemoryLocation::get(End);
  unsigned NumScanned = 0;
  for (Instruction &Inst :
       make_range(Start->getIterator(), End->getIterator())) {
    if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc)))
      return false;

    if (++NumScanned > MaxInstrsToScan)
      return false;
  }

  // Make sure Load with lower Offset is at LI1
  bool Reverse = false;
  if (Offset2.slt(Offset1)) {
    std::swap(LI1, LI2);
    std::swap(ShAmt1, ShAmt2);
    std::swap(Offset1, Offset2);
    std::swap(Load1Ptr, Load2Ptr);
    std::swap(LoadSize1, LoadSize2);
    Reverse = true;
  }

  // Big endian swap the shifts
  if (IsBigEndian)
    std::swap(ShAmt1, ShAmt2);

  // First load is always LI1. This is where we put the new load.
  // Use the merged load size available from LI1 for forward loads.
  if (LOps.FoundRoot) {
    if (!Reverse)
      LoadSize1 = LOps.LoadSize;
    else
      LoadSize2 = LOps.LoadSize;
  }

  // Verify if shift amount and load index aligns and verifies that loads
  // are consecutive.
  uint64_t ShiftDiff = IsBigEndian ? LoadSize2 : LoadSize1;
  uint64_t PrevSize =
      DL.getTypeStoreSize(IntegerType::get(LI1->getContext(), LoadSize1));
  if ((ShAmt2 - ShAmt1) != ShiftDiff || (Offset2 - Offset1) != PrevSize)
    return false;

  // Update LOps
  AAMDNodes AATags1 = LOps.AATags;
  AAMDNodes AATags2 = LI2->getAAMetadata();
  if (LOps.FoundRoot == false) {
    LOps.FoundRoot = true;
    AATags1 = LI1->getAAMetadata();
  }
  LOps.LoadSize = LoadSize1 + LoadSize2;
  LOps.RootInsert = Start;

  // Concatenate the AATags of the Merged Loads.
  LOps.AATags = AATags1.concat(AATags2);

  LOps.Root = LI1;
  LOps.Shift = ShAmt1;
  LOps.ZextType = X->getType();
  return true;
}

// For a given BB instruction, evaluate all loads in the chain that form a
// pattern which suggests that the loads can be combined. The one and only use
// of the loads is to form a wider load.
static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL,
                                 TargetTransformInfo &TTI, AliasAnalysis &AA,
                                 const DominatorTree &DT) {
  // Only consider load chains of scalar values.
  if (isa<VectorType>(I.getType()))
    return false;

  LoadOps LOps;
  if (!foldLoadsRecursive(&I, LOps, DL, AA, /*IsRoot=*/true) || !LOps.FoundRoot)
    return false;

  IRBuilder<> Builder(&I);
  LoadInst *NewLoad = nullptr, *LI1 = LOps.Root;

  IntegerType *WiderType = IntegerType::get(I.getContext(), LOps.LoadSize);
  // TTI based checks if we want to proceed with wider load
  bool Allowed = TTI.isTypeLegal(WiderType);
  if (!Allowed)
    return false;

  unsigned AS = LI1->getPointerAddressSpace();
  unsigned Fast = 0;
  Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize,
                                               AS, LI1->getAlign(), &Fast);
  if (!Allowed || !Fast)
    return false;

  // Get the Index and Ptr for the new GEP.
  Value *Load1Ptr = LI1->getPointerOperand();
  Builder.SetInsertPoint(LOps.RootInsert);
  if (!DT.dominates(Load1Ptr, LOps.RootInsert)) {
    APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
    Load1Ptr = Load1Ptr->stripAndAccumulateConstantOffsets(
        DL, Offset1, /* AllowNonInbounds */ true);
    Load1Ptr = Builder.CreatePtrAdd(Load1Ptr, Builder.getInt(Offset1));
  }
  // Generate wider load.
  NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(),
                                      LI1->isVolatile(), "");
  NewLoad->takeName(LI1);
  // Set the New Load AATags Metadata.
  if (LOps.AATags)
    NewLoad->setAAMetadata(LOps.AATags);

  Value *NewOp = NewLoad;
  // Check if zero extend needed.
  if (LOps.ZextType)
    NewOp = Builder.CreateZExt(NewOp, LOps.ZextType);

  // Check if shift needed. We need to shift with the amount of load1
  // shift if not zero.
  if (LOps.Shift)
    NewOp = Builder.CreateShl(NewOp, LOps.Shift);
  I.replaceAllUsesWith(NewOp);

  return true;
}

/// ValWidth bits starting at ValOffset of Val stored at PtrBase+PtrOffset.
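/// E.g. "store i8 (trunc i32 %v), p" followed by
/// "store i8 (trunc (lshr i32 %v, 8)), p+1" yields two compatible PartStores
/// of %v that can be merged into a single i16 store on little-endian targets.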
struct PartStore {
  Value *PtrBase;
  APInt PtrOffset;
  Value *Val;
  uint64_t ValOffset;
  uint64_t ValWidth;
  StoreInst *Store;

  bool isCompatibleWith(const PartStore &Other) const {
    return PtrBase == Other.PtrBase && Val == Other.Val;
  }

  bool operator<(const PartStore &Other) const {
    return PtrOffset.slt(Other.PtrOffset);
  }
};

static std::optional<PartStore> matchPartStore(Instruction &I,
                                               const DataLayout &DL) {
  auto *Store = dyn_cast<StoreInst>(&I);
  if (!Store || !Store->isSimple())
    return std::nullopt;

  Value *StoredVal = Store->getValueOperand();
  Type *StoredTy = StoredVal->getType();
  if (!StoredTy->isIntegerTy() || !DL.typeSizeEqualsStoreSize(StoredTy))
    return std::nullopt;

  uint64_t ValWidth = StoredTy->getPrimitiveSizeInBits();
  uint64_t ValOffset;
  Value *Val;
  if (!match(StoredVal, m_Trunc(m_LShrOrSelf(m_Value(Val), ValOffset))))
    return std::nullopt;

  Value *Ptr = Store->getPointerOperand();
  APInt PtrOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *PtrBase = Ptr->stripAndAccumulateConstantOffsets(
      DL, PtrOffset, /*AllowNonInbounds=*/true);
  return {{PtrBase, PtrOffset, Val, ValOffset, ValWidth, Store}};
}

static bool mergeConsecutivePartStores(ArrayRef<PartStore> Parts,
                                       unsigned Width, const DataLayout &DL,
                                       TargetTransformInfo &TTI) {
  if (Parts.size() < 2)
    return false;

  // Check whether combining the stores is profitable.
  // FIXME: We could generate smaller stores if we can't produce a large one.
  const PartStore &First = Parts.front();
  LLVMContext &Ctx = First.Store->getContext();
  Type *NewTy = Type::getIntNTy(Ctx, Width);
  unsigned Fast = 0;
  if (!TTI.isTypeLegal(NewTy) ||
      !TTI.allowsMisalignedMemoryAccesses(Ctx, Width,
                                          First.Store->getPointerAddressSpace(),
                                          First.Store->getAlign(), &Fast) ||
      !Fast)
    return false;

  // Generate the combined store.
  IRBuilder<> Builder(First.Store);
  Value *Val = First.Val;
  if (First.ValOffset != 0)
    Val = Builder.CreateLShr(Val, First.ValOffset);
  Val = Builder.CreateZExtOrTrunc(Val, NewTy);
  StoreInst *Store = Builder.CreateAlignedStore(
      Val, First.Store->getPointerOperand(), First.Store->getAlign());

  // Merge various metadata onto the new store.
  AAMDNodes AATags = First.Store->getAAMetadata();
  SmallVector<Instruction *> Stores = {First.Store};
  Stores.reserve(Parts.size());
  SmallVector<DebugLoc> DbgLocs = {First.Store->getDebugLoc()};
  DbgLocs.reserve(Parts.size());
  for (const PartStore &Part : drop_begin(Parts)) {
    AATags = AATags.concat(Part.Store->getAAMetadata());
    Stores.push_back(Part.Store);
    DbgLocs.push_back(Part.Store->getDebugLoc());
  }
  Store->setAAMetadata(AATags);
  Store->mergeDIAssignID(Stores);
  Store->setDebugLoc(DebugLoc::getMergedLocations(DbgLocs));

  // Remove the old stores.
  for (const PartStore &Part : Parts)
    Part.Store->eraseFromParent();

  return true;
}

static bool mergePartStores(SmallVectorImpl<PartStore> &Parts,
                            const DataLayout &DL, TargetTransformInfo &TTI) {
  if (Parts.size() < 2)
    return false;

  // We now have multiple parts of the same value stored to the same pointer.
  // Sort the parts by pointer offset, and make sure they are consistent with
  // the value offsets. Also check that the value is fully covered without
  // overlaps.
  bool Changed = false;
  llvm::sort(Parts);
  int64_t LastEndOffsetFromFirst = 0;
  const PartStore *First = &Parts[0];
  for (const PartStore &Part : Parts) {
    APInt PtrOffsetFromFirst = Part.PtrOffset - First->PtrOffset;
    int64_t ValOffsetFromFirst = Part.ValOffset - First->ValOffset;
    if (PtrOffsetFromFirst * 8 != ValOffsetFromFirst ||
        LastEndOffsetFromFirst != ValOffsetFromFirst) {
      Changed |= mergeConsecutivePartStores(ArrayRef(First, &Part),
                                            LastEndOffsetFromFirst, DL, TTI);
      First = &Part;
      LastEndOffsetFromFirst = Part.ValWidth;
      continue;
    }

    LastEndOffsetFromFirst = ValOffsetFromFirst + Part.ValWidth;
  }

  Changed |= mergeConsecutivePartStores(ArrayRef(First, Parts.end()),
                                        LastEndOffsetFromFirst, DL, TTI);
  return Changed;
}

static bool foldConsecutiveStores(BasicBlock &BB, const DataLayout &DL,
                                  TargetTransformInfo &TTI,
                                  AliasAnalysis &AA) {
  // FIXME: Add big endian support.
  if (DL.isBigEndian())
    return false;

  BatchAAResults BatchAA(AA);
  SmallVector<PartStore, 8> Parts;
  bool MadeChange = false;
  for (Instruction &I : make_early_inc_range(BB)) {
    if (std::optional<PartStore> Part = matchPartStore(I, DL)) {
      if (Parts.empty() || Part->isCompatibleWith(Parts[0])) {
        Parts.push_back(std::move(*Part));
        continue;
      }

      MadeChange |= mergePartStores(Parts, DL, TTI);
      Parts.clear();
      Parts.push_back(std::move(*Part));
      continue;
    }

    if (Parts.empty())
      continue;

    if (I.mayThrow() ||
        (I.mayReadOrWriteMemory() &&
         isModOrRefSet(BatchAA.getModRefInfo(
             &I, MemoryLocation::getBeforeOrAfter(Parts[0].PtrBase))))) {
      MadeChange |= mergePartStores(Parts, DL, TTI);
      Parts.clear();
      continue;
    }
  }

  MadeChange |= mergePartStores(Parts, DL, TTI);
  return MadeChange;
}

/// Combine away instructions provided they are still equivalent when compared
/// against 0, i.e. whether they have any bits set.
1627 auto *I = dyn_cast<Instruction>(V);
1628 if (!I || I->getOpcode() != Instruction::Or || !I->hasOneUse())
1629 return nullptr;
1630
1631 Value *A;
1632
1633 // Look deeper into the chain of or's, combining away shl (so long as they are
1634 // nuw or nsw).
1635 Value *Op0 = I->getOperand(0);
1636 if (match(Op0, m_CombineOr(m_NSWShl(m_Value(A), m_Value()),
1637 m_NUWShl(m_Value(A), m_Value()))))
1638 Op0 = A;
1639 else if (auto *NOp = optimizeShiftInOrChain(Op0, Builder))
1640 Op0 = NOp;
1641
1642 Value *Op1 = I->getOperand(1);
1643 if (match(Op1, m_CombineOr(m_NSWShl(m_Value(A), m_Value()),
1644 m_NUWShl(m_Value(A), m_Value()))))
1645 Op1 = A;
1646 else if (auto *NOp = optimizeShiftInOrChain(Op1, Builder))
1647 Op1 = NOp;
1648
1649 if (Op0 != I->getOperand(0) || Op1 != I->getOperand(1))
1650 return Builder.CreateOr(Op0, Op1);
1651 return nullptr;
1652}
1653
1654static bool foldICmpOrChain(Instruction &I, const DataLayout &DL,
1655 TargetTransformInfo &TTI, AliasAnalysis &AA,
1656 const DominatorTree &DT) {
1657 CmpPredicate Pred;
1658 Value *Op0;
1659 if (!match(&I, m_ICmp(Pred, m_Value(Op0), m_Zero())) ||
1660 !ICmpInst::isEquality(Pred))
1661 return false;
1662
1663 // If the chain of or's matches a load, combine to that before attempting
1664 // to remove the shifts.
1665 if (auto OpI = dyn_cast<Instruction>(Op0))
1666 if (OpI->getOpcode() == Instruction::Or)
1667 if (foldConsecutiveLoads(*OpI, DL, TTI, AA, DT))
1668 return true;
1669
1670 IRBuilder<> Builder(&I);
1671 // icmp eq/ne or(shl(a), b), 0 -> icmp eq/ne or(a, b), 0
1672 if (auto *Res = optimizeShiftInOrChain(Op0, Builder)) {
1673 I.replaceAllUsesWith(Builder.CreateICmp(Pred, Res, I.getOperand(1)));
1674 return true;
1675 }
1676
1677 return false;
1678}
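// An illustrative example of the fold above (assumed, for exposition):
//   %s = shl nuw i32 %a, %c
//   %o = or i32 %s, %b
//   %r = icmp eq i32 %o, 0
// becomes
//   %o2 = or i32 %a, %b
//   %r  = icmp eq i32 %o2, 0
// because a nuw/nsw shift of a nonzero value remains nonzero, so dropping the
// shift cannot change whether any bit of the or-chain is set.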
1679
1680// Calculate the GEP stride and the accumulated constant offset ModOffset.
1681// Returns {Stride, ModOffset}.
1682static std::pair<APInt, APInt>
1683getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL) {
1684 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
1685 std::optional<APInt> Stride;
1686 APInt ModOffset(BW, 0);
1687 // Return a minimum GEP stride: the greatest common divisor of consecutive
1688 // GEP index scales (cf. Bézout's identity).
1689 while (auto *GEP = dyn_cast<GEPOperator>(PtrOp)) {
1690 SmallMapVector<Value *, APInt, 4> VarOffsets;
1691 if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset))
1692 break;
1693
1694 for (auto [V, Scale] : VarOffsets) {
1695 // Only keep a power-of-two factor of the scale if the GEP lacks nusw.
1696 if (!GEP->hasNoUnsignedSignedWrap())
1697 Scale = APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
1698
1699 if (!Stride)
1700 Stride = Scale;
1701 else
1702 Stride = APIntOps::GreatestCommonDivisor(*Stride, Scale);
1703 }
1704
1705 PtrOp = GEP->getPointerOperand();
1706 }
1707
1708 // Check whether the pointer arrives back at a GlobalVariable via at least
1709 // one GEP; even if it doesn't, we can still reason from the alignment.
1710 if (!isa<GlobalVariable>(PtrOp) || !Stride)
1711 return {APInt(BW, 1), APInt(BW, 0)};
1712
1713 // Since GEP indices may be signed, normalize the offset to the non-negative
1714 // remainder of division by the minimum GEP stride.
1715 ModOffset = ModOffset.srem(*Stride);
1716 if (ModOffset.isNegative())
1717 ModOffset += *Stride;
1718
1719 return {*Stride, ModOffset};
1720}
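// A worked example for the helper above (values are illustrative):
//   %q = getelementptr inbounds i8, ptr @g, i64 6
//   %p = getelementptr inbounds [64 x i32], ptr %q, i64 0, i64 %i
// The variable index is scaled by 4 and the accumulated constant offset is 6,
// so the result is Stride = 4 and ModOffset = 6 srem 4 = 2: every offset %p
// can take is congruent to 2 modulo 4.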
1721
1722/// If C is a constant patterned array and all valid loaded results for the
1723/// given alignment equal the same constant, fold the load to that constant.
1724static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) {
1725 auto *LI = dyn_cast<LoadInst>(&I);
1726 if (!LI || LI->isVolatile())
1727 return false;
1728
1729 // We can only fold the load if it is from a constant global with definitive
1730 // initializer. Skip expensive logic if this is not the case.
1731 auto *PtrOp = LI->getPointerOperand();
1732 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
1733 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
1734 return false;
1735
1736 // Bail for large initializers in excess of 4K to avoid too many scans.
1737 Constant *C = GV->getInitializer();
1738 uint64_t GVSize = DL.getTypeAllocSize(C->getType());
1739 if (!GVSize || 4096 < GVSize)
1740 return false;
1741
1742 Type *LoadTy = LI->getType();
1743 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
1744 auto [Stride, ConstOffset] = getStrideAndModOffsetOfGEP(PtrOp, DL);
1745
1746 // Any possible offset is a multiple of the GEP stride, and any valid
1747 // offset is a multiple of the load alignment, so checking only multiples
1748 // of the larger of the two is sufficient to establish the results' equality.
1749 if (auto LA = LI->getAlign();
1750 LA <= GV->getAlign().valueOrOne() && Stride.getZExtValue() < LA.value()) {
1751 ConstOffset = APInt(BW, 0);
1752 Stride = APInt(BW, LA.value());
1753 }
1754
1755 Constant *Ca = ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL);
1756 if (!Ca)
1757 return false;
1758
1759 unsigned E = GVSize - DL.getTypeStoreSize(LoadTy);
1760 for (; ConstOffset.getZExtValue() <= E; ConstOffset += Stride)
1761 if (Ca != ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL))
1762 return false;
1763
1764 I.replaceAllUsesWith(Ca);
1765
1766 return true;
1767}
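// To illustrate the fold above (a hedged example): given
//   @tbl = constant [8 x i16] [i16 5, i16 5, i16 5, i16 5,
//                              i16 5, i16 5, i16 5, i16 5]
//   %p = getelementptr inbounds [8 x i16], ptr @tbl, i64 0, i64 %i
//   %v = load i16, ptr %p, align 2
// every offset the analysis considers possible loads the value 5, so %v is
// replaced by the constant `i16 5`.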
1768
1769namespace {
1770class StrNCmpInliner {
1771public:
1772 StrNCmpInliner(CallInst *CI, LibFunc Func, DomTreeUpdater *DTU,
1773 const DataLayout &DL)
1774 : CI(CI), Func(Func), DTU(DTU), DL(DL) {}
1775
1776 bool optimizeStrNCmp();
1777
1778private:
1779 void inlineCompare(Value *LHS, StringRef RHS, uint64_t N, bool Swapped);
1780
1781 CallInst *CI;
1782 LibFunc Func;
1783 DomTreeUpdater *DTU;
1784 const DataLayout &DL;
1785};
1786
1787} // namespace
1788
1789/// First we normalize calls to strncmp/strcmp to the form of
1790/// compare(s1, s2, N), which means comparing the first N bytes of s1 and s2
1791/// (without considering '\0').
1792///
1793/// Examples:
1794///
1795/// \code
1796/// strncmp(s, "a", 3) -> compare(s, "a", 2)
1797/// strncmp(s, "abc", 3) -> compare(s, "abc", 3)
1798/// strncmp(s, "a\0b", 3) -> compare(s, "a\0b", 2)
1799/// strcmp(s, "a") -> compare(s, "a", 2)
1800///
1801/// char s2[] = {'a'}
1802/// strncmp(s, s2, 3) -> compare(s, s2, 3)
1803///
1804/// char s2[] = {'a', 'b', 'c', 'd'}
1805/// strncmp(s, s2, 3) -> compare(s, s2, 3)
1806/// \endcode
1807///
1808/// We only handle cases where N and exactly one of s1 and s2 are constant.
1809/// Cases where both s1 and s2 are constant are already handled by the
1810/// instcombine pass.
1811///
1812/// We do not handle cases where N > StrNCmpInlineThreshold.
1813///
1814/// We also do not handle cases where N < 2, which are already
1815/// handled by the instcombine pass.
1816///
1817bool StrNCmpInliner::optimizeStrNCmp() {
1818 if (StrNCmpInlineThreshold < 2)
1819 return false;
1820
1821 if (!isOnlyUsedInZeroComparison(CI))
1822 return false;
1823
1824 Value *Str1P = CI->getArgOperand(0);
1825 Value *Str2P = CI->getArgOperand(1);
1826 // Should be handled elsewhere.
1827 if (Str1P == Str2P)
1828 return false;
1829
1830 StringRef Str1, Str2;
1831 bool HasStr1 = getConstantStringInfo(Str1P, Str1, /*TrimAtNul=*/false);
1832 bool HasStr2 = getConstantStringInfo(Str2P, Str2, /*TrimAtNul=*/false);
1833 if (HasStr1 == HasStr2)
1834 return false;
1835
1836 // Note that '\0' and characters after it are not trimmed.
1837 StringRef Str = HasStr1 ? Str1 : Str2;
1838 Value *StrP = HasStr1 ? Str2P : Str1P;
1839
1840 size_t Idx = Str.find('\0');
1841 uint64_t N = Idx == StringRef::npos ? UINT64_MAX : Idx + 1;
1842 if (Func == LibFunc_strncmp) {
1843 if (auto *ConstInt = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
1844 N = std::min(N, ConstInt->getZExtValue());
1845 else
1846 return false;
1847 }
1848 // N is now the maximum number of bytes we need to compare.
1849 if (N > Str.size() || N < 2 || N > StrNCmpInlineThreshold)
1850 return false;
1851
1852 // Cases where StrP has two or more dereferenceable bytes might be better
1853 // optimized elsewhere.
1854 bool CanBeNull = false, CanBeFreed = false;
1855 if (StrP->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed) > 1)
1856 return false;
1857 inlineCompare(StrP, Str, N, HasStr1);
1858 return true;
1859}
1860
1861/// Convert
1862///
1863/// \code
1864/// ret = compare(s1, s2, N)
1865/// \endcode
1866///
1867/// into
1868///
1869/// \code
1870/// ret = (int)s1[0] - (int)s2[0]
1871/// if (ret != 0)
1872/// goto NE
1873/// ...
1874/// ret = (int)s1[N-2] - (int)s2[N-2]
1875/// if (ret != 0)
1876/// goto NE
1877/// ret = (int)s1[N-1] - (int)s2[N-1]
1878/// NE:
1879/// \endcode
1880///
1881/// CFG before and after the transformation:
1882///
1883/// (before)
1884/// BBCI
1885///
1886/// (after)
1887/// BBCI -> BBSubs[0] (sub,icmp) --NE-> BBNE -> BBTail
1888/// | ^
1889/// E |
1890/// | |
1891/// BBSubs[1] (sub,icmp) --NE-----+
1892/// ... |
1893/// BBSubs[N-1] (sub) ---------+
1894///
1895void StrNCmpInliner::inlineCompare(Value *LHS, StringRef RHS, uint64_t N,
1896 bool Swapped) {
1897 auto &Ctx = CI->getContext();
1898 IRBuilder<> B(Ctx);
1899 // We want these instructions to be recognized as inlined instructions for the
1900 // compare call, but we don't have a source location for the definition of
1901 // that function, since we're generating that code now. Because the generated
1902 // code is a viable point for a memory access error, we make the pragmatic
1903 // choice here to directly use CI's location so that we have useful
1904 // attribution for the generated code.
1905 B.SetCurrentDebugLocation(CI->getDebugLoc());
1906
1907 BasicBlock *BBCI = CI->getParent();
1908 BasicBlock *BBTail =
1909 SplitBlock(BBCI, CI, DTU, nullptr, nullptr, BBCI->getName() + ".tail");
1910
1911 SmallVector<BasicBlock *> BBSubs;
1912 for (uint64_t I = 0; I < N; ++I)
1913 BBSubs.push_back(
1914 BasicBlock::Create(Ctx, "sub_" + Twine(I), BBCI->getParent(), BBTail));
1915 BasicBlock *BBNE = BasicBlock::Create(Ctx, "ne", BBCI->getParent(), BBTail);
1916
1917 cast<UncondBrInst>(BBCI->getTerminator())->setSuccessor(BBSubs[0]);
1918
1919 B.SetInsertPoint(BBNE);
1920 PHINode *Phi = B.CreatePHI(CI->getType(), N);
1921 B.CreateBr(BBTail);
1922
1923 Value *Base = LHS;
1924 for (uint64_t i = 0; i < N; ++i) {
1925 B.SetInsertPoint(BBSubs[i]);
1926 Value *VL =
1927 B.CreateZExt(B.CreateLoad(B.getInt8Ty(),
1928 B.CreateInBoundsPtrAdd(Base, B.getInt64(i))),
1929 CI->getType());
1930 Value *VR =
1931 ConstantInt::get(CI->getType(), static_cast<unsigned char>(RHS[i]));
1932 Value *Sub = Swapped ? B.CreateSub(VR, VL) : B.CreateSub(VL, VR);
1933 if (i < N - 1) {
1934 CondBrInst *CondBrInst = B.CreateCondBr(
1935 B.CreateICmpNE(Sub, ConstantInt::get(CI->getType(), 0)), BBNE,
1936 BBSubs[i + 1]);
1937
1938 Function *F = CI->getFunction();
1939 assert(F && "Instruction does not belong to a function!");
1940 std::optional<Function::ProfileCount> EC = F->getEntryCount();
1941 if (EC && EC->getCount() > 0)
1942 setExplicitlyUnknownBranchWeightsIfProfiled(*CondBrInst, DEBUG_TYPE, F);
1943 } else {
1944 B.CreateBr(BBNE);
1945 }
1946
1947 Phi->addIncoming(Sub, BBSubs[i]);
1948 }
1949
1950 CI->replaceAllUsesWith(Phi);
1951 CI->eraseFromParent();
1952
1953 if (DTU) {
1954 SmallVector<DominatorTree::UpdateType, 8> Updates;
1955 Updates.push_back({DominatorTree::Insert, BBCI, BBSubs[0]});
1956 for (uint64_t i = 0; i < N; ++i) {
1957 if (i < N - 1)
1958 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBSubs[i + 1]});
1959 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBNE});
1960 }
1961 Updates.push_back({DominatorTree::Insert, BBNE, BBTail});
1962 Updates.push_back({DominatorTree::Delete, BBCI, BBTail});
1963 DTU->applyUpdates(Updates);
1964 }
1965}
1966
1967/// Convert memchr with a small constant string into a switch
1968static bool foldMemChr(CallInst *Call, DomTreeUpdater *DTU,
1969 const DataLayout &DL) {
1970 if (isa<Constant>(Call->getArgOperand(1)))
1971 return false;
1972
1973 StringRef Str;
1974 Value *Base = Call->getArgOperand(0);
1975 if (!getConstantStringInfo(Base, Str, /*TrimAtNul=*/false))
1976 return false;
1977
1978 uint64_t N = Str.size();
1979 if (auto *ConstInt = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
1980 uint64_t Val = ConstInt->getZExtValue();
1981 // Bail out if n is larger than the size of the string.
1982 if (Val > N)
1983 return false;
1984 N = Val;
1985 } else
1986 return false;
1987
1988 if (N > MemChrInlineThreshold)
1989 return false;
1990
1991 BasicBlock *BB = Call->getParent();
1992 BasicBlock *BBNext = SplitBlock(BB, Call, DTU);
1993 IRBuilder<> IRB(BB);
1994 IRB.SetCurrentDebugLocation(Call->getDebugLoc());
1995 IntegerType *ByteTy = IRB.getInt8Ty();
1996 BB->getTerminator()->eraseFromParent();
1997 SwitchInst *SI = IRB.CreateSwitch(
1998 IRB.CreateTrunc(Call->getArgOperand(1), ByteTy), BBNext, N);
1999 // We can't know the precise weights here, as they would depend on the value
2000 // distribution of Call->getArgOperand(1). So we just mark it as "unknown".
2001 setExplicitlyUnknownBranchWeights(*SI, DEBUG_TYPE);
2002 Type *IndexTy = DL.getIndexType(Call->getType());
2003 SmallVector<DominatorTree::UpdateType, 8> Updates;
2004
2005 BasicBlock *BBSuccess = BasicBlock::Create(
2006 Call->getContext(), "memchr.success", BB->getParent(), BBNext);
2007 IRB.SetInsertPoint(BBSuccess);
2008 PHINode *IndexPHI = IRB.CreatePHI(IndexTy, N, "memchr.idx");
2009 Value *FirstOccursLocation = IRB.CreateInBoundsPtrAdd(Base, IndexPHI);
2010 IRB.CreateBr(BBNext);
2011 if (DTU)
2012 Updates.push_back({DominatorTree::Insert, BBSuccess, BBNext});
2013
2014 SmallPtrSet<ConstantInt *, 4> Cases;
2015 for (uint64_t I = 0; I < N; ++I) {
2016 ConstantInt *CaseVal =
2017 ConstantInt::get(ByteTy, static_cast<unsigned char>(Str[I]));
2018 if (!Cases.insert(CaseVal).second)
2019 continue;
2020
2021 BasicBlock *BBCase = BasicBlock::Create(Call->getContext(), "memchr.case",
2022 BB->getParent(), BBSuccess);
2023 SI->addCase(CaseVal, BBCase);
2024 IRB.SetInsertPoint(BBCase);
2025 IndexPHI->addIncoming(ConstantInt::get(IndexTy, I), BBCase);
2026 IRB.CreateBr(BBSuccess);
2027 if (DTU) {
2028 Updates.push_back({DominatorTree::Insert, BB, BBCase});
2029 Updates.push_back({DominatorTree::Insert, BBCase, BBSuccess});
2030 }
2031 }
2032
2033 PHINode *PHI =
2034 PHINode::Create(Call->getType(), 2, Call->getName(), BBNext->begin());
2035 PHI->addIncoming(Constant::getNullValue(Call->getType()), BB);
2036 PHI->addIncoming(FirstOccursLocation, BBSuccess);
2037
2038 Call->replaceAllUsesWith(PHI);
2039 Call->eraseFromParent();
2040
2041 if (DTU)
2042 DTU->applyUpdates(Updates);
2043
2044 return true;
2045}
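// A sketch of the conversion above (illustrative operand values): a call
// `memchr(%s, %c, 2)` where %s points at the constant "ab" becomes roughly
//   %c8 = trunc i32 %c to i8
//   switch i8 %c8, label %tail [
//     i8 97, label %case0    ; 'a' -> index 0
//     i8 98, label %case1    ; 'b' -> index 1
//   ]
// where each case feeds its index into a PHI computing the result pointer,
// and the default (not-found) path yields null.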
2046
2047static bool foldLibCalls(Instruction &I, TargetTransformInfo &TTI,
2048 TargetLibraryInfo &TLI, AssumptionCache &AC,
2049 DominatorTree &DT, const DataLayout &DL,
2050 bool &MadeCFGChange) {
2051
2052 auto *CI = dyn_cast<CallInst>(&I);
2053 if (!CI || CI->isNoBuiltin())
2054 return false;
2055
2056 Function *CalledFunc = CI->getCalledFunction();
2057 if (!CalledFunc)
2058 return false;
2059
2060 LibFunc LF;
2061 if (!TLI.getLibFunc(*CalledFunc, LF) ||
2062 !isLibFuncEmittable(CI->getModule(), &TLI, LF))
2063 return false;
2064
2065 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Lazy);
2066
2067 switch (LF) {
2068 case LibFunc_sqrt:
2069 case LibFunc_sqrtf:
2070 case LibFunc_sqrtl:
2071 return foldSqrt(CI, LF, TTI, TLI, AC, DT);
2072 case LibFunc_strcmp:
2073 case LibFunc_strncmp:
2074 if (StrNCmpInliner(CI, LF, &DTU, DL).optimizeStrNCmp()) {
2075 MadeCFGChange = true;
2076 return true;
2077 }
2078 break;
2079 case LibFunc_memchr:
2080 if (foldMemChr(CI, &DTU, DL)) {
2081 MadeCFGChange = true;
2082 return true;
2083 }
2084 break;
2085 default:;
2086 }
2087 return false;
2088}
2089
2090/// Match high part of long multiplication.
2091///
2092/// Considering a multiply made up of high and low parts, we can split the
2093/// multiply into:
2094/// x * y == (xh*T + xl) * (yh*T + yl)
2095/// where xh == x>>32 and xl == x & 0xffffffff. T = 2^32.
2096/// This expands to
2097/// xh*yh*T*T + xh*yl*T + xl*yh*T + xl*yl
2098/// which can be drawn as
2099/// [ xh*yh ]
2100/// [ xh*yl ]
2101/// [ xl*yh ]
2102/// [ xl*yl ]
2103/// We are looking for the "high" half, which is xh*yh + xh*yl>>32 + xl*yh>>32 +
2104/// some carries. The carry makes this difficult and there are multiple ways of
2105/// representing it. The ones we attempt to support here are:
2106/// Carry: xh*yh + carry + lowsum
2107/// carry = lowsum < xh*yl ? 0x100000000 : 0
2108/// lowsum = xh*yl + xl*yh + (xl*yl>>32)
2109/// Ladder: xh*yh + c2>>32 + c3>>32
2110/// c2 = xh*yl + (xl*yl>>32); c3 = c2&0xffffffff + xl*yh
2111/// or c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32); c3 = xl*yh
2112/// Carry4: xh*yh + carry + crosssum>>32 + (xl*yl>>32 + crosssum&0xffffffff) >> 32
2113/// crosssum = xh*yl + xl*yh
2114/// carry = crosssum < xh*yl ? 0x100000000 : 0
2115/// Ladder4: xh*yh + (xl*yh)>>32 + (xh*yl)>>32 + low>>32;
2116/// low = (xl*yl)>>32 + (xl*yh)&0xffffffff + (xh*yl)&0xffffffff
2117///
2118/// They all start by matching xh*yh + 2 or 3 other operands. The bottom of the
2119/// tree is xh*yh, xh*yl, xl*yh and xl*yl.
2120static bool foldMulHigh(Instruction &I) {
2121 Type *Ty = I.getType();
2122 if (!Ty->isIntOrIntVectorTy())
2123 return false;
2124
2125 unsigned BitWidth = Ty->getScalarSizeInBits();
2126 APInt LowMask = APInt::getLowBitsSet(BitWidth, BitWidth / 2);
2127 if (BitWidth % 2 != 0)
2128 return false;
2129
2130 auto CreateMulHigh = [&](Value *X, Value *Y) {
2131 IRBuilder<> Builder(&I);
2132 Type *NTy = Ty->getWithNewBitWidth(BitWidth * 2);
2133 Value *XExt = Builder.CreateZExt(X, NTy);
2134 Value *YExt = Builder.CreateZExt(Y, NTy);
2135 Value *Mul = Builder.CreateMul(XExt, YExt, "", /*HasNUW=*/true);
2136 Value *High = Builder.CreateLShr(Mul, BitWidth);
2137 Value *Res = Builder.CreateTrunc(High, Ty, "", /*HasNUW=*/true);
2138 Res->takeName(&I);
2139 I.replaceAllUsesWith(Res);
2140 LLVM_DEBUG(dbgs() << "Created long multiply from parts of " << *X << " and "
2141 << *Y << "\n");
2142 return true;
2143 };
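  // For illustration (an assumed example, not from the original comments):
  // when I has type i64, CreateMulHigh emits roughly
  //   %xe = zext i64 %x to i128
  //   %ye = zext i64 %y to i128
  //   %m  = mul nuw i128 %xe, %ye
  //   %hi = lshr i128 %m, 64
  //   %r  = trunc nuw i128 %hi to i64
  // and replaces all uses of I with %r.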
2144
2145 // Common check routines for X_lo*Y_lo and X_hi*Y_lo
2146 auto CheckLoLo = [&](Value *XlYl, Value *X, Value *Y) {
2147 return match(XlYl, m_c_Mul(m_And(m_Specific(X), m_SpecificInt(LowMask)),
2148 m_And(m_Specific(Y), m_SpecificInt(LowMask))));
2149 };
2150 auto CheckHiLo = [&](Value *XhYl, Value *X, Value *Y) {
2151 return match(XhYl,
2152 m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(BitWidth / 2)),
2153 m_And(m_Specific(Y), m_SpecificInt(LowMask))));
2154 };
2155
2156 auto FoldMulHighCarry = [&](Value *X, Value *Y, Instruction *Carry,
2157 Instruction *B) {
2158 // Looking for LowSum >> 32 and carry (select)
2159 if (Carry->getOpcode() != Instruction::Select)
2160 std::swap(Carry, B);
2161
2162 // Carry = LowSum < XhYl ? 0x100000000 : 0
2163 Value *LowSum, *XhYl;
2164 if (!match(Carry,
2167 m_Value(XhYl))),
2169 m_Zero()))))
2170 return false;
2171
2172 // XhYl can be Xh*Yl or Xl*Yh
2173 if (!CheckHiLo(XhYl, X, Y)) {
2174 if (CheckHiLo(XhYl, Y, X))
2175 std::swap(X, Y);
2176 else
2177 return false;
2178 }
2179 if (XhYl->hasNUsesOrMore(3))
2180 return false;
2181
2182 // B = LowSum >> 32
2183 if (!match(B, m_OneUse(m_LShr(m_Specific(LowSum),
2184 m_SpecificInt(BitWidth / 2)))) ||
2185 LowSum->hasNUsesOrMore(3))
2186 return false;
2187
2188 // LowSum = XhYl + XlYh + XlYl>>32
2189 Value *XlYh, *XlYl;
2190 auto XlYlHi = m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2));
2191 if (!match(LowSum,
2192 m_c_Add(m_Specific(XhYl),
2193 m_OneUse(m_c_Add(m_OneUse(m_Value(XlYh)), XlYlHi)))) &&
2194 !match(LowSum, m_c_Add(m_OneUse(m_Value(XlYh)),
2195 m_OneUse(m_c_Add(m_Specific(XhYl), XlYlHi)))) &&
2196 !match(LowSum,
2197 m_c_Add(XlYlHi, m_OneUse(m_c_Add(m_Specific(XhYl),
2198 m_OneUse(m_Value(XlYh)))))))
2199 return false;
2200
2201 // Check XlYl and XlYh
2202 if (!CheckLoLo(XlYl, X, Y))
2203 return false;
2204 if (!CheckHiLo(XlYh, Y, X))
2205 return false;
2206
2207 return CreateMulHigh(X, Y);
2208 };
2209
2210 auto FoldMulHighLadder = [&](Value *X, Value *Y, Instruction *A,
2211 Instruction *B) {
2212 // xh*yh + c2>>32 + c3>>32
2213 // c2 = xh*yl + (xl*yl>>32); c3 = c2&0xffffffff + xl*yh
2214 // or c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32); c3 = xh*yl
2215 Value *XlYh, *XhYl, *XlYl, *C2, *C3;
2216 // Strip off the two expected shifts.
2217 if (!match(A, m_LShr(m_Value(C2), m_SpecificInt(BitWidth / 2))) ||
2218 !match(B, m_LShr(m_Value(C3), m_SpecificInt(BitWidth / 2))))
2219 return false;
2220
2221 if (match(C3, m_c_Add(m_Add(m_Value(), m_Value()), m_Value())))
2222 std::swap(C2, C3);
2223 // Try to match c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32)
2224 if (match(C2,
2226 m_Value(XlYh)),
2227 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)))) ||
2229 m_LShr(m_Value(XlYl),
2230 m_SpecificInt(BitWidth / 2))),
2231 m_Value(XlYh))) ||
2233 m_SpecificInt(BitWidth / 2)),
2234 m_Value(XlYh)),
2235 m_And(m_Specific(C3), m_SpecificInt(LowMask))))) {
2236 XhYl = C3;
2237 } else {
2238 // Match c3 = c2&0xffffffff + xl*yh
2239 if (!match(C3, m_c_Add(m_And(m_Specific(C2), m_SpecificInt(LowMask)),
2240 m_Value(XlYh))))
2241 std::swap(C2, C3);
2242 if (!match(C3, m_c_Add(m_OneUse(
2243 m_And(m_Specific(C2), m_SpecificInt(LowMask))),
2244 m_Value(XlYh))) ||
2245 !C3->hasOneUse() || C2->hasNUsesOrMore(3))
2246 return false;
2247
2248 // Match c2 = xh*yl + (xl*yl >> 32)
2249 if (!match(C2, m_c_Add(m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)),
2250 m_Value(XhYl))))
2251 return false;
2252 }
2253
2254 // Match XhYl and XlYh - they can appear either way around.
2255 if (!CheckHiLo(XlYh, Y, X))
2256 std::swap(XlYh, XhYl);
2257 if (!CheckHiLo(XlYh, Y, X))
2258 return false;
2259 if (!CheckHiLo(XhYl, X, Y))
2260 return false;
2261 if (!CheckLoLo(XlYl, X, Y))
2262 return false;
2263
2264 return CreateMulHigh(X, Y);
2265 };
2266
2267 auto FoldMulHighLadder4 = [&](Value *X, Value *Y, Instruction *A,
2268 Instruction *B, Instruction *C) {
2269 /// Ladder4: xh*yh + (xl*yh)>>32 + (xh*yl)>>32 + low>>32;
2270 /// low = (xl*yl)>>32 + (xl*yh)&0xffffffff + (xh*yl)&0xffffffff
2271
2272 // Find A = Low >> 32 and B/C = XhYl>>32, XlYh>>32.
2273 auto ShiftAdd =
2275 if (!match(A, ShiftAdd))
2276 std::swap(A, B);
2277 if (!match(A, ShiftAdd))
2278 std::swap(A, C);
2279 Value *Low;
2281 return false;
2282
2283 // Match B == XhYl>>32 and C == XlYh>>32
2284 Value *XhYl, *XlYh;
2285 if (!match(B, m_LShr(m_Value(XhYl), m_SpecificInt(BitWidth / 2))) ||
2286 !match(C, m_LShr(m_Value(XlYh), m_SpecificInt(BitWidth / 2))))
2287 return false;
2288 if (!CheckHiLo(XhYl, X, Y))
2289 std::swap(XhYl, XlYh);
2290 if (!CheckHiLo(XhYl, X, Y) || XhYl->hasNUsesOrMore(3))
2291 return false;
2292 if (!CheckHiLo(XlYh, Y, X) || XlYh->hasNUsesOrMore(3))
2293 return false;
2294
2295 // Match Low as XlYl>>32 + XhYl&0xffffffff + XlYh&0xffffffff
2296 Value *XlYl;
2297 if (!match(
2298 Low,
2299 m_c_Add(
2300 m_OneUse(m_c_Add(
2301 m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))),
2302 m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))))),
2303 m_OneUse(
2304 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))) &&
2305 !match(
2306 Low,
2307 m_c_Add(
2308 m_OneUse(m_c_Add(
2309 m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))),
2310 m_OneUse(
2311 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))),
2312 m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))))) &&
2313 !match(
2314 Low,
2315 m_c_Add(
2316 m_OneUse(m_c_Add(
2317 m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))),
2318 m_OneUse(
2319 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))),
2320 m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))))))
2321 return false;
2322 if (!CheckLoLo(XlYl, X, Y))
2323 return false;
2324
2325 return CreateMulHigh(X, Y);
2326 };
2327
2328 auto FoldMulHighCarry4 = [&](Value *X, Value *Y, Instruction *Carry,
2329 Instruction *B, Instruction *C) {
2330 // xh*yh + carry + crosssum>>32 + (xl*yl>>32 + crosssum&0xffffffff) >> 32
2331 // crosssum = xh*yl + xl*yh
2332 // carry = crosssum < xh*yl ? 0x100000000 : 0
2333 if (Carry->getOpcode() != Instruction::Select)
2334 std::swap(Carry, B);
2335 if (Carry->getOpcode() != Instruction::Select)
2336 std::swap(Carry, C);
2337
2338 // Carry = CrossSum < XhYl ? 0x100000000 : 0
2339 Value *CrossSum, *XhYl;
2340 if (!match(Carry,
2343 m_Value(CrossSum), m_Value(XhYl))),
2345 m_Zero()))))
2346 return false;
2347
2348 if (!match(B, m_LShr(m_Specific(CrossSum), m_SpecificInt(BitWidth / 2))))
2349 std::swap(B, C);
2350 if (!match(B, m_LShr(m_Specific(CrossSum), m_SpecificInt(BitWidth / 2))))
2351 return false;
2352
2353 Value *XlYl, *LowAccum;
2354 if (!match(C, m_LShr(m_Value(LowAccum), m_SpecificInt(BitWidth / 2))) ||
2355 !match(LowAccum, m_c_Add(m_OneUse(m_LShr(m_Value(XlYl),
2356 m_SpecificInt(BitWidth / 2))),
2357 m_OneUse(m_And(m_Specific(CrossSum),
2358 m_SpecificInt(LowMask))))) ||
2359 LowAccum->hasNUsesOrMore(3))
2360 return false;
2361 if (!CheckLoLo(XlYl, X, Y))
2362 return false;
2363
2364 if (!CheckHiLo(XhYl, X, Y))
2365 std::swap(X, Y);
2366 if (!CheckHiLo(XhYl, X, Y))
2367 return false;
2368 Value *XlYh;
2369 if (!match(CrossSum, m_c_Add(m_Specific(XhYl), m_OneUse(m_Value(XlYh)))) ||
2370 !CheckHiLo(XlYh, Y, X) || CrossSum->hasNUsesOrMore(4) ||
2371 XhYl->hasNUsesOrMore(3))
2372 return false;
2373
2374 return CreateMulHigh(X, Y);
2375 };
2376
2377 // X and Y are the two inputs, A, B and C are other parts of the pattern
2378 // (crosssum>>32, carry, etc).
2379 Value *X, *Y;
2380 Instruction *A, *B, *C;
2381 auto HiHi = m_OneUse(m_Mul(m_LShr(m_Value(X), m_SpecificInt(BitWidth / 2)),
2382 m_LShr(m_Value(Y), m_SpecificInt(BitWidth / 2))));
2383 if ((match(&I, m_c_Add(HiHi, m_OneUse(m_Add(m_Instruction(A),
2384 m_Instruction(B))))) ||
2385 match(&I, m_c_Add(m_Instruction(A),
2386 m_OneUse(m_c_Add(HiHi, m_Instruction(B)))))) &&
2387 A->hasOneUse() && B->hasOneUse())
2388 if (FoldMulHighCarry(X, Y, A, B) || FoldMulHighLadder(X, Y, A, B))
2389 return true;
2390
2391 if ((match(&I, m_c_Add(HiHi, m_OneUse(m_c_Add(
2394 m_Instruction(C))))))) ||
2398 m_Instruction(C))))))) ||
2402 m_OneUse(m_c_Add(HiHi, m_Instruction(C))))))) ||
2403 match(&I,
2406 A->hasOneUse() && B->hasOneUse() && C->hasOneUse())
2407 return FoldMulHighCarry4(X, Y, A, B, C) ||
2408 FoldMulHighLadder4(X, Y, A, B, C);
2409
2410 return false;
2411}
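// For reference, a C++-level source idiom that lowers to the "Carry" shape
// matched above (a hedged sketch; names are illustrative, not from the pass):
//
//   uint64_t mulhi64(uint64_t x, uint64_t y) {
//     uint64_t xl = x & 0xffffffff, xh = x >> 32;
//     uint64_t yl = y & 0xffffffff, yh = y >> 32;
//     uint64_t ll = xl * yl;
//     uint64_t lowsum = xh * yl + xl * yh + (ll >> 32); // may wrap once
//     uint64_t carry = lowsum < xh * yl ? 0x100000000 : 0;
//     return xh * yh + carry + (lowsum >> 32);          // the "high" half
//   }
//
// foldMulHigh rewrites such an expression tree into a single widened multiply
// followed by a logical shift right and a truncate.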
2412
2413/// This is the entry point for folds that could be implemented in regular
2414/// InstCombine, but they are separated because they are not expected to
2415/// occur frequently and/or have more than a constant-length pattern match.
2416static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
2417 TargetTransformInfo &TTI,
2418 TargetLibraryInfo &TLI, AliasAnalysis &AA,
2419 AssumptionCache &AC, bool &MadeCFGChange) {
2420 bool MadeChange = false;
2421 for (BasicBlock &BB : F) {
2422 // Ignore unreachable basic blocks.
2423 if (!DT.isReachableFromEntry(&BB))
2424 continue;
2425
2426 const DataLayout &DL = F.getDataLayout();
2427
2428 // Walk the block backwards for efficiency. We're matching a chain of
2429 // use->defs, so we're more likely to succeed by starting from the bottom.
2430 // Also, we want to avoid matching partial patterns.
2431 // TODO: It would be more efficient if we removed dead instructions
2432 // iteratively in this loop rather than waiting until the end.
2433 for (Instruction &I : make_early_inc_range(llvm::reverse(BB))) {
2434 MadeChange |= foldAnyOrAllBitsSet(I);
2435 MadeChange |= foldGuardedFunnelShift(I, DT);
2436 MadeChange |= foldSelectSplitCTTZ(I);
2437 MadeChange |= foldSelectSplitCTLZ(I);
2438 MadeChange |= tryToRecognizePopCount(I);
2439 MadeChange |= tryToRecognizePopCount1(I);
2440 MadeChange |= tryToRecognizePopCount2n3(I);
2441 MadeChange |= tryToFPToSat(I, TTI);
2442 MadeChange |= tryToRecognizeTableBasedCttz(I, DL);
2443 MadeChange |= tryToRecognizeTableBasedLog2(I, DL, TTI);
2444 MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT);
2445 MadeChange |= foldPatternedLoads(I, DL);
2446 MadeChange |= foldICmpOrChain(I, DL, TTI, AA, DT);
2447 MadeChange |= foldMulHigh(I);
2448 // NOTE: This function may erase the instruction `I`, so it needs to be
2449 // called at the end of this sequence; running it earlier could leave the
2450 // following folds operating on an erased instruction.
2451 MadeChange |= foldLibCalls(I, TTI, TLI, AC, DT, DL, MadeCFGChange);
2452 }
2453
2454 // Do this separately to avoid redundantly scanning stores multiple times.
2455 MadeChange |= foldConsecutiveStores(BB, DL, TTI, AA);
2456 }
2457
2458 // We're done with transforms, so remove dead instructions.
2459 if (MadeChange)
2460 for (BasicBlock &BB : F)
2461 SimplifyInstructionsInBlock(&BB);
2462
2463 return MadeChange;
2464}
2465
2466/// This is the entry point for all transforms. Pass manager differences are
2467/// handled in the callers of this function.
2468static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI,
2469 TargetLibraryInfo &TLI, DominatorTree &DT,
2470 AliasAnalysis &AA, bool &MadeCFGChange) {
2471 bool MadeChange = false;
2472 const DataLayout &DL = F.getDataLayout();
2473 TruncInstCombine TIC(AC, TLI, DL, DT);
2474 MadeChange |= TIC.run(F);
2475 MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI, AA, AC, MadeCFGChange);
2476 return MadeChange;
2477}
2478
2479PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
2480 FunctionAnalysisManager &AM) {
2481 auto &AC = AM.getResult<AssumptionAnalysis>(F);
2482 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2483 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2484 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
2485 auto &AA = AM.getResult<AAManager>(F);
2486 bool MadeCFGChange = false;
2487 if (!runImpl(F, AC, TTI, TLI, DT, AA, MadeCFGChange)) {
2488 // No changes, all analyses are preserved.
2489 return PreservedAnalyses::all();
2490 }
2491 // Mark all the analyses that instcombine updates as preserved.
2492 PreservedAnalyses PA;
2493 if (MadeCFGChange)
2494 PA.preserve<DominatorTreeAnalysis>();
2495 else
2496 PA.preserveSet<CFGAnalyses>();
2497 return PA;
2498}