LLVM 23.0.0git
CombinerHelper.h
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===--------------------------------------------------------------------===//
8/// \file
9/// This contains common combine transformations that may be used in a combine
10/// pass, or by the target elsewhere.
11/// Targets can pick individual opcode transformations from the helper or use
12/// tryCombine which invokes all transformations. All of the transformations
13/// return true if the MachineInstruction changed and false otherwise.
14///
15//===--------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
18#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
19
20#include "llvm/ADT/DenseMap.h"
26#include "llvm/IR/InstrTypes.h"
27#include <functional>
28
29namespace llvm {
30
32class APInt;
33class ConstantFP;
34class GPtrAdd;
35class GZExtLoad;
39class MachineInstr;
40class MachineOperand;
43class LegalizerInfo;
44struct LegalityQuery;
45class RegisterBank;
47class TargetInstrInfo;
48class TargetLowering;
50
52 LLT Ty; // The result type of the extend.
53 unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
55};
56
61 bool RematOffset = false; // True if Offset is a constant that needs to be
62 // rematerialized before the new load/store.
63 bool IsPre = false;
64};
65
67 int64_t Imm;
70 unsigned Flags;
71};
72
75 int64_t Imm;
76};
77
84
93
94using BuildFnTy = std::function<void(MachineIRBuilder &)>;
95
97 SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
99 unsigned Opcode = 0; /// The opcode for the produced instruction.
100 OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
104};
105
107 /// Describes instructions to be built during a combine.
111 std::initializer_list<InstructionBuildSteps> InstrsToBuild)
113};
114
116protected:
127
128public:
130 bool IsPreLegalize, GISelValueTracking *VT = nullptr,
131 MachineDominatorTree *MDT = nullptr,
132 const LegalizerInfo *LI = nullptr);
133
135
137 return Builder;
138 }
139
140 const TargetInstrInfo &getTII() const { return *TII; }
141
142 const TargetRegisterInfo &getTRI() const { return *TRI; }
143
144 const RegisterBankInfo &getRBI() const { return *RBI; }
145
146 const TargetLowering &getTargetLowering() const;
147
148 const MachineFunction &getMachineFunction() const;
149
150 const DataLayout &getDataLayout() const;
151
152 LLVMContext &getContext() const;
153
154 /// \returns true if the combiner is running pre-legalization.
155 bool isPreLegalize() const;
156
157 /// \returns true if \p Query is legal on the target.
158 bool isLegal(const LegalityQuery &Query) const;
159
160 /// \return true if the combine is running prior to legalization, or if \p
161 /// Query is legal on the target.
162 bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
163
164 /// \return true if \p Query is legal on the target, or if \p Query will
165 /// perform WidenScalar action on the target.
166 bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const;
167
168 /// \return true if \p Query is legal on the target, or if \p Query will
169 /// perform a FewerElements action on the target.
170 bool isLegalOrHasFewerElements(const LegalityQuery &Query) const;
171
172 /// \return true if the combine is running prior to legalization, or if \p Ty
173 /// is a legal integer constant type on the target.
174 bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;
175
176 /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
177 void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
178
179 /// Replace a single register operand with a new register and inform the
180 /// observer of the changes.
182 Register ToReg) const;
183
184 /// Replace the opcode in instruction with a new opcode and inform the
185 /// observer of the changes.
186 void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;
187
188 /// Get the register bank of \p Reg.
189 /// If Reg has not been assigned a register, a register class,
190 /// or a register bank, then this returns nullptr.
191 ///
192 /// \pre Reg.isValid()
193 const RegisterBank *getRegBank(Register Reg) const;
194
195 /// Set the register bank of \p Reg.
196 /// Does nothing if the RegBank is null.
197 /// This is the counterpart to getRegBank.
198 void setRegBank(Register Reg, const RegisterBank *RegBank) const;
199
200 /// If \p MI is COPY, try to combine it.
201 /// Returns true if MI changed.
202 bool tryCombineCopy(MachineInstr &MI) const;
203 bool matchCombineCopy(MachineInstr &MI) const;
204 void applyCombineCopy(MachineInstr &MI) const;
205
206 /// Returns true if \p DefMI precedes \p UseMI or they are the same
207 /// instruction. Both must be in the same basic block.
208 bool isPredecessor(const MachineInstr &DefMI,
209 const MachineInstr &UseMI) const;
210
211 /// Returns true if \p DefMI dominates \p UseMI. By definition an
212 /// instruction dominates itself.
213 ///
214 /// If we haven't been provided with a MachineDominatorTree during
215 /// construction, this function returns a conservative result that tracks just
216 /// a single basic block.
217 bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const;
218
219 /// If \p MI is extend that consumes the result of a load, try to combine it.
220 /// Returns true if MI changed.
223 PreferredTuple &MatchInfo) const;
225 PreferredTuple &MatchInfo) const;
226
227 /// Match (and (load x), mask) -> zextload x
229 BuildFnTy &MatchInfo) const;
230
231 /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
232 /// load.
234 BuildFnTy &MatchInfo) const;
235
237 IndexedLoadStoreMatchInfo &MatchInfo) const;
239 IndexedLoadStoreMatchInfo &MatchInfo) const;
240
243
244 /// Match sext_inreg(load p), imm -> sextload p
246 std::tuple<Register, unsigned> &MatchInfo) const;
248 std::tuple<Register, unsigned> &MatchInfo) const;
249
250 /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
251 /// when their source operands are identical.
252 bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
253 void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
254
255 /// If a brcond's true block is not the fallthrough, make it so by inverting
256 /// the condition and swapping operands.
258 MachineInstr *&BrCond) const;
260 MachineInstr *&BrCond) const;
261
262 /// If \p MI is G_CONCAT_VECTORS, try to combine it.
263 /// Returns true if MI changed.
264 /// Right now, we support:
265 /// - concat_vector(undef, undef) => undef
266 /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
267 /// build_vector(A, B, C, D)
268 /// ==========================================================
269 /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
270 /// can be flattened into a build_vector.
271 /// In the first case \p Ops will be empty
272 /// In the second case \p Ops will contain the operands
273 /// needed to produce the flattened build_vector.
274 ///
275 /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
278 /// Replace \p MI with a flattened build_vector with \p Ops
279 /// or an implicit_def if \p Ops is empty.
282
285 /// Replace \p MI with a flattened build_vector with \p Ops
286 /// or an implicit_def if \p Ops is empty.
289
290 /// Replace \p MI with a build_vector.
292
293 /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
294 /// Returns true if MI changed.
295 ///
296 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
298 /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
299 /// concat_vectors.
300 /// \p Ops will contain the operands needed to produce the flattened
301 /// concat_vectors.
302 ///
303 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
306 /// Replace \p MI with a concat_vectors with \p Ops.
308 ArrayRef<Register> Ops) const;
309
310 /// Optimize memcpy intrinsics et al, e.g. constant len calls.
311 /// \p MaxLen if non-zero specifies the max length of a mem libcall to inline.
312 ///
313 /// For example (pre-indexed):
314 ///
315 /// $addr = G_PTR_ADD $base, $offset
316 /// [...]
317 /// $val = G_LOAD $addr
318 /// [...]
319 /// $whatever = COPY $addr
320 ///
321 /// -->
322 ///
323 /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
324 /// [...]
325 /// $whatever = COPY $addr
326 ///
327 /// or (post-indexed):
328 ///
329 /// G_STORE $val, $base
330 /// [...]
331 /// $addr = G_PTR_ADD $base, $offset
332 /// [...]
333 /// $whatever = COPY $addr
334 ///
335 /// -->
336 ///
337 /// $addr = G_INDEXED_STORE $val, $base, $offset
338 /// [...]
339 /// $whatever = COPY $addr
340 bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0) const;
341
342 bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
343 void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
344
345 /// Fold (shift (shift base, x), y) -> (shift base (x+y))
346 bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
347 void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
348
349 /// If we have a shift-by-constant of a bitwise logic op that itself has a
350 /// shift-by-constant operand with identical opcode, we may be able to convert
351 /// that into 2 independent shifts followed by the logic op.
353 ShiftOfShiftedLogic &MatchInfo) const;
355 ShiftOfShiftedLogic &MatchInfo) const;
356
357 bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
358
359 /// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
361 MachineInstr &ShiftMI) const;
363 LshrOfTruncOfLshr &MatchInfo) const;
364
365 /// Transform a multiply by a power-of-2 value to a left shift.
366 bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
367 void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
368
369 // Transform a G_SUB with constant on the RHS to G_ADD.
370 bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
371
372 // Transform a G_SHL with an extended source into a narrower shift if
373 // possible.
375 RegisterImmPair &MatchData) const;
377 const RegisterImmPair &MatchData) const;
378
379 /// Fold away a merge of an unmerge of the corresponding values.
380 bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const;
381
382 /// Reduce a shift by a constant to an unmerge and a shift on a half sized
383 /// type. This will not produce a shift smaller than \p TargetShiftSize.
384 bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
385 unsigned &ShiftVal) const;
387 const unsigned &ShiftVal) const;
389 unsigned TargetShiftAmount) const;
390
391 /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
393 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
395 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
396
397 /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
399 SmallVectorImpl<APInt> &Csts) const;
401 SmallVectorImpl<APInt> &Csts) const;
402
403 /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
406 std::function<void(MachineIRBuilder &)> &MatchInfo) const;
407
408 /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
411
412 /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
415
416 /// Transform fp_instr(cst) to constant result of the fp operation.
418 const ConstantFP *Cst) const;
419
420 /// Constant fold a unary integer op (G_CTLZ, G_CTTZ, G_CTPOP and their
421 /// _ZERO_POISON variants, G_ABS, G_BSWAP, G_BITREVERSE) when the operand is
422 /// a scalar constant or a G_BUILD_VECTOR of constants.
424 BuildFnTy &MatchInfo) const;
425
426 /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
429
430 /// Transform PtrToInt(IntToPtr(x)) to x.
432
433 /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
434 /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
435 bool
437 std::pair<Register, bool> &PtrRegAndCommute) const;
438 void
440 std::pair<Register, bool> &PtrRegAndCommute) const;
441
442 // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
445
446 /// Transform anyext(trunc(x)) to x.
448
449 /// Transform zext(trunc(x)) to x.
451
452 /// Transform trunc (shl x, K) to shl (trunc x), K
453 /// if K < VT.getScalarSizeInBits().
454 ///
455 /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
456 /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
457 /// MidVT is obtained by finding a legal type between the trunc's src and dst
458 /// types.
459 bool
461 std::pair<MachineInstr *, LLT> &MatchInfo) const;
462 void
464 std::pair<MachineInstr *, LLT> &MatchInfo) const;
465
466 /// Return true if any explicit use operand on \p MI is defined by a
467 /// G_IMPLICIT_DEF.
469
470 /// Return true if all register explicit use operands on \p MI are defined by
471 /// a G_IMPLICIT_DEF.
473
474 /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
476
477 /// Return true if a G_STORE instruction \p MI is storing an undef value.
478 bool matchUndefStore(MachineInstr &MI) const;
479
480 /// Return true if a G_SELECT instruction \p MI has an undef comparison.
482
483 /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
485
486 /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
487 /// true, \p OpIdx will store the operand index of the known selected value.
488 bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const;
489
490 /// Replace an instruction with a G_FCONSTANT with value \p C.
491 void replaceInstWithFConstant(MachineInstr &MI, double C) const;
492
493 /// Replace an instruction with an G_FCONSTANT with value \p CFP.
495
496 /// Replace an instruction with a G_CONSTANT with value \p C.
497 void replaceInstWithConstant(MachineInstr &MI, int64_t C) const;
498
499 /// Replace an instruction with a G_CONSTANT with value \p C.
501
502 /// Replace an instruction with a G_IMPLICIT_DEF.
504
505 /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
507
508 /// Delete \p MI and replace all of its uses with \p Replacement.
510 Register Replacement) const;
511
512 /// @brief Replaces the shift amount in \p MI with ShiftAmt % BW
513 /// @param MI
515
516 /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
517 /// by equivalent instructions.
518 bool matchEqualDefs(const MachineOperand &MOP1,
519 const MachineOperand &MOP2) const;
520
521 /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value equal to
522 /// \p C.
523 bool matchConstantOp(const MachineOperand &MOP, int64_t C) const;
524
525 /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value exactly
526 /// equal to \p C.
527 bool matchConstantFPOp(const MachineOperand &MOP, double C) const;
528
529 /// @brief Checks if constant at \p ConstIdx is larger than \p MI 's bitwidth
530 /// @param ConstIdx Index of the constant
531 bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const;
532
533 /// Optimize (cond ? x : x) -> x
535
536 /// Optimize (x op x) -> x
537 bool matchBinOpSameVal(MachineInstr &MI) const;
538
539 /// Check if operand \p OpIdx is undef.
540 bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
541
542 /// Check if operand \p MO is known to be a power of 2. When \p OrNegative
543 /// is true, also match operands whose negation is a power of 2 (i.e. whose
544 /// absolute value is a power of 2).
546 bool OrNegative = false) const;
547
548 /// Erase \p MI
549 void eraseInst(MachineInstr &MI) const;
550
551 /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
553 std::tuple<Register, Register> &MatchInfo) const;
555 std::tuple<Register, Register> &MatchInfo) const;
556
557 /// Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`
558 bool matchBinopWithNeg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
559
560 /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
562 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const;
563
564 /// Replace \p MI with a series of instructions described in \p MatchInfo.
566 InstructionStepsMatchInfo &MatchInfo) const;
567
568 /// Match ashr (shl x, C), C -> sext_inreg (C)
570 std::tuple<Register, int64_t> &MatchInfo) const;
572 std::tuple<Register, int64_t> &MatchInfo) const;
573
574 /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
575 bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
576
577 /// \return true if \p MI is a G_AND instruction whose operands are x and y
578 /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
579 ///
580 /// \param [in] MI - The G_AND instruction.
581 /// \param [out] Replacement - A register the G_AND should be replaced with on
582 /// success.
583 bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const;
584
585 /// \return true if \p MI is a G_OR instruction whose operands are x and y
586 /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
587 /// value.)
588 ///
589 /// \param [in] MI - The G_OR instruction.
590 /// \param [out] Replacement - A register the G_OR should be replaced with on
591 /// success.
592 bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const;
593
594 /// \return true if \p MI is a G_SEXT_INREG that can be erased.
596
597 /// Combine inverting a result of a compare into the opposite cond code.
599 SmallVectorImpl<Register> &RegsToNegate) const;
601 SmallVectorImpl<Register> &RegsToNegate) const;
602
603 /// Fold (xor (and x, y), y) -> (and (not x), y)
604 ///{
606 std::pair<Register, Register> &MatchInfo) const;
608 std::pair<Register, Register> &MatchInfo) const;
609 ///}
610
611 /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
612 bool matchPtrAddZero(MachineInstr &MI) const;
613 void applyPtrAddZero(MachineInstr &MI) const;
614
615 /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
617
618 /// Push a binary operator through a select on constants.
619 ///
620 /// binop (select cond, K0, K1), K2 ->
621 /// select cond, (binop K0, K2), (binop K1, K2)
622 bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const;
624 const unsigned &SelectOpNo) const;
625
627 SmallVectorImpl<Register> &MatchInfo) const;
628
630 SmallVectorImpl<Register> &MatchInfo) const;
631
632 /// Match expression trees of the form
633 ///
634 /// \code
635 /// sN *a = ...
636 /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
637 /// \endcode
638 ///
639 /// And check if the tree can be replaced with a M-bit load + possibly a
640 /// bswap.
641 bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const;
642
645
648
651 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
654 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
655
656 /// Use a function which takes in a MachineIRBuilder to perform a combine.
657 /// By default, it erases the instruction \p MI from the function.
658 void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const;
659 /// Use a function which takes in a MachineIRBuilder to perform a combine.
660 /// This variant does not erase \p MI after calling the build function.
661 void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const;
662
663 bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants,
664 BuildFnTy &MatchInfo) const;
669
671 Register &UnmergeSrc) const;
674 Register &UnmergeSrc) const;
675
676 bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
677 void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
678
679 /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
680 /// or false constant based off of KnownBits information.
682 int64_t &MatchInfo) const;
683
684 /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
685 /// KnownBits information.
686 bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const;
687
688 /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
689 bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
690
692 BuildFnTy &MatchInfo) const;
693 /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
695 BuildFnTy &MatchInfo) const;
696
697 /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
699 BuildFnTy &MatchInfo) const;
700
701 /// Match: shr (and x, n), k -> ubfx x, pos, width
703 BuildFnTy &MatchInfo) const;
704
705 // Helpers for reassociation:
707 BuildFnTy &MatchInfo) const;
710 BuildFnTy &MatchInfo) const;
713 BuildFnTy &MatchInfo) const;
714 /// Reassociate pointer calculations with G_ADD involved, to allow better
715 /// addressing mode usage.
716 bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
717
718 /// Try to reassociate to reassociate operands of a commutative binop.
719 bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
720 Register Op1, BuildFnTy &MatchInfo) const;
721 /// Reassociate commutative binary operations like G_ADD.
722 bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const;
723
724 /// Do constant folding when opportunities are exposed after MIR building.
725 bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const;
726
727 /// Do constant folding when opportunities are exposed after MIR building.
728 bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const;
729
730 /// Do constant FP folding when opportunities are exposed after MIR building.
731 bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const;
732
733 /// Constant fold G_FMA/G_FMAD.
734 bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const;
735
736 /// \returns true if it is possible to narrow the width of a scalar binop
737 /// feeding a G_AND instruction \p MI.
738 bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
739
740 /// Given an G_UDIV \p MI or G_UREM \p MI expressing a divide by constant,
741 /// return an expression that implements it by multiplying by a magic number.
742 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
744 /// Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
747
748 /// Given an G_SDIV \p MI or G_SREM \p MI expressing a signed divide by
749 /// constant, return an expression that implements it by multiplying by a
750 /// magic number. Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's
751 /// Guide".
753 /// Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
756
757 /// Given a G_SDIV \p MI expressing a signed divide by a pow2 constant,
758 /// return expressions that implement it by shifting.
759 bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const;
760 void applySDivByPow2(MachineInstr &MI) const;
761 /// Given a G_UDIV \p MI expressing an unsigned divide by a pow2 constant,
762 /// return expressions that implement it by shifting.
763 void applyUDivByPow2(MachineInstr &MI) const;
764
765 /// Combine G_SREM x, (+/-2^k) to a bias-and-mask sequence.
767
768 // G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
769 bool matchUMulHToLShr(MachineInstr &MI) const;
770 void applyUMulHToLShr(MachineInstr &MI) const;
771
772 // Combine trunc(smin(smax(x, C1), C2)) -> truncssat_s(x)
773 // or trunc(smax(smin(x, C2), C1)) -> truncssat_s(x).
774 bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
775 void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
776
777 // Combine trunc(smin(smax(x, 0), C)) -> truncssat_u(x)
778 // or trunc(smax(smin(x, C), 0)) -> truncssat_u(x)
779 // or trunc(umin(smax(x, 0), C)) -> truncssat_u(x)
780 bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
781 void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
782
783 // Combine trunc(umin(x, C)) -> truncusat_u(x).
784 bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const;
785
786 // Combine truncusat_u(fptoui(x)) -> fptoui_sat(x)
788
789 /// Try to transform \p MI by using all of the above
790 /// combine functions. Returns true if changed.
792
793 /// Emit loads and stores that perform the given memcpy.
794 /// Assumes \p MI is a G_MEMCPY_INLINE
795 /// TODO: implement dynamically sized inline memcpy,
796 /// and rename: s/bool tryEmit/void emit/
798
799 /// Match:
800 /// (G_UMULO x, 2) -> (G_UADDO x, x)
801 /// (G_SMULO x, 2) -> (G_SADDO x, x)
802 bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const;
803
804 /// Match:
805 /// (G_*MULO x, 0) -> 0 + no carry out
806 bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const;
807
808 /// Match:
809 /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
810 /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
811 bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const;
812
813 /// Transform (fadd x, fneg(y)) -> (fsub x, y)
814 /// (fadd fneg(x), y) -> (fsub y, x)
815 /// (fsub x, fneg(y)) -> (fadd x, y)
816 /// (fmul fneg(x), fneg(y)) -> (fmul x, y)
817 /// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
818 /// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
819 /// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
820 bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const;
821
822 bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
823 void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
824
825 bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
826 bool &HasFMAD, bool &Aggressive,
827 bool CanReassociate = false) const;
828
829 /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
830 /// (fadd (fmul x, y), z) -> (fmad x, y, z)
832 BuildFnTy &MatchInfo) const;
833
834 /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
835 /// (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
837 BuildFnTy &MatchInfo) const;
838
839 /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
840 /// (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
842 BuildFnTy &MatchInfo) const;
843
844 // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
845 // -> (fma x, y, (fma (fpext u), (fpext v), z))
846 // (fadd (fmad x, y, (fpext (fmul u, v))), z)
847 // -> (fmad x, y, (fmad (fpext u), (fpext v), z))
848 bool
850 BuildFnTy &MatchInfo) const;
851
852 /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
853 /// (fsub (fmul x, y), z) -> (fmad x, y, -z)
855 BuildFnTy &MatchInfo) const;
856
857 /// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
858 /// (fsub (fneg (fmul, x, y)), z) -> (fmad (fneg x), y, (fneg z))
860 BuildFnTy &MatchInfo) const;
861
862 /// Transform (fsub (fpext (fmul x, y)), z)
863 /// -> (fma (fpext x), (fpext y), (fneg z))
864 /// (fsub (fpext (fmul x, y)), z)
865 /// -> (fmad (fpext x), (fpext y), (fneg z))
867 BuildFnTy &MatchInfo) const;
868
869 /// Transform (fsub (fpext (fneg (fmul x, y))), z)
870 /// -> (fneg (fma (fpext x), (fpext y), z))
871 /// (fsub (fpext (fneg (fmul x, y))), z)
872 /// -> (fneg (fmad (fpext x), (fpext y), z))
874 BuildFnTy &MatchInfo) const;
875
876 bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const;
877
879 SmallVector<MachineInstr *> &MatchInfo) const;
881
882 /// Transform G_ADD(x, G_SUB(y, x)) to y.
883 /// Transform G_ADD(G_SUB(y, x), x) to y.
884 bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const;
885
887 Register &MatchInfo) const;
888 bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const;
890 Register &MatchInfo) const;
891
892 /// Transform:
893 /// (x + y) - y -> x
894 /// (x + y) - x -> y
895 /// x - (y + x) -> 0 - y
896 /// x - (x + z) -> 0 - z
897 bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
898
899 /// \returns true if it is possible to simplify a select instruction \p MI
900 /// to a min/max instruction of some sort.
902 BuildFnTy &MatchInfo) const;
903
904 /// Transform:
905 /// (X + Y) == X -> Y == 0
906 /// (X - Y) == X -> Y == 0
907 /// (X ^ Y) == X -> Y == 0
908 /// (X + Y) != X -> Y != 0
909 /// (X - Y) != X -> Y != 0
910 /// (X ^ Y) != X -> Y != 0
912 BuildFnTy &MatchInfo) const;
913
914 /// Match shifts greater or equal to the range (the bitwidth of the result
915 /// datatype, or the effective bitwidth of the source value).
917 std::optional<int64_t> &MatchInfo) const;
918
919 /// Match constant LHS ops that should be commuted.
921
922 /// Combine sext of trunc.
923 bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
924
925 /// Combine zext of trunc.
926 bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
927
928 /// Combine zext nneg to sext.
929 bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
930
931 /// Match constant LHS FP ops that should be commuted.
933
934 // Given a binop \p MI, commute operands 1 and 2.
936
937 /// Combine select to integer min/max.
938 bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
939
940 /// Transform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
941 bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const;
942
943 /// Combine selects.
944 bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const;
945
946 /// Combine ands.
947 bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
948
949 /// Combine ors.
950 bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const;
951
952 /// trunc (binop X, C) --> binop (trunc X, trunc C).
953 bool matchNarrowBinop(const MachineInstr &TruncMI,
954 const MachineInstr &BinopMI,
955 BuildFnTy &MatchInfo) const;
956
957 bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const;
958
959 /// Combine addos.
960 bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const;
961
962 /// Combine extract vector element.
963 bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const;
964
965 /// Combine extract vector element with a build vector on the vector register.
967 const MachineInstr &MI2,
968 BuildFnTy &MatchInfo) const;
969
970 /// Combine extract vector element with a build vector trunc on the vector
971 /// register.
972 bool
974 BuildFnTy &MatchInfo) const;
975
976 /// Combine extract vector element with a shuffle vector on the vector
977 /// register.
979 const MachineInstr &MI2,
980 BuildFnTy &MatchInfo) const;
981
982 /// Combine extract vector element with a insert vector element on the vector
983 /// register and different indices.
984 bool
986 BuildFnTy &MatchInfo) const;
987
988 /// Remove references to rhs if it is undef
989 bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const;
990
991 /// Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not
992 /// reference a.
993 bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
994
995 /// Use a function which takes in a MachineIRBuilder to perform a combine.
996 /// By default, it erases the instruction def'd on \p MO from the function.
997 void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
998
999 /// Match FPOWI if it's safe to extend it into a series of multiplications.
1000 bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const;
1001
1002 /// Expands FPOWI into a series of multiplications and a division if the
1003 /// exponent is negative.
1004 void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const;
1005
1006 /// Combine insert vector element OOB.
1008 BuildFnTy &MatchInfo) const;
1009
1010 bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI,
1011 BuildFnTy &MatchInfo) const;
1012
1013 bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1014
1015 bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1016
1017 bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1018
1019 bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1020
1021 /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
1022 bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
1023 BuildFnTy &MatchInfo) const;
1024
1025 bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
1026 BuildFnTy &MatchInfo) const;
1027 bool matchFoldAPlusC1MinusC2(const MachineInstr &MI,
1028 BuildFnTy &MatchInfo) const;
1029
1030 bool matchFoldC2MinusAPlusC1(const MachineInstr &MI,
1031 BuildFnTy &MatchInfo) const;
1032
1033 bool matchFoldAMinusC1MinusC2(const MachineInstr &MI,
1034 BuildFnTy &MatchInfo) const;
1035
1036 bool matchFoldC1Minus2MinusC2(const MachineInstr &MI,
1037 BuildFnTy &MatchInfo) const;
1038
1039 // fold ((A-C1)+C2) -> (A+(C2-C1))
1040 bool matchFoldAMinusC1PlusC2(const MachineInstr &MI,
1041 BuildFnTy &MatchInfo) const;
1042
1043 bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI,
1044 BuildFnTy &MatchInfo) const;
1045
1046 bool matchCastOfBuildVector(const MachineInstr &CastMI,
1047 const MachineInstr &BVMI,
1048 BuildFnTy &MatchInfo) const;
1049
1050 bool matchCanonicalizeICmp(const MachineInstr &MI,
1051 BuildFnTy &MatchInfo) const;
1052 bool matchCanonicalizeFCmp(const MachineInstr &MI,
1053 BuildFnTy &MatchInfo) const;
1054
1055 // unmerge_values(anyext(build vector)) -> build vector(anyext)
1056 bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI,
1057 BuildFnTy &MatchInfo) const;
1058
1059 // merge_values(_, undef) -> anyext
1060 bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1061
1062 // merge_values(_, zero) -> zext
1063 bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1064
1065 // overflow sub
1066 bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1067
1068 // (sext_inreg (sext_inreg x, K0), K1)
1069 bool matchRedundantSextInReg(MachineInstr &Root, MachineInstr &Other,
1070 BuildFnTy &MatchInfo) const;
1071
1072 // (ctlz (xor x, (sra x, bitwidth-1))) -> (add (ctls x), 1) or
1073 // (ctlz (or (shl (xor x, (sra x, bitwidth-1)), 1), 1) -> (ctls x)
1074 bool matchCtls(MachineInstr &CtlzMI, BuildFnTy &MatchInfo) const;
1075
1076private:
1077 /// Checks for legality of an indexed variant of \p LdSt.
1078 bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
1079
1080 /// Helper function for matchBinopWithNeg: tries to match one commuted form
1081 /// of `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`.
1082 bool matchBinopWithNegInner(Register MInner, Register Other, unsigned RootOpc,
1083 Register Dst, LLT Ty, BuildFnTy &MatchInfo) const;
1084 /// Given a non-indexed load or store instruction \p MI, find an offset that
1085 /// can be usefully and legally folded into it as a post-indexing operation.
1086 ///
1087 /// \returns true if a candidate is found.
1088 bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
1089 Register &Offset, bool &RematOffset) const;
1090
1091 /// Given a non-indexed load or store instruction \p MI, find an offset that
1092 /// can be usefully and legally folded into it as a pre-indexing operation.
1093 ///
1094 /// \returns true if a candidate is found.
1095 bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
1096 Register &Offset) const;
1097
1098 /// Helper function for matchLoadOrCombine. Searches for Registers
1099 /// which may have been produced by a load instruction + some arithmetic.
1100 ///
1101 /// \param [in] Root - The search root.
1102 ///
1103 /// \returns The Registers found during the search.
1104 std::optional<SmallVector<Register, 8>>
1105 findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
1106
1107 /// Helper function for matchLoadOrCombine.
1108 ///
1109 /// Checks if every register in \p RegsToVisit is defined by a load
1110 /// instruction + some arithmetic.
1111 ///
1112 /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
1113 /// at to the index of the load.
1114 /// \param [in] MemSizeInBits - The number of bits each load should produce.
1115 ///
1116 /// \returns On success, a 3-tuple containing lowest-index load found, the
1117 /// lowest index, and the last load in the sequence.
1118 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
1119 findLoadOffsetsForLoadOrCombine(
1120 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
1121 const SmallVector<Register, 8> &RegsToVisit,
1122 const unsigned MemSizeInBits) const;
1123
1124 /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
1125 /// a re-association of its operands would break an existing legal addressing
1126 /// mode that the address computation currently represents.
1127 bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd) const;
1128
1129 /// Behavior when a floating point min/max is given one NaN and one
1130 /// non-NaN as input.
1131 enum class SelectPatternNaNBehaviour {
1132 NOT_APPLICABLE = 0, /// NaN behavior not applicable.
1133 RETURNS_NAN, /// Given one NaN input, returns the NaN.
1134 RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
1135 RETURNS_ANY /// Given one NaN input, can return either (or both operands are
1136 /// known non-NaN.)
1137 };
1138
1139 /// \returns which of \p LHS and \p RHS would be the result of a non-equality
1140 /// floating point comparison where one of \p LHS and \p RHS may be NaN.
1141 ///
1142 /// If both \p LHS and \p RHS may be NaN, returns
1143 /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
1144 SelectPatternNaNBehaviour
1145 computeRetValAgainstNaN(Register LHS, Register RHS,
1146 bool IsOrderedComparison) const;
1147
1148 /// Determines the floating point min/max opcode which should be used for
1149 /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
1150 ///
1151 /// \returns 0 if this G_SELECT should not be combined to a floating point
1152 /// min or max. If it should be combined, returns one of
1153 ///
1154 /// * G_FMAXNUM
1155 /// * G_FMAXIMUM
1156 /// * G_FMINNUM
1157 /// * G_FMINIMUM
1158 ///
1159 /// Helper function for matchFPSelectToMinMax.
1160 unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
1161 SelectPatternNaNBehaviour VsNaNRetVal) const;
1162
1163 /// Handle floating point cases for matchSimplifySelectToMinMax.
1164 ///
1165 /// E.g.
1166 ///
1167 /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
1168 /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
1169 bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
1170 Register FalseVal, BuildFnTy &MatchInfo) const;
1171
1172 /// Try to fold selects to logical operations.
1173 bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo) const;
1174
1175 bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo) const;
1176
1177 bool isOneOrOneSplat(Register Src, bool AllowUndefs) const;
1178 bool isZeroOrZeroSplat(Register Src, bool AllowUndefs) const;
1179 bool isConstantSplatVector(Register Src, int64_t SplatValue,
1180 bool AllowUndefs) const;
1181 bool isConstantOrConstantVectorI(Register Src) const;
1182
1183 std::optional<APInt> getConstantOrConstantSplatVector(Register Src) const;
1184
1185 /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
1186 /// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
1187 /// into a single comparison using range-based reasoning.
1188 bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
1189 BuildFnTy &MatchInfo) const;
1190
1191 // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
1192 bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const;
1193
1194 bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;
1195
1196 bool constantFoldICmp(const GICmp &ICmp, const GIConstant &LHSCst,
1197 const GIConstant &RHSCst, BuildFnTy &MatchInfo) const;
1198 bool constantFoldFCmp(const GFCmp &FCmp, const GFConstant &LHSCst,
1199 const GFConstant &RHSCst, BuildFnTy &MatchInfo) const;
1200};
1201} // namespace llvm
1202
1203#endif
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Register Bank Select
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file defines the DenseMap class.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isConstantSplatVector(SDValue N, APInt &SplatValue, unsigned MinSizeInBits)
Implement a low-level type suitable for MachineInstr level instruction selection.
Register Reg
MachineInstr unsigned OpIdx
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRepeatedFPDivisor(MachineInstr &MI, SmallVector< MachineInstr * > &MatchInfo) const
bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match expression trees of the form.
bool tryCombine(MachineInstr &MI) const
Try to transform MI by using all of the above combine functions.
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
void applyPtrAddZero(MachineInstr &MI) const
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2) const
Return true if MOP1 and MOP2 are register operands are defined by equivalent instructions.
void applyUDivOrURemByConst(MachineInstr &MI) const
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCtls(MachineInstr &CtlzMI, BuildFnTy &MatchInfo) const
bool matchSelectSameVal(MachineInstr &MI) const
Optimize (cond ? x : x) -> x.
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
void applySimplifyURemByPow2(MachineInstr &MI) const
Combine G_UREM x, (known power of 2) to an add and bitmasking.
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
bool matchPtrAddZero(MachineInstr &MI) const
}
const TargetInstrInfo * TII
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false) const
bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
bool matchShiftsTooBig(MachineInstr &MI, std::optional< int64_t > &MatchInfo) const
Match shifts greater or equal to the range (the bitwidth of the result datatype, or the effective bit...
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement) const
Delete MI and replace all of its uses with Replacement.
void applyCombineShuffleToBuildVector(MachineInstr &MI) const
Replace MI with a build_vector.
bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext of trunc.
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate commutative binary operations like G_ADD.
bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector trunc on the vector register.
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCommuteConstantToRHS(MachineInstr &MI) const
Match constant LHS ops that should be commuted.
const DataLayout & getDataLayout() const
bool matchBinOpSameVal(MachineInstr &MI) const
Optimize (x op x) -> x.
bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
Tranform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext nneg to sext.
void applyUMulHToLShr(MachineInstr &MI) const
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
bool isLegalOrHasFewerElements(const LegalityQuery &Query) const
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
Fold (shift (shift base, x), y) -> (shift base (x+y))
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI precedes UseMI or they are the same instruction.
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
const TargetLowering & getTargetLowering() const
bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a insert vector element on the vector register and different indi...
bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const
Remove references to rhs if it is undef.
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Replace MI with a series of instructions described in MatchInfo.
void applySDivByPow2(MachineInstr &MI) const
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
void applyUDivByPow2(MachineInstr &MI) const
Given an G_UDIV MI expressing an unsigned divided by a pow2 constant, return expressions that impleme...
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ors.
bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo, MachineInstr &ShiftMI) const
Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine insert vector element OOB.
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
Return true if MI is a G_ADD which can be simplified to a G_SUB.
void replaceInstWithConstant(MachineInstr &MI, int64_t C) const
Replace an instruction with a G_CONSTANT with value C.
bool tryEmitMemcpyInline(MachineInstr &MI) const
Emit loads and stores that perform the given memcpy.
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const
Checks if constant at ConstIdx is larger than MI 's bitwidth.
GISelValueTracking * getValueTracking() const
void applyCombineCopy(MachineInstr &MI) const
bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine extract vector element.
bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine sext of trunc.
bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const
Transform G_ADD(x, G_SUB(y, x)) to y.
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) const
bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
bool matchSextTruncSextLoad(MachineInstr &MI) const
bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const
Fold away a merge of an unmerge of the corresponding values.
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineBuildUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI, Register &UnmergeSrc) const
bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const
Given an G_SDIV MI expressing a signed divided by a pow2 constant, return expressions that implements...
bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match (and (load x), mask) -> zextload x.
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
bool matchCombineCopy(MachineInstr &MI) const
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
Fold (xor (and x, y), y) -> (and (not x), y) {.
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops) const
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI, BuildFnTy &MatchInfo) const
Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
void replaceInstWithFConstant(MachineInstr &MI, double C) const
Replace an instruction with a G_FCONSTANT with value C.
bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchFunnelShiftToRotate(MachineInstr &MI) const
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants, BuildFnTy &MatchInfo) const
bool matchRedundantSExtInReg(MachineInstr &MI) const
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
void applyFunnelShiftConstantModulo(MachineInstr &MI) const
Replaces the shift amount in MI with ShiftAmt % BW.
bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData) const
void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, GISelValueTracking *VT=nullptr, MachineDominatorTree *MDT=nullptr, const LegalizerInfo *LI=nullptr)
bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not reference a.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
Transform a multiply by a power-of-2 value to a left shift.
void applyCombineShuffleVector(MachineInstr &MI, ArrayRef< Register > Ops) const
Replace MI with a concat_vectors with Ops.
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo) const
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo) const
SelectOperand is the operand in binary operator MI that is the select to fold.
bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
bool tryCombineCopy(MachineInstr &MI) const
If MI is COPY, try to combine it.
bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const
const RegisterBankInfo & getRBI() const
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI, BuildFnTy &MatchInfo) const
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
bool matchCanonicalizeFCmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchUndefShuffleVectorMask(MachineInstr &MI) const
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
const TargetRegisterInfo & getTRI() const
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
If MI is G_CONCAT_VECTORS, try to combine it.
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
bool matchExtractVectorElementWithShuffleVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a shuffle vector on the vector register.
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
LLVMContext & getContext() const
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
Combine inverting a result of a compare into the opposite cond code.
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
Match sext_inreg(load p), imm -> sextload p.
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine select to integer min/max.
bool matchConstantFoldUnaryIntOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Constant fold a unary integer op (G_CTLZ, G_CTTZ, G_CTPOP and their _ZERO_POISON variants,...
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst) const
Transform fp_instr(cst) to constant result of the fp operation.
bool isLegal(const LegalityQuery &Query) const
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo) const
bool matchOperandIsKnownToBeAPowerOfTwo(const MachineOperand &MO, bool OrNegative=false) const
Check if operand MO is known to be a power of 2.
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo) const
Try to reassociate to reassociate operands of a commutative binop.
void eraseInst(MachineInstr &MI) const
Erase MI.
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const
Do constant FP folding when opportunities are exposed after MIR building.
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
bool matchUndefStore(MachineInstr &MI) const
Return true if a G_STORE instruction MI is storing an undef value.
MachineRegisterInfo & MRI
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const
Transform PtrToInt(IntToPtr(x)) to x.
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
bool matchConstantFPOp(const MachineOperand &MOP, double C) const
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
MachineInstr * buildUDivOrURemUsingMul(MachineInstr &MI) const
Given an G_UDIV MI or G_UREM MI expressing a divide by constant, return an expression that implements...
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const
Push a binary operator through a select on constants.
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount) const
bool tryCombineExtendingLoads(MachineInstr &MI) const
If MI is extend that consumes the result of a load, try to combine it.
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo) const
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (and x, n), k -> ubfx x, pos, width.
void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
bool tryCombineShuffleVector(MachineInstr &MI) const
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
void applyRotateOutOfRange(MachineInstr &MI) const
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool matchNarrowBinop(const MachineInstr &TruncMI, const MachineInstr &BinopMI, BuildFnTy &MatchInfo) const
trunc (binop X, C) --> binop (trunc X, trunc C).
bool matchUndefSelectCmp(MachineInstr &MI) const
Return true if a G_SELECT instruction MI has an undef comparison.
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
void replaceInstWithUndef(MachineInstr &MI) const
Replace an instruction with a G_IMPLICIT_DEF.
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (X + Y) == X -> Y == 0 (X - Y) == X -> Y == 0 (X ^ Y) == X -> Y == 0 (X + Y) !...
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine addos.
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine selects.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchRotateOutOfRange(MachineInstr &MI) const
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
void setRegBank(Register Reg, const RegisterBank *RegBank) const
Set the register bank of Reg.
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const
Return true if a G_SELECT instruction MI has a constant comparison.
bool matchCommuteFPConstantToRHS(MachineInstr &MI) const
Match constant LHS FP ops that should be commuted.
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
const TargetInstrInfo & getTII() const
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const
bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const
void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
void applySimplifySRemByPow2(MachineInstr &MI) const
Combine G_SREM x, (+/-2^k) to a bias-and-mask sequence.
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
bool matchConstantOp(const MachineOperand &MOP, int64_t C) const
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
const LegalizerInfo * LI
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
void applyCombineBuildUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B, Register &UnmergeSrc) const
bool matchUMulHToLShr(MachineInstr &MI) const
MachineDominatorTree * MDT
MachineIRBuilder & getBuilder() const
void applyFunnelShiftToRotate(MachineInstr &MI) const
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyRepeatedFPDivisor(SmallVector< MachineInstr * > &MatchInfo) const
bool matchTruncUSatUToFPTOUISat(MachineInstr &MI, MachineInstr &SrcMI) const
const RegisterBankInfo * RBI
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*MULO x, 0) -> 0 + no carry out.
GISelValueTracking * VT
bool matchBinopWithNeg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold a bitwiseop (~b +/- c) -> a bitwiseop ~(b -/+ c)
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
const TargetRegisterInfo * TRI
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI dominates UseMI.
GISelChangeObserver & Observer
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal) const
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
bool matchUDivOrURemByConst(MachineInstr &MI) const
Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ands.
bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRedundantSextInReg(MachineInstr &Root, MachineInstr &Other, BuildFnTy &MatchInfo) const
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const
Constant fold G_FMA/G_FMAD.
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) (fsub (fneg (fmul,...
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg) const
Transform zext(trunc(x)) to x.
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is undef.
void applyLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo) const
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0) const
Optimize memcpy intrinsics et al, e.g.
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applySDivOrSRemByConst(MachineInstr &MI) const
MachineInstr * buildSDivOrSRemUsingMul(MachineInstr &MI) const
Given a G_SDIV MI or G_SREM MI expressing a signed divide by constant, return an expression that imp...
bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const
bool matchCanonicalizeICmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCastOfBuildVector(const MachineInstr &CastMI, const MachineInstr &BVMI, BuildFnTy &MatchInfo) const
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (x + y) - y -> x (x + y) - x -> y x - (y + x) -> 0 - y x - (x + z) -> 0 - z.
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) const
Transform anyext(trunc(x)) to x.
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
MachineIRBuilder & Builder
void applyCommuteBinOpOperands(MachineInstr &MI) const
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const
Delete MI and replace all of its uses with its OpIdx-th operand.
void applySextTruncSextLoad(MachineInstr &MI) const
const MachineFunction & getMachineFunction() const
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVectorElementWithBuildVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector on the vector register.
bool matchSDivOrSRemByConst(MachineInstr &MI) const
Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal) const
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI, BuildFnTy &MatchInfo) const
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const
Match FPOWI if it's safe to expand it into a series of multiplications.
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
Match ashr (shl x, C), C -> sext_inreg (C)
void applyCombineUnmergeZExtToZExt(MachineInstr &MI) const
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
Represent a G_FCMP.
A floating-point-like constant.
Definition Utils.h:679
Represent a G_ICMP.
An integer-like constant.
Definition Utils.h:640
Abstract class that contains various methods for clients to notify about changes.
Represents any type of generic load or store.
Represents a logical binary operation.
Represents a G_PTR_ADD.
Represents a G_SELECT.
Represents a G_ZEXTLOAD.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Helper class to build MachineInstr.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Holds all the information related to register banks.
This class implements the register bank concept.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
std::function< void(MachineIRBuilder &)> BuildFnTy
SmallVector< std::function< void(MachineInstrBuilder &)>, 4 > OperandBuildSteps
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Other
Any other memory.
Definition ModRef.h:68
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
InstructionBuildSteps()=default
Operands to be added to the instruction.
OperandBuildSteps OperandFns
The opcode for the produced instruction.
InstructionStepsMatchInfo(std::initializer_list< InstructionBuildSteps > InstrsToBuild)
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
const RegisterBank * Bank