// LLVM 23.0.0git — PeepholeOptimizer.cpp
// (Listing extracted from the generated doxygen page for this file.)
1//===- PeepholeOptimizer.cpp - Peephole Optimizations ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Perform peephole optimizations on the machine code:
10//
11// - Optimize Extensions
12//
13// Optimization of sign / zero extension instructions. It may be extended to
14// handle other instructions with similar properties.
15//
16// On some targets, some instructions, e.g. X86 sign / zero extension, may
17// leave the source value in the lower part of the result. This optimization
18// will replace some uses of the pre-extension value with uses of the
19// sub-register of the results.
20//
21// - Optimize Comparisons
22//
23// Optimization of comparison instructions. For instance, in this code:
24//
25// sub r1, 1
26// cmp r1, 0
27// bz L1
28//
// If the "sub" instruction already sets (or could be modified to set) the
30// same flag that the "cmp" instruction sets and that "bz" uses, then we can
31// eliminate the "cmp" instruction.
32//
33// Another instance, in this code:
34//
35// sub r1, r3 | sub r1, imm
36// cmp r3, r1 or cmp r1, r3 | cmp r1, imm
37// bge L1
38//
39// If the branch instruction can use flag from "sub", then we can replace
40// "sub" with "subs" and eliminate the "cmp" instruction.
41//
42// - Optimize Loads:
43//
44// Loads that can be folded into a later instruction. A load is foldable
45// if it loads to virtual registers and the virtual register defined has
46// a single use.
47//
48// - Optimize Copies and Bitcast (more generally, target specific copies):
49//
50// Rewrite copies and bitcasts to avoid cross register bank copies
51// when possible.
52// E.g., Consider the following example, where capital and lower
53// letters denote different register file:
54// b = copy A <-- cross-bank copy
55// C = copy b <-- cross-bank copy
56// =>
57// b = copy A <-- cross-bank copy
58// C = copy A <-- same-bank copy
59//
60// E.g., for bitcast:
61// b = bitcast A <-- cross-bank copy
62// C = bitcast b <-- cross-bank copy
63// =>
64// b = bitcast A <-- cross-bank copy
65// C = copy A <-- same-bank copy
66//===----------------------------------------------------------------------===//
67
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/SmallSet.h"
73#include "llvm/ADT/Statistic.h"
89#include "llvm/MC/LaneBitmask.h"
90#include "llvm/MC/MCInstrDesc.h"
91#include "llvm/Pass.h"
93#include "llvm/Support/Debug.h"
95#include <cassert>
96#include <cstdint>
97#include <utility>
98
99using namespace llvm;
102
103#define DEBUG_TYPE "peephole-opt"
104
105// Optimize Extensions
106static cl::opt<bool> Aggressive("aggressive-ext-opt", cl::Hidden,
107 cl::desc("Aggressive extension optimization"));
108
109static cl::opt<bool>
110 DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
111 cl::desc("Disable the peephole optimizer"));
112
113/// Specifiy whether or not the value tracking looks through
114/// complex instructions. When this is true, the value tracker
115/// bails on everything that is not a copy or a bitcast.
116static cl::opt<bool>
117 DisableAdvCopyOpt("disable-adv-copy-opt", cl::Hidden, cl::init(false),
118 cl::desc("Disable advanced copy optimization"));
119
121 "disable-non-allocatable-phys-copy-opt", cl::Hidden, cl::init(false),
122 cl::desc("Disable non-allocatable physical register copy optimization"));
123
124// Limit the number of PHI instructions to process
125// in PeepholeOptimizer::getNextSource.
127 RewritePHILimit("rewrite-phi-limit", cl::Hidden, cl::init(10),
128 cl::desc("Limit the length of PHI chains to lookup"));
129
130// Limit the length of recurrence chain when evaluating the benefit of
131// commuting operands.
133 "recurrence-chain-limit", cl::Hidden, cl::init(3),
134 cl::desc("Maximum length of recurrence chain when evaluating the benefit "
135 "of commuting operands"));
136
137STATISTIC(NumReuse, "Number of extension results reused");
138STATISTIC(NumCmps, "Number of compares eliminated");
139STATISTIC(NumImmFold, "Number of move immediate folded");
140STATISTIC(NumLoadFold, "Number of loads folded");
141STATISTIC(NumSelects, "Number of selects optimized");
142STATISTIC(NumUncoalescableCopies, "Number of uncoalescable copies optimized");
143STATISTIC(NumRewrittenCopies, "Number of copies rewritten");
144STATISTIC(NumNAPhysCopies, "Number of non-allocatable physical copies removed");
145
146namespace {
147
148class ValueTrackerResult;
149class RecurrenceInstr;
150
/// Interface to query instructions amenable to copy rewriting.
class Rewriter {
protected:
  MachineInstr &CopyLike;  ///< The copy-like instruction being rewritten.
  int CurrentSrcIdx = 0; ///< The index of the source being rewritten.
public:
  Rewriter(MachineInstr &CopyLike) : CopyLike(CopyLike) {}
  virtual ~Rewriter() = default;

  /// Get the next rewritable source (SrcReg, SrcSubReg) and
  /// the related value that it affects (DstReg, DstSubReg).
  /// A source is considered rewritable if its register class and the
  /// register class of the related DstReg may not be register
  /// coalescer friendly. In other words, given a copy-like instruction
  /// not all the arguments may be returned as rewritable source, since
  /// some arguments are known to be register coalescer friendly.
  ///
  /// Each call of this method moves the current source to the next
  /// rewritable source.
  /// For instance, let CopyLike be the instruction to rewrite.
  /// CopyLike has one definition and one source:
  /// dst.dstSubIdx = CopyLike src.srcSubIdx.
  ///
  /// The first call will give the first rewritable source, i.e.,
  /// the only source this instruction has:
  /// (SrcReg, SrcSubReg) = (src, srcSubIdx).
  /// This source defines the whole definition, i.e.,
  /// (DstReg, DstSubReg) = (dst, dstSubIdx).
  ///
  /// The second and subsequent calls will return false, as there is only one
  /// rewritable source.
  ///
  /// \return True if a rewritable source has been found, false otherwise.
  /// The output arguments are valid if and only if true is returned.
  virtual bool getNextRewritableSource(RegSubRegPair &Src,
                                       RegSubRegPair &Dst) = 0;

  /// Rewrite the current source with \p NewReg and \p NewSubReg if possible.
  /// \return True if the rewriting was possible, false otherwise.
  virtual bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) = 0;
};
192
193/// Rewriter for COPY instructions.
194class CopyRewriter : public Rewriter {
195public:
196 CopyRewriter(MachineInstr &MI) : Rewriter(MI) {
197 assert(MI.isCopy() && "Expected copy instruction");
198 }
199 ~CopyRewriter() override = default;
200
201 bool getNextRewritableSource(RegSubRegPair &Src,
202 RegSubRegPair &Dst) override {
203 if (++CurrentSrcIdx > 1)
204 return false;
205
206 // The rewritable source is the argument.
207 const MachineOperand &MOSrc = CopyLike.getOperand(CurrentSrcIdx);
208 Src = RegSubRegPair(MOSrc.getReg(), MOSrc.getSubReg());
209 // What we track are the alternative sources of the definition.
210 const MachineOperand &MODef = CopyLike.getOperand(0);
211 Dst = RegSubRegPair(MODef.getReg(), MODef.getSubReg());
212 return true;
213 }
214
215 bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
216 MachineOperand &MOSrc = CopyLike.getOperand(CurrentSrcIdx);
217 MOSrc.setReg(NewReg);
218 MOSrc.setSubReg(NewSubReg);
219 return true;
220 }
221};
222
223/// Helper class to rewrite uncoalescable copy like instructions
224/// into new COPY (coalescable friendly) instructions.
225class UncoalescableRewriter : public Rewriter {
226 int NumDefs; ///< Number of defs in the bitcast.
227
228public:
229 UncoalescableRewriter(MachineInstr &MI) : Rewriter(MI) {
230 NumDefs = MI.getDesc().getNumDefs();
231 }
232
233 /// \see See Rewriter::getNextRewritableSource()
234 /// All such sources need to be considered rewritable in order to
235 /// rewrite a uncoalescable copy-like instruction. This method return
236 /// each definition that must be checked if rewritable.
237 bool getNextRewritableSource(RegSubRegPair &Src,
238 RegSubRegPair &Dst) override {
239 // Find the next non-dead definition and continue from there.
240 if (CurrentSrcIdx == NumDefs)
241 return false;
242
243 while (CopyLike.getOperand(CurrentSrcIdx).isDead()) {
244 ++CurrentSrcIdx;
245 if (CurrentSrcIdx == NumDefs)
246 return false;
247 }
248
249 // What we track are the alternative sources of the definition.
250 Src = RegSubRegPair(0, 0);
251 const MachineOperand &MODef = CopyLike.getOperand(CurrentSrcIdx);
252 Dst = RegSubRegPair(MODef.getReg(), MODef.getSubReg());
253
254 CurrentSrcIdx++;
255 return true;
256 }
257
258 bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
259 return false;
260 }
261};
262
263/// Specialized rewriter for INSERT_SUBREG instruction.
264class InsertSubregRewriter : public Rewriter {
265public:
266 InsertSubregRewriter(MachineInstr &MI) : Rewriter(MI) {
267 assert(MI.isInsertSubreg() && "Invalid instruction");
268 }
269
270 /// \see See Rewriter::getNextRewritableSource()
271 /// Here CopyLike has the following form:
272 /// dst = INSERT_SUBREG Src1, Src2.src2SubIdx, subIdx.
273 /// Src1 has the same register class has dst, hence, there is
274 /// nothing to rewrite.
275 /// Src2.src2SubIdx, may not be register coalescer friendly.
276 /// Therefore, the first call to this method returns:
277 /// (SrcReg, SrcSubReg) = (Src2, src2SubIdx).
278 /// (DstReg, DstSubReg) = (dst, subIdx).
279 ///
280 /// Subsequence calls will return false.
281 bool getNextRewritableSource(RegSubRegPair &Src,
282 RegSubRegPair &Dst) override {
283 // If we already get the only source we can rewrite, return false.
284 if (CurrentSrcIdx == 2)
285 return false;
286 // We are looking at v2 = INSERT_SUBREG v0, v1, sub0.
287 CurrentSrcIdx = 2;
288 const MachineOperand &MOInsertedReg = CopyLike.getOperand(2);
289 Src = RegSubRegPair(MOInsertedReg.getReg(), MOInsertedReg.getSubReg());
290 const MachineOperand &MODef = CopyLike.getOperand(0);
291
292 // We want to track something that is compatible with the
293 // partial definition.
294 if (MODef.getSubReg())
295 // Bail if we have to compose sub-register indices.
296 return false;
297 Dst = RegSubRegPair(MODef.getReg(),
298 (unsigned)CopyLike.getOperand(3).getImm());
299 return true;
300 }
301
302 bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
303 if (CurrentSrcIdx != 2)
304 return false;
305 // We are rewriting the inserted reg.
306 MachineOperand &MO = CopyLike.getOperand(CurrentSrcIdx);
307 MO.setReg(NewReg);
308 MO.setSubReg(NewSubReg);
309 return true;
310 }
311};
312
/// Specialized rewriter for EXTRACT_SUBREG instruction.
class ExtractSubregRewriter : public Rewriter {
  const TargetInstrInfo &TII;  ///< Needed to morph the instruction into COPY.

public:
  ExtractSubregRewriter(MachineInstr &MI, const TargetInstrInfo &TII)
      : Rewriter(MI), TII(TII) {
    assert(MI.isExtractSubreg() && "Invalid instruction");
  }

  /// \see Rewriter::getNextRewritableSource()
  /// Here CopyLike has the following form:
  /// dst.dstSubIdx = EXTRACT_SUBREG Src, subIdx.
  /// There is only one rewritable source: Src.subIdx,
  /// which defines dst.dstSubIdx.
  bool getNextRewritableSource(RegSubRegPair &Src,
                               RegSubRegPair &Dst) override {
    // If we already get the only source we can rewrite, return false.
    if (CurrentSrcIdx == 1)
      return false;
    // We are looking at v1 = EXTRACT_SUBREG v0, sub0.
    CurrentSrcIdx = 1;
    const MachineOperand &MOExtractedReg = CopyLike.getOperand(1);
    // If we have to compose sub-register indices, bail out.
    if (MOExtractedReg.getSubReg())
      return false;

    // The extracted index (operand 2) is the part of the source that is
    // actually read, so it becomes the tracked source's sub-register.
    Src =
        RegSubRegPair(MOExtractedReg.getReg(), CopyLike.getOperand(2).getImm());

    // We want to track something that is compatible with the definition.
    const MachineOperand &MODef = CopyLike.getOperand(0);
    Dst = RegSubRegPair(MODef.getReg(), MODef.getSubReg());
    return true;
  }

  bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
    // The only source we can rewrite is the input register.
    if (CurrentSrcIdx != 1)
      return false;

    CopyLike.getOperand(CurrentSrcIdx).setReg(NewReg);

    // If we find a source that does not require to extract something,
    // rewrite the operation with a copy.
    if (!NewSubReg) {
      // Move the current index to an invalid position.
      // We do not want another call to this method to be able
      // to do any change.
      CurrentSrcIdx = -1;
      // Rewrite the operation as a COPY.
      // Get rid of the sub-register index.
      CopyLike.removeOperand(2);
      // Morph the operation into a COPY.
      CopyLike.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    }
    // Otherwise, just retarget the extracted index (operand 2) at the new
    // sub-register.
    CopyLike.getOperand(CurrentSrcIdx + 1).setImm(NewSubReg);
    return true;
  }
};
374
375/// Specialized rewriter for REG_SEQUENCE instruction.
376class RegSequenceRewriter : public Rewriter {
377public:
378 RegSequenceRewriter(MachineInstr &MI) : Rewriter(MI) {
379 assert(MI.isRegSequence() && "Invalid instruction");
380 CurrentSrcIdx = -1;
381 }
382
383 /// \see Rewriter::getNextRewritableSource()
384 /// Here CopyLike has the following form:
385 /// dst = REG_SEQUENCE Src1.src1SubIdx, subIdx1, Src2.src2SubIdx, subIdx2.
386 /// Each call will return a different source, walking all the available
387 /// source.
388 ///
389 /// The first call returns:
390 /// (SrcReg, SrcSubReg) = (Src1, src1SubIdx).
391 /// (DstReg, DstSubReg) = (dst, subIdx1).
392 ///
393 /// The second call returns:
394 /// (SrcReg, SrcSubReg) = (Src2, src2SubIdx).
395 /// (DstReg, DstSubReg) = (dst, subIdx2).
396 ///
397 /// And so on, until all the sources have been traversed, then
398 /// it returns false.
399 bool getNextRewritableSource(RegSubRegPair &Src,
400 RegSubRegPair &Dst) override {
401 // We are looking at v0 = REG_SEQUENCE v1, sub1, v2, sub2, etc.
402 CurrentSrcIdx += 2;
403 if (static_cast<unsigned>(CurrentSrcIdx) >= CopyLike.getNumOperands())
404 return false;
405
406 const MachineOperand &MOInsertedReg = CopyLike.getOperand(CurrentSrcIdx);
407 Src.Reg = MOInsertedReg.getReg();
408 Src.SubReg = MOInsertedReg.getSubReg();
409
410 // We want to track something that is compatible with the related
411 // partial definition.
412 Dst.SubReg = CopyLike.getOperand(CurrentSrcIdx + 1).getImm();
413
414 const MachineOperand &MODef = CopyLike.getOperand(0);
415 Dst.Reg = MODef.getReg();
416 assert(MODef.getSubReg() == 0 && "cannot have subregister def in SSA");
417 return true;
418 }
419
420 bool RewriteCurrentSource(Register NewReg, unsigned NewSubReg) override {
421 MachineOperand &MO = CopyLike.getOperand(CurrentSrcIdx);
422 MO.setReg(NewReg);
423 MO.setSubReg(NewSubReg);
424 return true;
425 }
426};
427
/// Implementation shared by the legacy pass and the new-PM wrapper.
/// Inherits MachineFunction::Delegate so CopySrcMIs stays in sync as
/// instructions are inserted/removed/morphed during the run.
class PeepholeOptimizer : private MachineFunction::Delegate {
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  MachineDominatorTree *DT = nullptr; // Machine dominator tree
  MachineLoopInfo *MLI = nullptr;

public:
  // DT may be null; it is only required (and used) in Aggressive mode.
  PeepholeOptimizer(MachineDominatorTree *DT, MachineLoopInfo *MLI)
      : DT(DT), MLI(MLI) {}

  bool run(MachineFunction &MF);
  /// Track Def -> Use info used for rewriting copies.
  using RewriteMapTy = SmallDenseMap<RegSubRegPair, ValueTrackerResult>;

  /// Sequence of instructions that formulate recurrence cycle.
  using RecurrenceCycle = SmallVector<RecurrenceInstr, 4>;

private:
  bool optimizeCmpInstr(MachineInstr &MI, MachineFunction &MF,
                        SmallPtrSet<MachineInstr *, 16> &LocalMIs);
  bool optimizeExtInstr(MachineInstr &MI, MachineBasicBlock &MBB,
                        SmallPtrSetImpl<MachineInstr *> &LocalMIs);
  bool optimizeSelect(MachineInstr &MI,
                      SmallPtrSetImpl<MachineInstr *> &LocalMIs);
  bool optimizeCondBranch(MachineInstr &MI);

  bool optimizeCoalescableCopyImpl(Rewriter &&CpyRewriter);
  bool optimizeCoalescableCopy(MachineInstr &MI);
  bool optimizeUncoalescableCopy(MachineInstr &MI,
                                 SmallPtrSetImpl<MachineInstr *> &LocalMIs);
  bool optimizeRecurrence(MachineInstr &PHI);
  bool findNextSource(const TargetRegisterClass *DefRC, unsigned DefSubReg,
                      RegSubRegPair RegSubReg, RewriteMapTy &RewriteMap);
  bool isMoveImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
                       DenseMap<Register, MachineInstr *> &ImmDefMIs);
  bool foldImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
                     DenseMap<Register, MachineInstr *> &ImmDefMIs,
                     bool &Deleted);

  /// Finds recurrence cycles, but only ones that formulated around
  /// a def operand and a use operand that are tied. If there is a use
  /// operand commutable with the tied use operand, find recurrence cycle
  /// along that operand as well.
  bool findTargetRecurrence(Register Reg,
                            const SmallSet<Register, 2> &TargetReg,
                            RecurrenceCycle &RC);

  /// If copy instruction \p MI is a virtual register copy or a copy of a
  /// constant physical register to a virtual register, track it in the
  /// set CopySrcMIs. If this virtual register was previously seen as a
  /// copy, replace the uses of this copy with the previously seen copy's
  /// destination register.
  bool foldRedundantCopy(MachineInstr &MI);

  /// Is the register \p Reg a non-allocatable physical register?
  bool isNAPhysCopy(Register Reg);

  /// If copy instruction \p MI is a non-allocatable virtual<->physical
  /// register copy, track it in the \p NAPhysToVirtMIs map. If this
  /// non-allocatable physical register was previously copied to a virtual
  /// registered and hasn't been clobbered, the virt->phys copy can be
  /// deleted.
  bool
  foldRedundantNAPhysCopy(MachineInstr &MI,
                          DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs);

  bool isLoadFoldable(MachineInstr &MI,
                      SmallSet<Register, 16> &FoldAsLoadDefCandidates);

  /// Try to fold the load defined by \p FoldReg into \p MI using
  /// TII->optimizeLoadInstr. On success, updates \p LocalMIs, erases the old
  /// instructions, and returns the replacement; returns nullptr otherwise.
  MachineInstr *foldLoadInto(MachineFunction &MF, MachineInstr &MI,
                             Register FoldReg,
                             SmallPtrSet<MachineInstr *, 16> &LocalMIs);

  /// Check whether \p MI is understood by the register coalescer
  /// but may require some rewriting.
  static bool isCoalescableCopy(const MachineInstr &MI) {
    // SubregToRegs are not interesting, because they are already register
    // coalescer friendly.
    return MI.isCopy() ||
           (!DisableAdvCopyOpt && (MI.isRegSequence() || MI.isInsertSubreg() ||
                                   MI.isExtractSubreg()));
  }

  /// Check whether \p MI is a copy like instruction that is
  /// not recognized by the register coalescer.
  static bool isUncoalescableCopy(const MachineInstr &MI) {
    return MI.isBitcast() || (!DisableAdvCopyOpt && (MI.isRegSequenceLike() ||
                                                     MI.isInsertSubregLike() ||
                                                     MI.isExtractSubregLike()));
  }

  MachineInstr &rewriteSource(MachineInstr &CopyLike, RegSubRegPair Def,
                              RewriteMapTy &RewriteMap);

  // Set of copies to virtual registers keyed by source register. Never
  // holds any physreg which requires def tracking.
  DenseMap<RegSubRegPair, MachineInstr *> CopySrcMIs;

  // MachineFunction::Delegate implementation. Used to maintain CopySrcMIs.
  void MF_HandleInsertion(MachineInstr &MI) override {}

  /// If \p MI is a COPY whose source is a virtual register or a constant
  /// physical register, return its (reg, subreg) source in \p SrcPair.
  bool getCopySrc(MachineInstr &MI, RegSubRegPair &SrcPair) {
    if (!MI.isCopy())
      return false;

    Register SrcReg = MI.getOperand(1).getReg();
    unsigned SrcSubReg = MI.getOperand(1).getSubReg();
    if (!SrcReg.isVirtual() && !MRI->isConstantPhysReg(SrcReg))
      return false;

    SrcPair = RegSubRegPair(SrcReg, SrcSubReg);
    return true;
  }

  // If a COPY instruction is to be deleted or changed, we should also remove
  // it from CopySrcMIs.
  void deleteChangedCopy(MachineInstr &MI) {
    RegSubRegPair SrcPair;
    if (!getCopySrc(MI, SrcPair))
      return;

    // Only erase the entry if it still points at this instruction.
    auto It = CopySrcMIs.find(SrcPair);
    if (It != CopySrcMIs.end() && It->second == &MI)
      CopySrcMIs.erase(It);
  }

  void MF_HandleRemoval(MachineInstr &MI) override { deleteChangedCopy(MI); }

  void MF_HandleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID) override {
    deleteChangedCopy(MI);
  }
};
564
565class PeepholeOptimizerLegacy : public MachineFunctionPass {
566public:
567 static char ID; // Pass identification
568
569 PeepholeOptimizerLegacy() : MachineFunctionPass(ID) {}
570
571 bool runOnMachineFunction(MachineFunction &MF) override;
572
573 void getAnalysisUsage(AnalysisUsage &AU) const override {
574 AU.setPreservesCFG();
576 AU.addRequired<MachineLoopInfoWrapperPass>();
577 AU.addPreserved<MachineLoopInfoWrapperPass>();
578 if (Aggressive) {
579 AU.addRequired<MachineDominatorTreeWrapperPass>();
580 AU.addPreserved<MachineDominatorTreeWrapperPass>();
581 }
582 }
583
584 MachineFunctionProperties getRequiredProperties() const override {
585 return MachineFunctionProperties().setIsSSA();
586 }
587};
588
589/// Helper class to hold instructions that are inside recurrence cycles.
590/// The recurrence cycle is formulated around 1) a def operand and its
591/// tied use operand, or 2) a def operand and a use operand that is commutable
592/// with another use operand which is tied to the def operand. In the latter
593/// case, index of the tied use operand and the commutable use operand are
594/// maintained with CommutePair.
595class RecurrenceInstr {
596public:
597 using IndexPair = std::pair<unsigned, unsigned>;
598
599 RecurrenceInstr(MachineInstr *MI) : MI(MI) {}
600 RecurrenceInstr(MachineInstr *MI, unsigned Idx1, unsigned Idx2)
601 : MI(MI), CommutePair(std::make_pair(Idx1, Idx2)) {}
602
603 MachineInstr *getMI() const { return MI; }
604 std::optional<IndexPair> getCommutePair() const { return CommutePair; }
605
606private:
607 MachineInstr *MI;
608 std::optional<IndexPair> CommutePair;
609};
610
611/// Helper class to hold a reply for ValueTracker queries.
612/// Contains the returned sources for a given search and the instructions
613/// where the sources were tracked from.
614class ValueTrackerResult {
615private:
616 /// Track all sources found by one ValueTracker query.
618
619 /// Instruction using the sources in 'RegSrcs'.
620 const MachineInstr *Inst = nullptr;
621
622public:
623 ValueTrackerResult() = default;
624
625 ValueTrackerResult(Register Reg, unsigned SubReg) { addSource(Reg, SubReg); }
626
627 bool isValid() const { return getNumSources() > 0; }
628
629 void setInst(const MachineInstr *I) { Inst = I; }
630 const MachineInstr *getInst() const { return Inst; }
631
632 void clear() {
633 RegSrcs.clear();
634 Inst = nullptr;
635 }
636
637 void addSource(Register SrcReg, unsigned SrcSubReg) {
638 RegSrcs.push_back(RegSubRegPair(SrcReg, SrcSubReg));
639 }
640
641 void setSource(int Idx, Register SrcReg, unsigned SrcSubReg) {
642 assert(Idx < getNumSources() && "Reg pair source out of index");
643 RegSrcs[Idx] = RegSubRegPair(SrcReg, SrcSubReg);
644 }
645
646 int getNumSources() const { return RegSrcs.size(); }
647
648 RegSubRegPair getSrc(int Idx) const { return RegSrcs[Idx]; }
649
650 Register getSrcReg(int Idx) const {
651 assert(Idx < getNumSources() && "Reg source out of index");
652 return RegSrcs[Idx].Reg;
653 }
654
655 unsigned getSrcSubReg(int Idx) const {
656 assert(Idx < getNumSources() && "SubReg source out of index");
657 return RegSrcs[Idx].SubReg;
658 }
659
660 bool operator==(const ValueTrackerResult &Other) const {
661 if (Other.getInst() != getInst())
662 return false;
663
664 if (Other.getNumSources() != getNumSources())
665 return false;
666
667 for (int i = 0, e = Other.getNumSources(); i != e; ++i)
668 if (Other.getSrcReg(i) != getSrcReg(i) ||
669 Other.getSrcSubReg(i) != getSrcSubReg(i))
670 return false;
671 return true;
672 }
673};
674
/// Helper class to track the possible sources of a value defined by
/// a (chain of) copy related instructions.
/// Given a definition (instruction and definition index), this class
/// follows the use-def chain to find successive suitable sources.
/// The given source can be used to rewrite the definition into
/// def = COPY src.
///
/// For instance, let us consider the following snippet:
/// v0 =
/// v2 = INSERT_SUBREG v1, v0, sub0
/// def = COPY v2.sub0
///
/// Using a ValueTracker for def = COPY v2.sub0 will give the following
/// suitable sources:
/// v2.sub0 and v0.
/// Then, def can be rewritten into def = COPY v0.
class ValueTracker {
private:
  /// The current point into the use-def chain.
  const MachineInstr *Def = nullptr;

  /// The index of the definition in Def.
  unsigned DefIdx = 0;

  /// The sub register index of the definition.
  unsigned DefSubReg;

  /// The register where the value can be found.
  Register Reg;

  /// MachineRegisterInfo used to perform tracking.
  const MachineRegisterInfo &MRI;

  /// Optional TargetInstrInfo used to perform some complex tracking.
  const TargetInstrInfo *TII;

  /// Dispatcher to the right underlying implementation of getNextSource.
  ValueTrackerResult getNextSourceImpl();

  /// Specialized version of getNextSource for Copy instructions.
  ValueTrackerResult getNextSourceFromCopy();

  /// Specialized version of getNextSource for Bitcast instructions.
  ValueTrackerResult getNextSourceFromBitcast();

  /// Specialized version of getNextSource for RegSequence instructions.
  ValueTrackerResult getNextSourceFromRegSequence();

  /// Specialized version of getNextSource for InsertSubreg instructions.
  ValueTrackerResult getNextSourceFromInsertSubreg();

  /// Specialized version of getNextSource for ExtractSubreg instructions.
  ValueTrackerResult getNextSourceFromExtractSubreg();

  /// Specialized version of getNextSource for SubregToReg instructions.
  ValueTrackerResult getNextSourceFromSubregToReg();

  /// Specialized version of getNextSource for PHI instructions.
  ValueTrackerResult getNextSourceFromPHI();

public:
  /// Create a ValueTracker instance for the value defined by \p Reg.
  /// \p DefSubReg represents the sub register index the value tracker will
  /// track. It does not need to match the sub register index used in the
  /// definition of \p Reg.
  /// If \p Reg is a physical register, a value tracker constructed with
  /// this constructor will not find any alternative source.
  /// Indeed, when \p Reg is a physical register that constructor does not
  /// know which definition of \p Reg it should track.
  /// Use the next constructor to track a physical register.
  ValueTracker(Register Reg, unsigned DefSubReg, const MachineRegisterInfo &MRI,
               const TargetInstrInfo *TII = nullptr)
      : DefSubReg(DefSubReg), Reg(Reg), MRI(MRI), TII(TII) {
    // Virtual registers have a single SSA def; start tracking from it.
    if (!Reg.isPhysical()) {
      Def = MRI.getVRegDef(Reg);
      DefIdx = MRI.def_begin(Reg).getOperandNo();
    }
  }

  /// Following the use-def chain, get the next available source
  /// for the tracked value.
  /// \return A ValueTrackerResult containing a set of registers
  /// and sub registers with tracked values. A ValueTrackerResult with
  /// an empty set of registers means no source was found.
  ValueTrackerResult getNextSource();
};
761
762} // end anonymous namespace
763
764char PeepholeOptimizerLegacy::ID = 0;
765
766char &llvm::PeepholeOptimizerLegacyID = PeepholeOptimizerLegacy::ID;
767
768INITIALIZE_PASS_BEGIN(PeepholeOptimizerLegacy, DEBUG_TYPE,
769 "Peephole Optimizations", false, false)
772INITIALIZE_PASS_END(PeepholeOptimizerLegacy, DEBUG_TYPE,
773 "Peephole Optimizations", false, false)
774
775/// If instruction is a copy-like instruction, i.e. it reads a single register
776/// and writes a single register and it does not modify the source, and if the
777/// source value is preserved as a sub-register of the result, then replace all
778/// reachable uses of the source with the subreg of the result.
779///
780/// Do not generate an EXTRACT that is used only in a debug use, as this changes
781/// the code. Since this code does not currently share EXTRACTs, just ignore all
782/// debug uses.
783bool PeepholeOptimizer::optimizeExtInstr(
785 SmallPtrSetImpl<MachineInstr *> &LocalMIs) {
786 Register SrcReg, DstReg;
787 unsigned SubIdx;
788 if (!TII->isCoalescableExtInstr(MI, SrcReg, DstReg, SubIdx))
789 return false;
790
791 if (DstReg.isPhysical() || SrcReg.isPhysical())
792 return false;
793
794 if (MRI->hasOneNonDBGUse(SrcReg))
795 // No other uses.
796 return false;
797
798 // Ensure DstReg can get a register class that actually supports
799 // sub-registers. Don't change the class until we commit.
800 const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
801 DstRC = TRI->getSubClassWithSubReg(DstRC, SubIdx);
802 if (!DstRC)
803 return false;
804
805 // The ext instr may be operating on a sub-register of SrcReg as well.
806 // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
807 // register.
808 // If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
809 // SrcReg:SubIdx should be replaced.
810 bool UseSrcSubIdx =
811 TRI->getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != nullptr;
812
813 // The source has other uses. See if we can replace the other uses with use of
814 // the result of the extension.
816 for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
817 ReachedBBs.insert(UI.getParent());
818
819 // Uses that are in the same BB of uses of the result of the instruction.
821
822 // Uses that the result of the instruction can reach.
824
825 bool ExtendLife = true;
826 for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) {
827 MachineInstr *UseMI = UseMO.getParent();
828 if (UseMI == &MI)
829 continue;
830
831 if (UseMI->isPHI()) {
832 ExtendLife = false;
833 continue;
834 }
835
836 // Only accept uses of SrcReg:SubIdx.
837 if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
838 continue;
839
840 // It's an error to translate this:
841 //
842 // %reg1025 = <sext> %reg1024
843 // ...
844 // %reg1026 = SUBREG_TO_REG %reg1024, 4
845 //
846 // into this:
847 //
848 // %reg1025 = <sext> %reg1024
849 // ...
850 // %reg1027 = COPY %reg1025:4
851 // %reg1026 = SUBREG_TO_REG %reg1027, 4
852 //
853 // The problem here is that SUBREG_TO_REG is there to assert that an
854 // implicit zext occurs. It doesn't insert a zext instruction. If we allow
855 // the COPY here, it will give us the value after the <sext>, not the
856 // original value of %reg1024 before <sext>.
857 if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
858 continue;
859
860 MachineBasicBlock *UseMBB = UseMI->getParent();
861 if (UseMBB == &MBB) {
862 // Local uses that come after the extension.
863 if (!LocalMIs.count(UseMI))
864 Uses.push_back(&UseMO);
865 } else if (ReachedBBs.count(UseMBB)) {
866 // Non-local uses where the result of the extension is used. Always
867 // replace these unless it's a PHI.
868 Uses.push_back(&UseMO);
869 } else if (Aggressive && DT->dominates(&MBB, UseMBB)) {
870 // We may want to extend the live range of the extension result in order
871 // to replace these uses.
872 ExtendedUses.push_back(&UseMO);
873 } else {
874 // Both will be live out of the def MBB anyway. Don't extend live range of
875 // the extension result.
876 ExtendLife = false;
877 break;
878 }
879 }
880
881 if (ExtendLife && !ExtendedUses.empty())
882 // Extend the liveness of the extension result.
883 Uses.append(ExtendedUses.begin(), ExtendedUses.end());
884
885 // Now replace all uses.
886 bool Changed = false;
887 if (!Uses.empty()) {
888 SmallPtrSet<MachineBasicBlock *, 4> PHIBBs;
889
890 // Look for PHI uses of the extended result, we don't want to extend the
891 // liveness of a PHI input. It breaks all kinds of assumptions down
892 // stream. A PHI use is expected to be the kill of its source values.
893 for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
894 if (UI.isPHI())
895 PHIBBs.insert(UI.getParent());
896
897 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
898 for (MachineOperand *UseMO : Uses) {
899 MachineInstr *UseMI = UseMO->getParent();
900 MachineBasicBlock *UseMBB = UseMI->getParent();
901 if (PHIBBs.count(UseMBB))
902 continue;
903
904 // About to add uses of DstReg, clear DstReg's kill flags.
905 if (!Changed) {
906 MRI->clearKillFlags(DstReg);
907 MRI->constrainRegClass(DstReg, DstRC);
908 }
909
910 // SubReg defs are illegal in machine SSA phase,
911 // we should not generate SubReg defs.
912 //
913 // For example, for the instructions:
914 //
915 // %1:g8rc_and_g8rc_nox0 = EXTSW %0:g8rc
916 // %3:gprc_and_gprc_nor0 = COPY %0.sub_32:g8rc
917 //
918 // We should generate:
919 //
920 // %1:g8rc_and_g8rc_nox0 = EXTSW %0:g8rc
921 // %6:gprc_and_gprc_nor0 = COPY %1.sub_32:g8rc_and_g8rc_nox0
922 // %3:gprc_and_gprc_nor0 = COPY %6:gprc_and_gprc_nor0
923 //
924 if (UseSrcSubIdx)
925 RC = MRI->getRegClass(UseMI->getOperand(0).getReg());
926
927 Register NewVR = MRI->createVirtualRegister(RC);
928 BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
929 TII->get(TargetOpcode::COPY), NewVR)
930 .addReg(DstReg, {}, SubIdx);
931 if (UseSrcSubIdx)
932 UseMO->setSubReg(0);
933
934 UseMO->setReg(NewVR);
935 ++NumReuse;
936 Changed = true;
937 }
938 }
939
940 return Changed;
941}
942
/// If the instruction is a compare and the previous instruction it's comparing
/// against already sets (or could be modified to set) the same flag as the
/// compare, then we can remove the comparison and use the flag from the
/// previous instruction.
///
/// \p LocalMIs is the set of instructions already scanned in the current
/// basic block. \return true if the compare was eliminated.
bool PeepholeOptimizer::optimizeCmpInstr(
    MachineFunction &MF, MachineInstr &MI,
    SmallPtrSet<MachineInstr *, 16> &LocalMIs) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  Register SrcReg, SrcReg2;
  int64_t CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      SrcReg.isPhysical() || SrcReg2.isPhysical())
    return false;

  // Attempt to optimize the comparison instruction.
  LLVM_DEBUG(dbgs() << "Attempting to optimize compare: " << MI);
  if (!TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI))
    return false;

  LLVM_DEBUG(dbgs() << " -> Successfully optimized compare!\n");
  ++NumCmps;

  // The eliminated compare may have been the extra use preventing a
  // load from being folded into the flag-setting instruction.
  if (SrcReg.isVirtual() && MRI->hasOneNonDBGUser(SrcReg)) {
    // SrcReg now has exactly one non-debug user: the flag-setting
    // instruction. If SrcReg is also defined by a foldable load we have
    // already seen in this block, try to fold the load into that user.
    MachineInstr *FlagProducer = MRI->use_nodbg_begin(SrcReg)->getParent();
    MachineInstr *LoadMI = MRI->getVRegDef(SrcReg);
    if (LocalMIs.count(FlagProducer) && LoadMI && LoadMI->canFoldAsLoad() &&
        LoadMI->mayLoad() && LocalMIs.count(LoadMI))
      foldLoadInto(MF, *FlagProducer, SrcReg, LocalMIs);
  }

  return true;
}
978
979/// Optimize a select instruction.
980bool PeepholeOptimizer::optimizeSelect(
981 MachineInstr &MI, SmallPtrSetImpl<MachineInstr *> &LocalMIs) {
982 assert(MI.isSelect() && "Should only be called when MI->isSelect() is true");
983 if (!TII->optimizeSelect(MI, LocalMIs))
984 return false;
985 LLVM_DEBUG(dbgs() << "Deleting select: " << MI);
986 MI.eraseFromParent();
987 ++NumSelects;
988 return true;
989}
990
/// Check if a simpler conditional branch can be generated.
/// This is a thin wrapper: the entire analysis and rewrite are delegated to
/// the target hook TargetInstrInfo::optimizeCondBranch; the pass adds no
/// logic of its own here.
bool PeepholeOptimizer::optimizeCondBranch(MachineInstr &MI) {
  return TII->optimizeCondBranch(MI);
}
995
/// Try to find a better source value that shares the same register file to
/// replace \p RegSubReg in an instruction like
/// `DefRC.DefSubReg = COPY RegSubReg`
///
/// When true is returned, the \p RewriteMap can be used by the client to
/// retrieve all Def -> Use along the way up to the next source. Any found
/// Use that is not itself a key for another entry, is the next source to
/// use. During the search for the next source, multiple sources can be found
/// given multiple incoming sources of a PHI instruction. In this case, we
/// look in each PHI source for the next source; all found next sources must
/// share the same register file as \p Reg and \p SubReg. The client should
/// then be capable to rewrite all intermediate PHIs to get the next source.
/// \return False if no alternative sources are available. True otherwise.
bool PeepholeOptimizer::findNextSource(const TargetRegisterClass *DefRC,
                                       unsigned DefSubReg,
                                       RegSubRegPair RegSubReg,
                                       RewriteMapTy &RewriteMap) {
  // Do not try to find a new source for a physical register.
  // So far we do not have any motivating example for doing that.
  // Thus, instead of maintaining untested code, we will revisit that if
  // that changes at some point.
  Register Reg = RegSubReg.Reg;
  RegSubRegPair CurSrcPair = RegSubReg;
  // Worklist of sources still to examine, seeded with the original pair.
  // PHI traversal pushes each incoming edge onto this list.
  SmallVector<RegSubRegPair, 4> SrcToLook = {CurSrcPair};

  // Number of PHI instructions traversed so far, bounded by RewritePHILimit.
  unsigned PHICount = 0;
  do {
    CurSrcPair = SrcToLook.pop_back_val();
    // As explained above, do not handle physical registers
    if (CurSrcPair.Reg.isPhysical())
      return false;

    ValueTracker ValTracker(CurSrcPair.Reg, CurSrcPair.SubReg, *MRI, TII);

    // Follow the chain of copies until we find a more suitable source, a phi
    // or have to abort.
    while (true) {
      ValueTrackerResult Res = ValTracker.getNextSource();
      // Abort at the end of a chain (without finding a suitable source).
      if (!Res.isValid())
        return false;

      // Insert the Def -> Use entry for the recently found source.
      auto [InsertPt, WasInserted] = RewriteMap.try_emplace(CurSrcPair, Res);

      if (!WasInserted) {
        const ValueTrackerResult &CurSrcRes = InsertPt->second;

        assert(CurSrcRes == Res && "ValueTrackerResult found must match");
        // An existent entry with multiple sources is a PHI cycle we must avoid.
        // Otherwise it's an entry with a valid next source we already found.
        if (CurSrcRes.getNumSources() > 1) {
          LLVM_DEBUG(dbgs()
                     << "findNextSource: found PHI cycle, aborting...\n");
          return false;
        }
        break;
      }

      // ValueTrackerResult usually have one source unless it's the result from
      // a PHI instruction. Add the found PHI edges to be looked up further.
      unsigned NumSrcs = Res.getNumSources();
      if (NumSrcs > 1) {
        PHICount++;
        if (PHICount >= RewritePHILimit) {
          LLVM_DEBUG(dbgs() << "findNextSource: PHI limit reached\n");
          return false;
        }

        for (unsigned i = 0; i < NumSrcs; ++i)
          SrcToLook.push_back(Res.getSrc(i));
        break;
      }

      CurSrcPair = Res.getSrc(0);
      // Do not extend the live-ranges of physical registers as they add
      // constraints to the register allocator. Moreover, if we want to extend
      // the live-range of a physical register, unlike SSA virtual register,
      // we will have to check that they aren't redefine before the related use.
      if (CurSrcPair.Reg.isPhysical())
        return false;

      // Keep following the chain if the value isn't any better yet.
      const TargetRegisterClass *SrcRC = MRI->getRegClass(CurSrcPair.Reg);
      if (!TRI->shouldRewriteCopySrc(DefRC, DefSubReg, SrcRC,
                                     CurSrcPair.SubReg))
        continue;

      // We currently cannot deal with subreg operands on PHI instructions
      // (see insertPHI()).
      if (PHICount > 0 && CurSrcPair.SubReg != 0)
        continue;

      // We found a suitable source, and are done with this chain.
      break;
    }
  } while (!SrcToLook.empty());

  // If we did not find a more suitable source, there is nothing to optimize.
  return CurSrcPair.Reg != Reg;
}
1097
1098/// Insert a PHI instruction with incoming edges \p SrcRegs that are
1099/// guaranteed to have the same register class. This is necessary whenever we
1100/// successfully traverse a PHI instruction and find suitable sources coming
1101/// from its edges. By inserting a new PHI, we provide a rewritten PHI def
1102/// suitable to be used in a new COPY instruction.
1104 const TargetInstrInfo &TII,
1105 const SmallVectorImpl<RegSubRegPair> &SrcRegs,
1106 MachineInstr &OrigPHI) {
1107 assert(!SrcRegs.empty() && "No sources to create a PHI instruction?");
1108
1109 const TargetRegisterClass *NewRC = MRI.getRegClass(SrcRegs[0].Reg);
1110 // NewRC is only correct if no subregisters are involved. findNextSource()
1111 // should have rejected those cases already.
1112 assert(SrcRegs[0].SubReg == 0 && "should not have subreg operand");
1113 Register NewVR = MRI.createVirtualRegister(NewRC);
1114 MachineBasicBlock *MBB = OrigPHI.getParent();
1115 MachineInstrBuilder MIB = BuildMI(*MBB, &OrigPHI, OrigPHI.getDebugLoc(),
1116 TII.get(TargetOpcode::PHI), NewVR);
1117
1118 unsigned MBBOpIdx = 2;
1119 for (const RegSubRegPair &RegPair : SrcRegs) {
1120 MIB.addReg(RegPair.Reg, {}, RegPair.SubReg);
1121 MIB.addMBB(OrigPHI.getOperand(MBBOpIdx).getMBB());
1122 // Since we're extended the lifetime of RegPair.Reg, clear the
1123 // kill flags to account for that and make RegPair.Reg reaches
1124 // the new PHI.
1125 MRI.clearKillFlags(RegPair.Reg);
1126 MBBOpIdx += 2;
1127 }
1128
1129 return *MIB;
1130}
1131
/// Given a \p Def.Reg and Def.SubReg pair, use \p RewriteMap to find
/// the new source to use for rewrite. If \p HandleMultipleSources is true and
/// multiple sources for a given \p Def are found along the way, we found a
/// PHI instructions that needs to be rewritten.
/// TODO: HandleMultipleSources should be removed once we test PHI handling
/// with coalescable copies.
static RegSubRegPair
getNewSource(MachineRegisterInfo *MRI, const TargetInstrInfo *TII,
             RegSubRegPair Def,
             const PeepholeOptimizer::RewriteMapTy &RewriteMap,
             bool HandleMultipleSources = true) {
  RegSubRegPair LookupSrc(Def.Reg, Def.SubReg);
  // Chase Def -> Use entries until we fall off the map; the last pair
  // looked up is the replacement source.
  while (true) {
    ValueTrackerResult Res = RewriteMap.lookup(LookupSrc);
    // If there are no entries on the map, LookupSrc is the new source.
    if (!Res.isValid())
      return LookupSrc;

    // There's only one source for this definition, keep searching...
    unsigned NumSrcs = Res.getNumSources();
    if (NumSrcs == 1) {
      LookupSrc.Reg = Res.getSrcReg(0);
      LookupSrc.SubReg = Res.getSrcSubReg(0);
      continue;
    }

    // TODO: Remove once multiple srcs w/ coalescable copies are supported.
    if (!HandleMultipleSources)
      break;

    // Multiple sources, recurse into each source to find a new source
    // for it. Then, rewrite the PHI accordingly to its new edges.
    SmallVector<RegSubRegPair, 4> NewPHISrcs;
    for (unsigned i = 0; i < NumSrcs; ++i) {
      RegSubRegPair PHISrc(Res.getSrcReg(i), Res.getSrcSubReg(i));
      NewPHISrcs.push_back(
          getNewSource(MRI, TII, PHISrc, RewriteMap, HandleMultipleSources));
    }

    // Build the new PHI node and return its def register as the new source.
    MachineInstr &OrigPHI = const_cast<MachineInstr &>(*Res.getInst());
    MachineInstr &NewPHI = insertPHI(*MRI, *TII, NewPHISrcs, OrigPHI);
    LLVM_DEBUG(dbgs() << "-- getNewSource\n");
    LLVM_DEBUG(dbgs() << " Replacing: " << OrigPHI);
    LLVM_DEBUG(dbgs() << " With: " << NewPHI);
    const MachineOperand &MODef = NewPHI.getOperand(0);
    return RegSubRegPair(MODef.getReg(), MODef.getSubReg());
  }

  // Multiple-source entry that the caller asked us not to handle: return a
  // null pair so the caller skips this rewrite.
  return RegSubRegPair(0, 0);
}
1183
1184bool PeepholeOptimizer::optimizeCoalescableCopyImpl(Rewriter &&CpyRewriter) {
1185 bool Changed = false;
1186 // Get the right rewriter for the current copy.
1187 // Rewrite each rewritable source.
1188 RegSubRegPair Dst;
1189 RegSubRegPair TrackPair;
1190 while (CpyRewriter.getNextRewritableSource(TrackPair, Dst)) {
1191 if (Dst.Reg.isPhysical()) {
1192 // Do not try to find a new source for a physical register.
1193 // So far we do not have any motivating example for doing that.
1194 // Thus, instead of maintaining untested code, we will revisit that if
1195 // that changes at some point.
1196 continue;
1197 }
1198
1199 const TargetRegisterClass *DefRC = MRI->getRegClass(Dst.Reg);
1200
1201 // Keep track of PHI nodes and its incoming edges when looking for sources.
1202 RewriteMapTy RewriteMap;
1203 // Try to find a more suitable source. If we failed to do so, or get the
1204 // actual source, move to the next source.
1205 if (!findNextSource(DefRC, Dst.SubReg, TrackPair, RewriteMap))
1206 continue;
1207
1208 // Get the new source to rewrite. TODO: Only enable handling of multiple
1209 // sources (PHIs) once we have a motivating example and testcases for it.
1210 RegSubRegPair NewSrc = getNewSource(MRI, TII, TrackPair, RewriteMap,
1211 /*HandleMultipleSources=*/false);
1212 assert(TrackPair.Reg != NewSrc.Reg &&
1213 "should not rewrite source to original value");
1214 if (!NewSrc.Reg)
1215 continue;
1216
1217 if (NewSrc.SubReg) {
1218 // Verify the register class supports the subregister index. ARM's
1219 // copy-like queries return register:subreg pairs where the register's
1220 // current class does not directly support the subregister index.
1221 const TargetRegisterClass *RC = MRI->getRegClass(NewSrc.Reg);
1222 const TargetRegisterClass *WithSubRC =
1223 TRI->getSubClassWithSubReg(RC, NewSrc.SubReg);
1224 if (!MRI->constrainRegClass(NewSrc.Reg, WithSubRC))
1225 continue;
1226 Changed = true;
1227 }
1228
1229 // Rewrite source.
1230 if (CpyRewriter.RewriteCurrentSource(NewSrc.Reg, NewSrc.SubReg)) {
1231 // We may have extended the live-range of NewSrc, account for that.
1232 MRI->clearKillFlags(NewSrc.Reg);
1233 Changed = true;
1234 }
1235 }
1236
1237 // TODO: We could have a clean-up method to tidy the instruction.
1238 // E.g., v0 = INSERT_SUBREG v1, v1.sub0, sub0
1239 // => v0 = COPY v1
1240 // Currently we haven't seen motivating example for that and we
1241 // want to avoid untested code.
1242 NumRewrittenCopies += Changed;
1243 return Changed;
1244}
1245
1246/// Optimize generic copy instructions to avoid cross register bank copy.
1247/// The optimization looks through a chain of copies and tries to find a source
1248/// that has a compatible register class.
1249/// Two register classes are considered to be compatible if they share the same
1250/// register bank.
1251/// New copies issued by this optimization are register allocator
1252/// friendly. This optimization does not remove any copy as it may
1253/// overconstrain the register allocator, but replaces some operands
1254/// when possible.
1255/// \pre isCoalescableCopy(*MI) is true.
1256/// \return True, when \p MI has been rewritten. False otherwise.
1257bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr &MI) {
1258 assert(isCoalescableCopy(MI) && "Invalid argument");
1259 assert(MI.getDesc().getNumDefs() == 1 &&
1260 "Coalescer can understand multiple defs?!");
1261 const MachineOperand &MODef = MI.getOperand(0);
1262 // Do not rewrite physical definitions.
1263 if (MODef.getReg().isPhysical())
1264 return false;
1265
1266 switch (MI.getOpcode()) {
1267 case TargetOpcode::COPY:
1268 return optimizeCoalescableCopyImpl(CopyRewriter(MI));
1269 case TargetOpcode::INSERT_SUBREG:
1270 return optimizeCoalescableCopyImpl(InsertSubregRewriter(MI));
1271 case TargetOpcode::EXTRACT_SUBREG:
1272 return optimizeCoalescableCopyImpl(ExtractSubregRewriter(MI, *TII));
1273 case TargetOpcode::REG_SEQUENCE:
1274 return optimizeCoalescableCopyImpl(RegSequenceRewriter(MI));
1275 default:
1276 // Handle uncoalescable copy-like instructions.
1277 if (MI.isBitcast() || MI.isRegSequenceLike() || MI.isInsertSubregLike() ||
1278 MI.isExtractSubregLike())
1279 return optimizeCoalescableCopyImpl(UncoalescableRewriter(MI));
1280 return false;
1281 }
1282}
1283
1284/// Rewrite the source found through \p Def, by using the \p RewriteMap
1285/// and create a new COPY instruction. More info about RewriteMap in
1286/// PeepholeOptimizer::findNextSource. Right now this is only used to handle
1287/// Uncoalescable copies, since they are copy like instructions that aren't
1288/// recognized by the register allocator.
1289MachineInstr &PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike,
1290 RegSubRegPair Def,
1291 RewriteMapTy &RewriteMap) {
1292 assert(!Def.Reg.isPhysical() && "We do not rewrite physical registers");
1293
1294 // Find the new source to use in the COPY rewrite.
1295 RegSubRegPair NewSrc = getNewSource(MRI, TII, Def, RewriteMap);
1296
1297 // Insert the COPY.
1298 const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg);
1299 Register NewVReg = MRI->createVirtualRegister(DefRC);
1300
1301 if (NewSrc.SubReg) {
1302 const TargetRegisterClass *NewSrcRC = MRI->getRegClass(NewSrc.Reg);
1303 const TargetRegisterClass *WithSubRC =
1304 TRI->getSubClassWithSubReg(NewSrcRC, NewSrc.SubReg);
1305
1306 // The new source may not directly support the subregister, but we should be
1307 // able to assume it is constrainable to support the subregister (otherwise
1308 // ValueTracker was lying and reported a useless value).
1309 if (!MRI->constrainRegClass(NewSrc.Reg, WithSubRC))
1310 llvm_unreachable("replacement register cannot support subregister");
1311 }
1312
1313 MachineInstr *NewCopy =
1314 BuildMI(*CopyLike.getParent(), &CopyLike, CopyLike.getDebugLoc(),
1315 TII->get(TargetOpcode::COPY), NewVReg)
1316 .addReg(NewSrc.Reg, {}, NewSrc.SubReg);
1317
1318 if (Def.SubReg) {
1319 NewCopy->getOperand(0).setSubReg(Def.SubReg);
1320 NewCopy->getOperand(0).setIsUndef();
1321 }
1322
1323 LLVM_DEBUG(dbgs() << "-- RewriteSource\n");
1324 LLVM_DEBUG(dbgs() << " Replacing: " << CopyLike);
1325 LLVM_DEBUG(dbgs() << " With: " << *NewCopy);
1326 MRI->replaceRegWith(Def.Reg, NewVReg);
1327 MRI->clearKillFlags(NewVReg);
1328
1329 // We extended the lifetime of NewSrc.Reg, clear the kill flags to
1330 // account for that.
1331 MRI->clearKillFlags(NewSrc.Reg);
1332
1333 return *NewCopy;
1334}
1335
1336/// Optimize copy-like instructions to create
1337/// register coalescer friendly instruction.
1338/// The optimization tries to kill-off the \p MI by looking
1339/// through a chain of copies to find a source that has a compatible
1340/// register class.
1341/// If such a source is found, it replace \p MI by a generic COPY
1342/// operation.
1343/// \pre isUncoalescableCopy(*MI) is true.
1344/// \return True, when \p MI has been optimized. In that case, \p MI has
1345/// been removed from its parent.
1346/// All COPY instructions created, are inserted in \p LocalMIs.
1347bool PeepholeOptimizer::optimizeUncoalescableCopy(
1348 MachineInstr &MI, SmallPtrSetImpl<MachineInstr *> &LocalMIs) {
1349 assert(isUncoalescableCopy(MI) && "Invalid argument");
1350 UncoalescableRewriter CpyRewriter(MI);
1351
1352 // Rewrite each rewritable source by generating new COPYs. This works
1353 // differently from optimizeCoalescableCopy since it first makes sure that all
1354 // definitions can be rewritten.
1355 RewriteMapTy RewriteMap;
1356 RegSubRegPair Src;
1358 SmallVector<RegSubRegPair, 4> RewritePairs;
1359 while (CpyRewriter.getNextRewritableSource(Src, Def)) {
1360 // If a physical register is here, this is probably for a good reason.
1361 // Do not rewrite that.
1362 if (Def.Reg.isPhysical())
1363 return false;
1364
1365 // FIXME: Uncoalescable copies are treated differently by
1366 // UncoalescableRewriter, and this probably should not share
1367 // API. getNextRewritableSource really finds rewritable defs.
1368 const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg);
1369
1370 // If we do not know how to rewrite this definition, there is no point
1371 // in trying to kill this instruction.
1372 if (!findNextSource(DefRC, Def.SubReg, Def, RewriteMap))
1373 return false;
1374
1375 RewritePairs.push_back(Def);
1376 }
1377
1378 // The change is possible for all defs, do it.
1379 for (const RegSubRegPair &Def : RewritePairs) {
1380 // Rewrite the "copy" in a way the register coalescer understands.
1381 MachineInstr &NewCopy = rewriteSource(MI, Def, RewriteMap);
1382 LocalMIs.insert(&NewCopy);
1383 }
1384
1385 // MI is now dead.
1386 LLVM_DEBUG(dbgs() << "Deleting uncoalescable copy: " << MI);
1387 MI.eraseFromParent();
1388 ++NumUncoalescableCopies;
1389 return true;
1390}
1391
1392/// Check whether MI is a candidate for folding into a later instruction.
1393/// We only fold loads to virtual registers and the virtual register defined
1394/// has a single user.
1395bool PeepholeOptimizer::isLoadFoldable(
1396 MachineInstr &MI, SmallSet<Register, 16> &FoldAsLoadDefCandidates) {
1397 if (!MI.canFoldAsLoad() || !MI.mayLoad())
1398 return false;
1399 const MCInstrDesc &MCID = MI.getDesc();
1400 if (MCID.getNumDefs() != 1)
1401 return false;
1402
1403 Register Reg = MI.getOperand(0).getReg();
1404 // To reduce compilation time, we check MRI->hasOneNonDBGUser when inserting
1405 // loads. It should be checked when processing uses of the load, since
1406 // uses can be removed during peephole.
1407 if (Reg.isVirtual() && !MI.getOperand(0).getSubReg() &&
1408 MRI->hasOneNonDBGUser(Reg)) {
1409 FoldAsLoadDefCandidates.insert(Reg);
1410 return true;
1411 }
1412 return false;
1413}
1414
/// Try to fold the load defining \p FoldReg into its user \p MI via the
/// target hook TargetInstrInfo::optimizeLoadInstr, keeping \p LocalMIs in
/// sync with the erased/created instructions.
/// \return the folded instruction, or nullptr when folding is not possible.
MachineInstr *
PeepholeOptimizer::foldLoadInto(MachineFunction &MF, MachineInstr &MI,
                                Register FoldReg,
                                SmallPtrSet<MachineInstr *, 16> &LocalMIs) {
  Register Reg = FoldReg;
  MachineInstr *DefMI = nullptr;
  MachineInstr *CopyMI = nullptr;
  // The target decides whether the load feeding Reg can be folded into MI and
  // returns the replacement instruction on success.
  MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI, Reg, DefMI, CopyMI);
  if (!FoldMI)
    return nullptr;
  LLVM_DEBUG(dbgs() << "Replacing: " << MI << " With: " << *FoldMI);
  // Keep the seen-instructions set consistent: MI and the load are gone, the
  // folded instruction (and any copy the target created) replace them.
  LocalMIs.erase(&MI);
  LocalMIs.erase(DefMI);
  LocalMIs.insert(FoldMI);
  if (CopyMI)
    LocalMIs.insert(CopyMI);
  // Preserve call-site bookkeeping if the erased instruction carried any.
  if (MI.shouldUpdateAdditionalCallInfo())
    MF.moveAdditionalCallInfo(&MI, FoldMI);
  MI.eraseFromParent();
  DefMI->eraseFromParent();
  // DBG_VALUEs of the folded load's def can no longer be computed.
  MRI->markUsesInDebugValueAsUndef(FoldReg);
  ++NumLoadFold;
  return FoldMI;
}
1439
1440bool PeepholeOptimizer::isMoveImmediate(
1441 MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
1442 DenseMap<Register, MachineInstr *> &ImmDefMIs) {
1443 const MCInstrDesc &MCID = MI.getDesc();
1444 if (MCID.getNumDefs() != 1 || !MI.getOperand(0).isReg())
1445 return false;
1446 Register Reg = MI.getOperand(0).getReg();
1447 if (!Reg.isVirtual())
1448 return false;
1449
1450 int64_t ImmVal;
1451 if (!MI.isMoveImmediate() && !TII->getConstValDefinedInReg(MI, Reg, ImmVal))
1452 return false;
1453
1454 ImmDefMIs.insert(std::make_pair(Reg, &MI));
1455 ImmDefRegs.insert(Reg);
1456 return true;
1457}
1458
1459/// Try folding register operands that are defined by move immediate
1460/// instructions, i.e. a trivial constant folding optimization, if
1461/// and only if the def and use are in the same BB.
1462bool PeepholeOptimizer::foldImmediate(
1463 MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
1464 DenseMap<Register, MachineInstr *> &ImmDefMIs, bool &Deleted) {
1465 Deleted = false;
1466 for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
1467 MachineOperand &MO = MI.getOperand(i);
1468 if (!MO.isReg() || MO.isDef())
1469 continue;
1470 Register Reg = MO.getReg();
1471 if (!Reg.isVirtual())
1472 continue;
1473 if (ImmDefRegs.count(Reg) == 0)
1474 continue;
1475 auto II = ImmDefMIs.find(Reg);
1476 assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
1477 if (TII->foldImmediate(MI, *II->second, Reg, MRI)) {
1478 ++NumImmFold;
1479 // foldImmediate can delete ImmDefMI if MI was its only user. If ImmDefMI
1480 // is not deleted, and we happened to get a same MI, we can delete MI and
1481 // replace its users.
1482 if (MRI->getVRegDef(Reg) &&
1483 MI.isIdenticalTo(*II->second, MachineInstr::IgnoreVRegDefs)) {
1484 Register DstReg = MI.getOperand(0).getReg();
1485 if (DstReg.isVirtual() &&
1486 MRI->getRegClass(DstReg) == MRI->getRegClass(Reg)) {
1487 MRI->replaceRegWith(DstReg, Reg);
1488 MRI->clearKillFlags(Reg);
1489 MI.eraseFromParent();
1490 Deleted = true;
1491 }
1492 }
1493 return true;
1494 }
1495 }
1496 return false;
1497}
1498
1499// FIXME: This is very simple and misses some cases which should be handled when
1500// motivating examples are found.
1501//
1502// The copy rewriting logic should look at uses as well as defs and be able to
1503// eliminate copies across blocks.
1504//
1505// Later copies that are subregister extracts will also not be eliminated since
1506// only the first copy is considered.
1507//
1508// e.g.
1509// %1 = COPY %0
1510// %2 = COPY %0:sub1
1511//
1512// Should replace %2 uses with %1:sub1
1513bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI) {
1514 assert(MI.isCopy() && "expected a COPY machine instruction");
1515
1516 RegSubRegPair SrcPair;
1517 if (!getCopySrc(MI, SrcPair))
1518 return false;
1519
1520 Register DstReg = MI.getOperand(0).getReg();
1521 if (!DstReg.isVirtual())
1522 return false;
1523
1524 if (CopySrcMIs.insert(std::make_pair(SrcPair, &MI)).second) {
1525 // First copy of this reg seen.
1526 return false;
1527 }
1528
1529 MachineInstr *PrevCopy = CopySrcMIs.find(SrcPair)->second;
1530
1531 assert(SrcPair.SubReg == PrevCopy->getOperand(1).getSubReg() &&
1532 "Unexpected mismatching subreg!");
1533
1534 Register PrevDstReg = PrevCopy->getOperand(0).getReg();
1535
1536 // Only replace if the copy register class is the same.
1537 //
1538 // TODO: If we have multiple copies to different register classes, we may want
1539 // to track multiple copies of the same source register.
1540 if (MRI->getRegClass(DstReg) != MRI->getRegClass(PrevDstReg))
1541 return false;
1542
1543 MRI->replaceRegWith(DstReg, PrevDstReg);
1544
1545 // Lifetime of the previous copy has been extended.
1546 MRI->clearKillFlags(PrevDstReg);
1547 return true;
1548}
1549
1550bool PeepholeOptimizer::isNAPhysCopy(Register Reg) {
1551 return Reg.isPhysical() && !MRI->isAllocatable(Reg);
1552}
1553
1554bool PeepholeOptimizer::foldRedundantNAPhysCopy(
1555 MachineInstr &MI, DenseMap<Register, MachineInstr *> &NAPhysToVirtMIs) {
1556 assert(MI.isCopy() && "expected a COPY machine instruction");
1557
1559 return false;
1560
1561 Register DstReg = MI.getOperand(0).getReg();
1562 Register SrcReg = MI.getOperand(1).getReg();
1563 if (isNAPhysCopy(SrcReg) && DstReg.isVirtual()) {
1564 // %vreg = COPY $physreg
1565 // Avoid using a datastructure which can track multiple live non-allocatable
1566 // phys->virt copies since LLVM doesn't seem to do this.
1567 NAPhysToVirtMIs.insert({SrcReg, &MI});
1568 return false;
1569 }
1570
1571 if (!(SrcReg.isVirtual() && isNAPhysCopy(DstReg)))
1572 return false;
1573
1574 // $physreg = COPY %vreg
1575 auto PrevCopy = NAPhysToVirtMIs.find(DstReg);
1576 if (PrevCopy == NAPhysToVirtMIs.end()) {
1577 // We can't remove the copy: there was an intervening clobber of the
1578 // non-allocatable physical register after the copy to virtual.
1579 LLVM_DEBUG(dbgs() << "NAPhysCopy: intervening clobber forbids erasing "
1580 << MI);
1581 return false;
1582 }
1583
1584 Register PrevDstReg = PrevCopy->second->getOperand(0).getReg();
1585 if (PrevDstReg == SrcReg) {
1586 // Remove the virt->phys copy: we saw the virtual register definition, and
1587 // the non-allocatable physical register's state hasn't changed since then.
1588 LLVM_DEBUG(dbgs() << "NAPhysCopy: erasing " << MI);
1589 ++NumNAPhysCopies;
1590 return true;
1591 }
1592
1593 // Potential missed optimization opportunity: we saw a different virtual
1594 // register get a copy of the non-allocatable physical register, and we only
1595 // track one such copy. Avoid getting confused by this new non-allocatable
1596 // physical register definition, and remove it from the tracked copies.
1597 LLVM_DEBUG(dbgs() << "NAPhysCopy: missed opportunity " << MI);
1598 NAPhysToVirtMIs.erase(PrevCopy);
1599 return false;
1600}
1601
1602/// \bried Returns true if \p MO is a virtual register operand.
1604 return MO.isReg() && MO.getReg().isVirtual();
1605}
1606
1607bool PeepholeOptimizer::findTargetRecurrence(
1608 Register Reg, const SmallSet<Register, 2> &TargetRegs,
1609 RecurrenceCycle &RC) {
1610 // Recurrence found if Reg is in TargetRegs.
1611 if (TargetRegs.count(Reg))
1612 return true;
1613
1614 // TODO: Curerntly, we only allow the last instruction of the recurrence
1615 // cycle (the instruction that feeds the PHI instruction) to have more than
1616 // one uses to guarantee that commuting operands does not tie registers
1617 // with overlapping live range. Once we have actual live range info of
1618 // each register, this constraint can be relaxed.
1619 if (!MRI->hasOneNonDBGUse(Reg))
1620 return false;
1621
1622 // Give up if the reccurrence chain length is longer than the limit.
1623 if (RC.size() >= MaxRecurrenceChain)
1624 return false;
1625
1626 MachineInstr &MI = *(MRI->use_instr_nodbg_begin(Reg));
1627 unsigned Idx = MI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr);
1628
1629 // Only interested in recurrences whose instructions have only one def, which
1630 // is a virtual register.
1631 if (MI.getDesc().getNumDefs() != 1)
1632 return false;
1633
1634 MachineOperand &DefOp = MI.getOperand(0);
1635 if (!isVirtualRegisterOperand(DefOp))
1636 return false;
1637
1638 // Check if def operand of MI is tied to any use operand. We are only
1639 // interested in the case that all the instructions in the recurrence chain
1640 // have there def operand tied with one of the use operand.
1641 unsigned TiedUseIdx;
1642 if (!MI.isRegTiedToUseOperand(0, &TiedUseIdx))
1643 return false;
1644
1645 if (Idx == TiedUseIdx) {
1646 RC.push_back(RecurrenceInstr(&MI));
1647 return findTargetRecurrence(DefOp.getReg(), TargetRegs, RC);
1648 } else {
1649 // If Idx is not TiedUseIdx, check if Idx is commutable with TiedUseIdx.
1650 unsigned CommIdx = TargetInstrInfo::CommuteAnyOperandIndex;
1651 if (TII->findCommutedOpIndices(MI, Idx, CommIdx) && CommIdx == TiedUseIdx) {
1652 RC.push_back(RecurrenceInstr(&MI, Idx, CommIdx));
1653 return findTargetRecurrence(DefOp.getReg(), TargetRegs, RC);
1654 }
1655 }
1656
1657 return false;
1658}
1659
1660/// Phi instructions will eventually be lowered to copy instructions.
1661/// If phi is in a loop header, a recurrence may formulated around the source
1662/// and destination of the phi. For such case commuting operands of the
1663/// instructions in the recurrence may enable coalescing of the copy instruction
1664/// generated from the phi. For example, if there is a recurrence of
1665///
1666/// LoopHeader:
1667/// %1 = phi(%0, %100)
1668/// LoopLatch:
1669/// %0<def, tied1> = ADD %2<def, tied0>, %1
1670///
1671/// , the fact that %0 and %2 are in the same tied operands set makes
1672/// the coalescing of copy instruction generated from the phi in
1673/// LoopHeader(i.e. %1 = COPY %0) impossible, because %1 and
1674/// %2 have overlapping live range. This introduces additional move
1675/// instruction to the final assembly. However, if we commute %2 and
1676/// %1 of ADD instruction, the redundant move instruction can be
1677/// avoided.
1678bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
1679 SmallSet<Register, 2> TargetRegs;
1680 for (unsigned Idx = 1; Idx < PHI.getNumOperands(); Idx += 2) {
1681 MachineOperand &MO = PHI.getOperand(Idx);
1682 assert(isVirtualRegisterOperand(MO) && "Invalid PHI instruction");
1683 TargetRegs.insert(MO.getReg());
1684 }
1685
1686 bool Changed = false;
1687 RecurrenceCycle RC;
1688 if (findTargetRecurrence(PHI.getOperand(0).getReg(), TargetRegs, RC)) {
1689 // Commutes operands of instructions in RC if necessary so that the copy to
1690 // be generated from PHI can be coalesced.
1691 LLVM_DEBUG(dbgs() << "Optimize recurrence chain from " << PHI);
1692 for (auto &RI : RC) {
1693 LLVM_DEBUG(dbgs() << "\tInst: " << *(RI.getMI()));
1694 auto CP = RI.getCommutePair();
1695 if (CP) {
1696 Changed = true;
1697 TII->commuteInstruction(*(RI.getMI()), false, (*CP).first,
1698 (*CP).second);
1699 LLVM_DEBUG(dbgs() << "\t\tCommuted: " << *(RI.getMI()));
1700 }
1701 }
1702 }
1703
1704 return Changed;
1705}
1706
1707PreservedAnalyses
1710 MFPropsModifier _(*this, MF);
1711 auto *DT =
1712 Aggressive ? &MFAM.getResult<MachineDominatorTreeAnalysis>(MF) : nullptr;
1713 auto *MLI = &MFAM.getResult<MachineLoopAnalysis>(MF);
1714 PeepholeOptimizer Impl(DT, MLI);
1715 bool Changed = Impl.run(MF);
1716 if (!Changed)
1717 return PreservedAnalyses::all();
1718
1720 PA.preserve<MachineDominatorTreeAnalysis>();
1721 PA.preserve<MachineLoopAnalysis>();
1722 PA.preserveSet<CFGAnalyses>();
1723 return PA;
1724}
1725
1726bool PeepholeOptimizerLegacy::runOnMachineFunction(MachineFunction &MF) {
1727 if (skipFunction(MF.getFunction()))
1728 return false;
1729 auto *DT = Aggressive
1730 ? &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree()
1731 : nullptr;
1732 auto *MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
1733 PeepholeOptimizer Impl(DT, MLI);
1734 return Impl.run(MF);
1735}
1736
1737bool PeepholeOptimizer::run(MachineFunction &MF) {
1738
1739 LLVM_DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
1740 LLVM_DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
1741
1742 if (DisablePeephole)
1743 return false;
1744
1745 TII = MF.getSubtarget().getInstrInfo();
1747 MRI = &MF.getRegInfo();
1748 MF.setDelegate(this);
1749
1750 bool Changed = false;
1751
1752 for (MachineBasicBlock &MBB : MF) {
1753 bool SeenMoveImm = false;
1754
1755 // During this forward scan, at some point it needs to answer the question
1756 // "given a pointer to an MI in the current BB, is it located before or
1757 // after the current instruction".
1758 // To perform this, the following set keeps track of the MIs already seen
1759 // during the scan, if a MI is not in the set, it is assumed to be located
1760 // after. Newly created MIs have to be inserted in the set as well.
1762 SmallSet<Register, 4> ImmDefRegs;
1764 SmallSet<Register, 16> FoldAsLoadDefCandidates;
1765
1766 // Track when a non-allocatable physical register is copied to a virtual
1767 // register so that useless moves can be removed.
1768 //
1769 // $physreg is the map index; MI is the last valid `%vreg = COPY $physreg`
1770 // without any intervening re-definition of $physreg.
1771 DenseMap<Register, MachineInstr *> NAPhysToVirtMIs;
1772
1773 CopySrcMIs.clear();
1774
1775 bool IsLoopHeader = MLI->isLoopHeader(&MBB);
1776
1777 for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end();
1778 MII != MIE;) {
1779 MachineInstr *MI = &*MII;
1780 // We may be erasing MI below, increment MII now.
1781 ++MII;
1782 LocalMIs.insert(MI);
1783
1784 // Skip debug instructions. They should not affect this peephole
1785 // optimization.
1786 if (MI->isDebugInstr())
1787 continue;
1788
1789 if (MI->isPosition())
1790 continue;
1791
1792 if (IsLoopHeader && MI->isPHI()) {
1793 if (optimizeRecurrence(*MI)) {
1794 Changed = true;
1795 continue;
1796 }
1797 }
1798
1799 if (!MI->isCopy()) {
1800 for (const MachineOperand &MO : MI->operands()) {
1801 // Visit all operands: definitions can be implicit or explicit.
1802 if (MO.isReg()) {
1803 Register Reg = MO.getReg();
1804 if (MO.isDef() && isNAPhysCopy(Reg)) {
1805 const auto &Def = NAPhysToVirtMIs.find(Reg);
1806 if (Def != NAPhysToVirtMIs.end()) {
1807 // A new definition of the non-allocatable physical register
1808 // invalidates previous copies.
1810 << "NAPhysCopy: invalidating because of " << *MI);
1811 NAPhysToVirtMIs.erase(Def);
1812 }
1813 }
1814 } else if (MO.isRegMask()) {
1815 const uint32_t *RegMask = MO.getRegMask();
1816 for (auto &RegMI : NAPhysToVirtMIs) {
1817 Register Def = RegMI.first;
1818 if (MachineOperand::clobbersPhysReg(RegMask, Def)) {
1820 << "NAPhysCopy: invalidating because of " << *MI);
1821 NAPhysToVirtMIs.erase(Def);
1822 }
1823 }
1824 }
1825 }
1826 }
1827
1828 if (MI->isImplicitDef() || MI->isKill())
1829 continue;
1830
1831 if (MI->isInlineAsm() || MI->hasUnmodeledSideEffects()) {
1832 // Blow away all non-allocatable physical registers knowledge since we
1833 // don't know what's correct anymore.
1834 //
1835 // FIXME: handle explicit asm clobbers.
1836 LLVM_DEBUG(dbgs() << "NAPhysCopy: blowing away all info due to "
1837 << *MI);
1838 NAPhysToVirtMIs.clear();
1839 }
1840
1841 if (MI->isCompare() && optimizeCmpInstr(*MI, MF, LocalMIs)) {
1842 LocalMIs.erase(MI);
1843 Changed = true;
1844 continue;
1845 }
1846
1847 if ((isUncoalescableCopy(*MI) &&
1848 optimizeUncoalescableCopy(*MI, LocalMIs)) ||
1849 (MI->isSelect() && optimizeSelect(*MI, LocalMIs))) {
1850 // MI is deleted.
1851 LocalMIs.erase(MI);
1852 Changed = true;
1853 continue;
1854 }
1855
1856 if (MI->isConditionalBranch() && optimizeCondBranch(*MI)) {
1857 Changed = true;
1858 continue;
1859 }
1860
1861 if (isCoalescableCopy(*MI) && optimizeCoalescableCopy(*MI)) {
1862 // MI is just rewritten.
1863 Changed = true;
1864 continue;
1865 }
1866
1867 if (MI->isCopy() && (foldRedundantCopy(*MI) ||
1868 foldRedundantNAPhysCopy(*MI, NAPhysToVirtMIs))) {
1869 LocalMIs.erase(MI);
1870 LLVM_DEBUG(dbgs() << "Deleting redundant copy: " << *MI << "\n");
1871 MI->eraseFromParent();
1872 Changed = true;
1873 continue;
1874 }
1875
1876 if (isMoveImmediate(*MI, ImmDefRegs, ImmDefMIs)) {
1877 SeenMoveImm = true;
1878 } else {
1879 Changed |= optimizeExtInstr(*MI, MBB, LocalMIs);
1880 // optimizeExtInstr might have created new instructions after MI
1881 // and before the already incremented MII. Adjust MII so that the
1882 // next iteration sees the new instructions.
1883 MII = MI;
1884 ++MII;
1885 if (SeenMoveImm) {
1886 bool Deleted;
1887 Changed |= foldImmediate(*MI, ImmDefRegs, ImmDefMIs, Deleted);
1888 if (Deleted) {
1889 LocalMIs.erase(MI);
1890 continue;
1891 }
1892 }
1893 }
1894
1895 // Check whether MI is a load candidate for folding into a later
1896 // instruction. If MI is not a candidate, check whether we can fold an
1897 // earlier load into MI.
1898 if (!isLoadFoldable(*MI, FoldAsLoadDefCandidates) &&
1899 !FoldAsLoadDefCandidates.empty()) {
1900
1901 // We visit each operand even after successfully folding a previous
1902 // one. This allows us to fold multiple loads into a single
1903 // instruction. We do assume that optimizeLoadInstr doesn't insert
1904 // foldable uses earlier in the argument list. Since we don't restart
1905 // iteration, we'd miss such cases.
1906 const MCInstrDesc &MIDesc = MI->getDesc();
1907 for (unsigned i = MIDesc.getNumDefs(); i != MI->getNumOperands(); ++i) {
1908 const MachineOperand &MOp = MI->getOperand(i);
1909 if (!MOp.isReg())
1910 continue;
1911 Register FoldAsLoadDefReg = MOp.getReg();
1912 if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
1913 // We need to fold load after optimizeCmpInstr, since
1914 // optimizeCmpInstr can enable folding by converting SUB to CMP.
1915 Register FoldedReg = FoldAsLoadDefReg;
1916 if (MachineInstr *FoldMI =
1917 foldLoadInto(MF, *MI, FoldAsLoadDefReg, LocalMIs)) {
1918 FoldAsLoadDefCandidates.erase(FoldedReg);
1919 // MI is replaced with FoldMI so we can continue trying to fold
1920 Changed = true;
1921 MI = FoldMI;
1922 }
1923 }
1924 }
1925 }
1926
1927 // If we run into an instruction we can't fold across, discard
1928 // the load candidates. Note: We might be able to fold *into* this
1929 // instruction, so this needs to be after the folding logic.
1930 if (MI->isLoadFoldBarrier()) {
1931 LLVM_DEBUG(dbgs() << "Encountered load fold barrier on " << *MI);
1932 FoldAsLoadDefCandidates.clear();
1933 }
1934 }
1935 }
1936
1937 MF.resetDelegate(this);
1938 return Changed;
1939}
1940
1941ValueTrackerResult ValueTracker::getNextSourceFromCopy() {
1942 assert(Def->isCopy() && "Invalid definition");
1943 // Copy instruction are supposed to be: Def = Src.
1944 // If someone breaks this assumption, bad things will happen everywhere.
1945 // There may be implicit uses preventing the copy to be moved across
1946 // some target specific register definitions
1947 assert(Def->getNumOperands() - Def->getNumImplicitOperands() == 2 &&
1948 "Invalid number of operands");
1949 assert(!Def->hasImplicitDef() && "Only implicit uses are allowed");
1950 assert(!Def->getOperand(DefIdx).getSubReg() && "no subregister defs in SSA");
1951
1952 // Otherwise, we want the whole source.
1953 const MachineOperand &Src = Def->getOperand(1);
1954 if (Src.isUndef())
1955 return ValueTrackerResult();
1956
1957 Register SrcReg = Src.getReg();
1958 unsigned SubReg = Src.getSubReg();
1959 if (DefSubReg) {
1960 const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
1961 SubReg = TRI->composeSubRegIndices(SubReg, DefSubReg);
1962
1963 if (SrcReg.isVirtual()) {
1964 // TODO: Try constraining on rewrite if we can
1965 const TargetRegisterClass *RegRC = MRI.getRegClass(SrcReg);
1966 if (!TRI->isSubRegValidForRegClass(RegRC, SubReg))
1967 return ValueTrackerResult();
1968 } else {
1969 if (!TRI->getSubReg(SrcReg, SubReg))
1970 return ValueTrackerResult();
1971 }
1972 }
1973
1974 return ValueTrackerResult(SrcReg, SubReg);
1975}
1976
/// Follow the tracked value through a bitcast-like instruction. Returns the
/// single register source of the bitcast, or an invalid result when the
/// instruction cannot safely be treated like a plain copy.
ValueTrackerResult ValueTracker::getNextSourceFromBitcast() {
  assert(Def->isBitcast() && "Invalid definition");

  // Bail if there are effects that a plain copy will not expose.
  if (Def->mayRaiseFPException() || Def->hasUnmodeledSideEffects())
    return ValueTrackerResult();

  // Bitcasts with more than one def are not supported.
  if (Def->getDesc().getNumDefs() != 1)
    return ValueTrackerResult();

  assert(!Def->getOperand(DefIdx).getSubReg() && "no subregister defs in SSA");

  // Look for the unique register use operand after the def. SrcIdx starts as
  // an out-of-range sentinel (== number of operands) so we can tell apart
  // "no source found yet" from "a source was already found".
  unsigned SrcIdx = Def->getNumOperands();
  for (unsigned OpIdx = DefIdx + 1, EndOpIdx = SrcIdx; OpIdx != EndOpIdx;
       ++OpIdx) {
    const MachineOperand &MO = Def->getOperand(OpIdx);
    if (!MO.isReg() || !MO.getReg())
      continue;
    // Ignore dead implicit defs.
    if (MO.isImplicit() && MO.isDead())
      continue;
    assert(!MO.isDef() && "We should have skipped all the definitions by now");
    if (SrcIdx != EndOpIdx)
      // Multiple sources?
      return ValueTrackerResult();
    SrcIdx = OpIdx;
  }

  // In some rare case, Def has no input, SrcIdx is out of bound,
  // getOperand(SrcIdx) will fail below.
  if (SrcIdx >= Def->getNumOperands())
    return ValueTrackerResult();

  const MachineOperand &DefOp = Def->getOperand(DefIdx);

  // Stop when any user of the bitcast is a SUBREG_TO_REG, replacing with a COPY
  // will break the assumed guarantees for the upper bits.
  for (const MachineInstr &UseMI : MRI.use_nodbg_instructions(DefOp.getReg())) {
    if (UseMI.isSubregToReg())
      return ValueTrackerResult();
  }

  // An undef source carries no value to propagate.
  const MachineOperand &Src = Def->getOperand(SrcIdx);
  if (Src.isUndef())
    return ValueTrackerResult();
  return ValueTrackerResult(Src.getReg(), Src.getSubReg());
}
2025
2026ValueTrackerResult ValueTracker::getNextSourceFromRegSequence() {
2027 assert((Def->isRegSequence() || Def->isRegSequenceLike()) &&
2028 "Invalid definition");
2029
2030 assert(!Def->getOperand(DefIdx).getSubReg() && "illegal subregister def");
2031
2033 if (!TII->getRegSequenceInputs(*Def, DefIdx, RegSeqInputRegs))
2034 return ValueTrackerResult();
2035
2036 // We are looking at:
2037 // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
2038 //
2039 // Check if one of the operands exactly defines the subreg we are interested
2040 // in.
2041 for (const RegSubRegPairAndIdx &RegSeqInput : RegSeqInputRegs) {
2042 if (RegSeqInput.SubIdx == DefSubReg)
2043 return ValueTrackerResult(RegSeqInput.Reg, RegSeqInput.SubReg);
2044 }
2045
2046 const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
2047
2048 // If we did not find an exact match, see if we can do a composition to
2049 // extract a sub-subregister.
2050 for (const RegSubRegPairAndIdx &RegSeqInput : RegSeqInputRegs) {
2051 LaneBitmask DefMask = TRI->getSubRegIndexLaneMask(DefSubReg);
2052 LaneBitmask ThisOpRegMask = TRI->getSubRegIndexLaneMask(RegSeqInput.SubIdx);
2053
2054 // Check that this extract reads a subset of this single reg_sequence input.
2055 //
2056 // FIXME: We should be able to filter this in terms of the indexes directly
2057 // without checking the lanemasks.
2058 if ((DefMask & ThisOpRegMask) != DefMask)
2059 continue;
2060
2061 unsigned ReverseDefCompose =
2062 TRI->reverseComposeSubRegIndices(RegSeqInput.SubIdx, DefSubReg);
2063 if (!ReverseDefCompose)
2064 continue;
2065
2066 unsigned ComposedDefInSrcReg1 =
2067 TRI->composeSubRegIndices(RegSeqInput.SubReg, ReverseDefCompose);
2068
2069 // TODO: We should be able to defer checking if the result register class
2070 // supports the index to continue looking for a rewritable source.
2071 //
2072 // TODO: Should we modify the register class to support the index?
2073 const TargetRegisterClass *SrcRC = MRI.getRegClass(RegSeqInput.Reg);
2074 if (!TRI->isSubRegValidForRegClass(SrcRC, ComposedDefInSrcReg1))
2075 return ValueTrackerResult();
2076
2077 return ValueTrackerResult(RegSeqInput.Reg, ComposedDefInSrcReg1);
2078 }
2079
2080 // If the subreg we are tracking is super-defined by another subreg,
2081 // we could follow this value. However, this would require to compose
2082 // the subreg and we do not do that for now.
2083 return ValueTrackerResult();
2084}
2085
2086ValueTrackerResult ValueTracker::getNextSourceFromInsertSubreg() {
2087 assert((Def->isInsertSubreg() || Def->isInsertSubregLike()) &&
2088 "Invalid definition");
2089 assert(!Def->getOperand(DefIdx).getSubReg() && "no subreg defs in SSA");
2090
2092 RegSubRegPairAndIdx InsertedReg;
2093 if (!TII->getInsertSubregInputs(*Def, DefIdx, BaseReg, InsertedReg))
2094 return ValueTrackerResult();
2095
2096 // We are looking at:
2097 // Def = INSERT_SUBREG v0, v1, sub1
2098 // There are two cases:
2099 // 1. DefSubReg == sub1, get v1.
2100 // 2. DefSubReg != sub1, the value may be available through v0.
2101
2102 // #1 Check if the inserted register matches the required sub index.
2103 if (InsertedReg.SubIdx == DefSubReg) {
2104 return ValueTrackerResult(InsertedReg.Reg, InsertedReg.SubReg);
2105 }
2106 // #2 Otherwise, if the sub register we are looking for is not partial
2107 // defined by the inserted element, we can look through the main
2108 // register (v0).
2109 const MachineOperand &MODef = Def->getOperand(DefIdx);
2110 // If the result register (Def) and the base register (v0) do not
2111 // have the same register class or if we have to compose
2112 // subregisters, bail out.
2113 if (MRI.getRegClass(MODef.getReg()) != MRI.getRegClass(BaseReg.Reg) ||
2114 BaseReg.SubReg)
2115 return ValueTrackerResult();
2116
2117 // Get the TRI and check if the inserted sub-register overlaps with the
2118 // sub-register we are tracking.
2119 const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
2120 if ((TRI->getSubRegIndexLaneMask(DefSubReg) &
2121 TRI->getSubRegIndexLaneMask(InsertedReg.SubIdx))
2122 .any())
2123 return ValueTrackerResult();
2124 // At this point, the value is available in v0 via the same subreg
2125 // we used for Def.
2126 return ValueTrackerResult(BaseReg.Reg, DefSubReg);
2127}
2128
2129ValueTrackerResult ValueTracker::getNextSourceFromExtractSubreg() {
2130 assert((Def->isExtractSubreg() || Def->isExtractSubregLike()) &&
2131 "Invalid definition");
2132 // We are looking at:
2133 // Def = EXTRACT_SUBREG v0, sub0
2134
2135 // Bail if we have to compose sub registers.
2136 // Indeed, if DefSubReg != 0, we would have to compose it with sub0.
2137 if (DefSubReg)
2138 return ValueTrackerResult();
2139
2140 RegSubRegPairAndIdx ExtractSubregInputReg;
2141 if (!TII->getExtractSubregInputs(*Def, DefIdx, ExtractSubregInputReg))
2142 return ValueTrackerResult();
2143
2144 // Bail if we have to compose sub registers.
2145 // Likewise, if v0.subreg != 0, we would have to compose v0.subreg with sub0.
2146 if (ExtractSubregInputReg.SubReg)
2147 return ValueTrackerResult();
2148 // Otherwise, the value is available in the v0.sub0.
2149 return ValueTrackerResult(ExtractSubregInputReg.Reg,
2150 ExtractSubregInputReg.SubIdx);
2151}
2152
2153ValueTrackerResult ValueTracker::getNextSourceFromSubregToReg() {
2154 assert(Def->isSubregToReg() && "Invalid definition");
2155 // We are looking at:
2156 // Def = SUBREG_TO_REG v0, sub0
2157
2158 // Bail if we have to compose sub registers.
2159 // If DefSubReg != sub0, we would have to check that all the bits
2160 // we track are included in sub0 and if yes, we would have to
2161 // determine the right subreg in v0.
2162 if (DefSubReg != Def->getOperand(2).getImm())
2163 return ValueTrackerResult();
2164 // Bail if we have to compose sub registers.
2165 // Likewise, if v0.subreg != 0, we would have to compose it with sub0.
2166 if (Def->getOperand(1).getSubReg())
2167 return ValueTrackerResult();
2168
2169 return ValueTrackerResult(Def->getOperand(1).getReg(),
2170 Def->getOperand(2).getImm());
2171}
2172
2173/// Explore each PHI incoming operand and return its sources.
2174ValueTrackerResult ValueTracker::getNextSourceFromPHI() {
2175 assert(Def->isPHI() && "Invalid definition");
2176 ValueTrackerResult Res;
2177
2178 // Return all register sources for PHI instructions.
2179 for (unsigned i = 1, e = Def->getNumOperands(); i < e; i += 2) {
2180 const MachineOperand &MO = Def->getOperand(i);
2181 assert(MO.isReg() && "Invalid PHI instruction");
2182 // We have no code to deal with undef operands. They shouldn't happen in
2183 // normal programs anyway.
2184 if (MO.isUndef())
2185 return ValueTrackerResult();
2186 Res.addSource(MO.getReg(), MO.getSubReg());
2187 }
2188
2189 return Res;
2190}
2191
2192ValueTrackerResult ValueTracker::getNextSourceImpl() {
2193 assert(Def && "This method needs a valid definition");
2194
2195 assert(((Def->getOperand(DefIdx).isDef() &&
2196 (DefIdx < Def->getDesc().getNumDefs() ||
2197 Def->getDesc().isVariadic())) ||
2198 Def->getOperand(DefIdx).isImplicit()) &&
2199 "Invalid DefIdx");
2200 if (Def->isCopy())
2201 return getNextSourceFromCopy();
2202 if (Def->isBitcast())
2203 return getNextSourceFromBitcast();
2204 // All the remaining cases involve "complex" instructions.
2205 // Bail if we did not ask for the advanced tracking.
2207 return ValueTrackerResult();
2208 if (Def->isRegSequence() || Def->isRegSequenceLike())
2209 return getNextSourceFromRegSequence();
2210 if (Def->isInsertSubreg() || Def->isInsertSubregLike())
2211 return getNextSourceFromInsertSubreg();
2212 if (Def->isExtractSubreg() || Def->isExtractSubregLike())
2213 return getNextSourceFromExtractSubreg();
2214 if (Def->isSubregToReg())
2215 return getNextSourceFromSubregToReg();
2216 if (Def->isPHI())
2217 return getNextSourceFromPHI();
2218 return ValueTrackerResult();
2219}
2220
2221ValueTrackerResult ValueTracker::getNextSource() {
2222 // If we reach a point where we cannot move up in the use-def chain,
2223 // there is nothing we can get.
2224 if (!Def)
2225 return ValueTrackerResult();
2226
2227 ValueTrackerResult Res = getNextSourceImpl();
2228 if (Res.isValid()) {
2229 // Update definition, definition index, and subregister for the
2230 // next call of getNextSource.
2231 // Update the current register.
2232 bool OneRegSrc = Res.getNumSources() == 1;
2233 if (OneRegSrc)
2234 Reg = Res.getSrcReg(0);
2235 // Update the result before moving up in the use-def chain
2236 // with the instruction containing the last found sources.
2237 Res.setInst(Def);
2238
2239 // If we can still move up in the use-def chain, move to the next
2240 // definition.
2241 if (!Reg.isPhysical() && OneRegSrc) {
2243 if (DI != MRI.def_end()) {
2244 Def = DI->getParent();
2245 DefIdx = DI.getOperandNo();
2246 DefSubReg = Res.getSrcSubReg(0);
2247 } else {
2248 Def = nullptr;
2249 }
2250 return Res;
2251 }
2252 }
2253 // If we end up here, this means we will not be able to find another source
2254 // for the next iteration. Make sure any new call to getNextSource bails out
2255 // early by cutting the use-def chain.
2256 Def = nullptr;
2257 return Res;
2258}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
MachineBasicBlock & MBB
This file defines the DenseMap class.
#define DEBUG_TYPE
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
#define I(x, y, z)
Definition MD5.cpp:57
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static cl::opt< unsigned > RewritePHILimit("rewrite-phi-limit", cl::Hidden, cl::init(10), cl::desc("Limit the length of PHI chains to lookup"))
static cl::opt< bool > DisablePeephole("disable-peephole", cl::Hidden, cl::init(false), cl::desc("Disable the peephole optimizer"))
static cl::opt< unsigned > MaxRecurrenceChain("recurrence-chain-limit", cl::Hidden, cl::init(3), cl::desc("Maximum length of recurrence chain when evaluating the benefit " "of commuting operands"))
static cl::opt< bool > DisableNAPhysCopyOpt("disable-non-allocatable-phys-copy-opt", cl::Hidden, cl::init(false), cl::desc("Disable non-allocatable physical register copy optimization"))
static bool isVirtualRegisterOperand(MachineOperand &MO)
\bried Returns true if MO is a virtual register operand.
static MachineInstr & insertPHI(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const SmallVectorImpl< RegSubRegPair > &SrcRegs, MachineInstr &OrigPHI)
Insert a PHI instruction with incoming edges SrcRegs that are guaranteed to have the same register cl...
static cl::opt< bool > Aggressive("aggressive-ext-opt", cl::Hidden, cl::desc("Aggressive extension optimization"))
static cl::opt< bool > DisableAdvCopyOpt("disable-adv-copy-opt", cl::Hidden, cl::init(false), cl::desc("Disable advanced copy optimization"))
Specifiy whether or not the value tracking looks through complex instructions.
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
static RegSubRegPair getNewSource(MachineRegisterInfo *MRI, const TargetInstrInfo *TII, RegSubRegPair Def, const PeepholeOptimizer::RewriteMapTy &RewriteMap, bool HandleMultipleSources=true)
Given a Def.Reg and Def.SubReg pair, use RewriteMap to find the new source to use for rewrite.
Remove Loads Into Fake Uses
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:119
Virtual Register Rewriter
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool erase(const KeyT &Val)
Definition DenseMap.h:328
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:239
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const override
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
bool isLoopHeader(const BlockT *BB) const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
An RAII based helper class to modify MachineFunctionProperties when running pass.
MachineInstrBundleIterator< MachineInstr > iterator
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
bool dominates(const MachineInstr *A, const MachineInstr *B) const
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
void setDelegate(Delegate *delegate)
Set the delegate.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
const MachineBasicBlock * getParent() const
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Analysis pass that exposes the MachineLoopInfo for a machine function.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
use_nodbg_iterator use_nodbg_begin(Register RegNo) const
LLVM_ABI void markUsesInDebugValueAsUndef(Register Reg) const
markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the specified register as undefined wh...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
use_instr_nodbg_iterator use_instr_nodbg_begin(Register RegNo) const
LLVM_ABI bool hasOneNonDBGUser(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
bool isAllocatable(MCRegister PhysReg) const
isAllocatable - Returns true when PhysReg belongs to an allocatable register class and it hasn't been...
defusechain_iterator< false, true, false, true, false > def_iterator
def_iterator/def_begin/def_end - Walk all defs of the specified register.
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
static def_iterator def_end()
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition SmallSet.h:176
bool empty() const
Definition SmallSet.h:169
bool erase(const T &V)
Definition SmallSet.h:200
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
static const unsigned CommuteAnyOperandIndex
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
initializer< Ty > init(const Ty &Val)
DXILDebugInfoMap run(Module &M)
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
LLVM_ABI char & PeepholeOptimizerLegacyID
PeepholeOptimizer - This pass performs peephole optimizations - like extension and comparison elimina...
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ Other
Any other memory.
Definition ModRef.h:68
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.