1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
38#include "llvm/IR/Constants.h"
40#include "llvm/IR/DebugLoc.h"
41#include "llvm/IR/Function.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/LLVMContext.h"
45#include "llvm/IR/Metadata.h"
46#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/MC/MCInstrDesc.h"
53#include "llvm/Support/Debug.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <cstring>
62#include <utility>
63
64using namespace llvm;
65
66static cl::opt<bool>
67 PrintMIAddrs("print-mi-addrs", cl::Hidden,
68 cl::desc("Print addresses of MachineInstrs when dumping"));
69
70static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
71 if (const MachineBasicBlock *MBB = MI.getParent())
72 if (const MachineFunction *MF = MBB->getParent())
73 return MF;
74 return nullptr;
75}
76
77// Try to crawl up to the machine function and get TRI/MRI/TII from it.
78static void tryToGetTargetInfo(const MachineInstr &MI,
79 const TargetRegisterInfo *&TRI,
80 const MachineRegisterInfo *&MRI,
81 const TargetInstrInfo *&TII) {
82
83 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
84 TRI = MF->getSubtarget().getRegisterInfo();
85 MRI = &MF->getRegInfo();
86 TII = MF->getSubtarget().getInstrInfo();
87 }
88}
89
90void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
91 for (MCPhysReg ImpDef : MCID->implicit_defs())
92 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
93 for (MCPhysReg ImpUse : MCID->implicit_uses())
94 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
95}
96
97/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
98/// implicit operands. It reserves space for the number of operands specified by
99/// the MCInstrDesc.
100MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
101 DebugLoc DL, bool NoImp)
102 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
103 DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
104 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
105
106 // Reserve space for the expected number of operands.
107 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
108 MCID->implicit_uses().size()) {
109 CapOperands = OperandCapacity::get(NumOps);
110 Operands = MF.allocateOperandArray(CapOperands);
111 }
112
113 if (!NoImp)
114 addImplicitDefUseOperands(MF);
115}
116
117/// MachineInstr ctor - Copies MachineInstr arg exactly.
118/// Does not copy the number from debug instruction numbering, to preserve
119/// uniqueness.
120MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
121 : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
122 Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
123 Opcode(MI.getOpcode()) {
124 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
125
126 CapOperands = OperandCapacity::get(MI.getNumOperands());
127 Operands = MF.allocateOperandArray(CapOperands);
128
129 // Copy operands.
130 for (const MachineOperand &MO : MI.operands())
131 addOperand(MF, MO);
132
133 // Replicate ties between the operands, which addOperand was not
134 // able to do reliably.
135 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
136 MachineOperand &NewMO = getOperand(i);
137 const MachineOperand &OrigMO = MI.getOperand(i);
138 NewMO.TiedTo = OrigMO.TiedTo;
139 }
140
141 // Copy all the sensible flags.
142 setFlags(MI.Flags);
143}
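// Illustrative sketch (not part of the upstream file): passes normally reach
// this copy constructor through MachineFunction::CloneMachineInstr when
// duplicating an instruction; OrigMI and InsertPt are placeholder names.
//
//   MachineInstr *Clone = MF.CloneMachineInstr(&OrigMI);
//   MBB.insert(InsertPt, Clone);
//
// The clone copies operands, flags and ties, but deliberately gets a fresh
// (zero) debug instruction number, as the comment above notes.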
144
145void MachineInstr::setDesc(const MCInstrDesc &TID) {
146 if (getParent())
147 getMF()->handleChangeDesc(*this, TID);
148 MCID = &TID;
149 Opcode = TID.Opcode;
150}
151
152void MachineInstr::moveBefore(MachineInstr *MovePos) {
153 MovePos->getParent()->splice(MovePos, getParent(), getIterator());
154}
155
156/// getRegInfo - If this instruction is embedded into a MachineFunction,
157/// return the MachineRegisterInfo object for the current function, otherwise
158/// return null.
159MachineRegisterInfo *MachineInstr::getRegInfo() {
160 if (MachineBasicBlock *MBB = getParent())
161 return &MBB->getParent()->getRegInfo();
162 return nullptr;
163}
164
165const MachineRegisterInfo *MachineInstr::getRegInfo() const {
166 if (const MachineBasicBlock *MBB = getParent())
167 return &MBB->getParent()->getRegInfo();
168 return nullptr;
169}
170
171void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
172 for (MachineOperand &MO : operands())
173 if (MO.isReg())
174 MRI.removeRegOperandFromUseList(&MO);
175}
176
177void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
178 for (MachineOperand &MO : operands())
179 if (MO.isReg())
180 MRI.addRegOperandToUseList(&MO);
181}
182
183void MachineInstr::addOperand(const MachineOperand &Op) {
184 MachineBasicBlock *MBB = getParent();
185 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
186 MachineFunction *MF = MBB->getParent();
187 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
188 addOperand(*MF, Op);
189}
190
191/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
192/// ranges. If MRI is non-null also update use-def chains.
193static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
194 unsigned NumOps, MachineRegisterInfo *MRI) {
195 if (MRI)
196 return MRI->moveOperands(Dst, Src, NumOps);
197 // MachineOperand is a trivially copyable type so we can just use memmove.
198 assert(Dst && Src && "Unknown operands");
199 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
200}
201
202/// addOperand - Add the specified operand to the instruction. If it is an
203/// implicit operand, it is added to the end of the operand list. If it is
204/// an explicit operand it is added at the end of the explicit operand list
205/// (before the first implicit operand).
206void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
207 assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
208 "Cannot add more operands.");
209 assert(MCID && "Cannot add operands before providing an instr descriptor");
210
211 // Check if we're adding one of our existing operands.
212 if (&Op >= Operands && &Op < Operands + NumOperands) {
213 // This is unusual: MI->addOperand(MI->getOperand(i)).
214 // If adding Op requires reallocating or moving existing operands around,
215 // the Op reference could go stale. Support it by copying Op.
216 MachineOperand CopyOp(Op);
217 return addOperand(MF, CopyOp);
218 }
219
220 // Find the insert location for the new operand. Implicit registers go at
221 // the end, everything else goes before the implicit regs.
222 //
223 // FIXME: Allow mixed explicit and implicit operands on inline asm.
224 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
225 // implicit-defs, but they must not be moved around. See the FIXME in
226 // InstrEmitter.cpp.
227 unsigned OpNo = getNumOperands();
228 bool isImpReg = Op.isReg() && Op.isImplicit();
229 if (!isImpReg && !isInlineAsm()) {
230 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
231 --OpNo;
232 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
233 }
234 }
235
236 // OpNo now points at the desired insertion point. Unless this is a variadic
237 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
238 // RegMask operands go between the explicit and implicit operands.
239 MachineRegisterInfo *MRI = getRegInfo();
240
241 // Determine if the Operands array needs to be reallocated.
242 // Save the old capacity and operand array.
243 OperandCapacity OldCap = CapOperands;
244 MachineOperand *OldOperands = Operands;
245 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
246 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
247 Operands = MF.allocateOperandArray(CapOperands);
248 // Move the operands before the insertion point.
249 if (OpNo)
250 moveOperands(Operands, OldOperands, OpNo, MRI);
251 }
252
253 // Move the operands following the insertion point.
254 if (OpNo != NumOperands)
255 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
256 MRI);
257 ++NumOperands;
258
259 // Deallocate the old operand array.
260 if (OldOperands != Operands && OldOperands)
261 MF.deallocateOperandArray(OldCap, OldOperands);
262
263 // Copy Op into place. It still needs to be inserted into the MRI use lists.
264 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
265 NewMO->ParentMI = this;
266
267 // When adding a register operand, tell MRI about it.
268 if (NewMO->isReg()) {
269 // Ensure isOnRegUseList() returns false, regardless of Op's status.
270 NewMO->Contents.Reg.Prev = nullptr;
271 // Ignore existing ties. This is not a property that can be copied.
272 NewMO->TiedTo = 0;
273 // Add the new operand to MRI, but only for instructions in an MBB.
274 if (MRI)
275 MRI->addRegOperandToUseList(NewMO);
276 // The MCID operand information isn't accurate until we start adding
277 // explicit operands. The implicit operands are added first, then the
278 // explicits are inserted before them.
279 if (!isImpReg) {
280 // Tie uses to defs as indicated in MCInstrDesc.
281 if (NewMO->isUse()) {
282 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
283 if (DefIdx != -1)
284 tieOperands(DefIdx, OpNo);
285 }
286 // If the register operand is flagged as early, mark the operand as such.
287 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
288 NewMO->setIsEarlyClobber(true);
289 }
290 // Ensure debug instructions set debug flag on register uses.
291 if (NewMO->isUse() && isDebugInstr())
292 NewMO->setIsDebug();
293 }
294}
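// Illustrative sketch (not part of the upstream file): explicit operands are
// usually appended via MachineInstrBuilder, which forwards to addOperand();
// an extra implicit use added afterwards still lands after the explicit
// operands. Opc, DstReg, SrcReg and PhysReg are placeholder names.
//
//   MachineInstrBuilder MIB =
//       BuildMI(MBB, InsertPt, DL, TII->get(Opc), DstReg).addReg(SrcReg);
//   MIB->addOperand(*MBB.getParent(),
//                   MachineOperand::CreateReg(PhysReg, /*isDef=*/false,
//                                             /*isImp=*/true));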
295
296void MachineInstr::removeOperand(unsigned OpNo) {
297 assert(OpNo < getNumOperands() && "Invalid operand number");
298 untieRegOperand(OpNo);
299
300#ifndef NDEBUG
301 // Moving tied operands would break the ties.
302 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
303 if (Operands[i].isReg())
304 assert(!Operands[i].isTied() && "Cannot move tied operands");
305#endif
306
307 MachineRegisterInfo *MRI = getRegInfo();
308 if (MRI && Operands[OpNo].isReg())
309 MRI->removeRegOperandFromUseList(Operands + OpNo);
310
311 // Don't call the MachineOperand destructor. A lot of this code depends on
312 // MachineOperand having a trivial destructor anyway, and adding a call here
313 // wouldn't make it 'destructor-correct'.
314
315 if (unsigned N = NumOperands - 1 - OpNo)
316 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
317 --NumOperands;
318}
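// Illustrative sketch (not part of the upstream file): because removeOperand()
// shifts the remaining operands down, erasing several operands is simplest
// when walking indices from the back, e.g. to drop dead implicit operands:
//
//   for (unsigned I = MI.getNumOperands(); I != 0; --I) {
//     const MachineOperand &MO = MI.getOperand(I - 1);
//     if (MO.isReg() && MO.isImplicit() && MO.isDead())
//       MI.removeOperand(I - 1);
//   }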
319
320void MachineInstr::setExtraInfo(MachineFunction &MF,
321 ArrayRef<MachineMemOperand *> MMOs,
322 MCSymbol *PreInstrSymbol,
323 MCSymbol *PostInstrSymbol,
324 MDNode *HeapAllocMarker, MDNode *PCSections,
325 uint32_t CFIType, MDNode *MMRAs) {
326 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
327 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
328 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
329 bool HasPCSections = PCSections != nullptr;
330 bool HasCFIType = CFIType != 0;
331 bool HasMMRAs = MMRAs != nullptr;
332 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
333 HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;
334
335 // Drop all extra info if there is none.
336 if (NumPointers <= 0) {
337 Info.clear();
338 return;
339 }
340
341 // If more than one pointer, then store out of line. Store heap alloc markers
342 // out of line because PointerSumType cannot hold more than 4 tag types with
343 // 32-bit pointers.
344 // FIXME: Maybe we should make the symbols in the extra info mutable?
345 else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
346 HasCFIType) {
347 Info.set<EIIK_OutOfLine>(
348 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
349 HeapAllocMarker, PCSections, CFIType, MMRAs));
350 return;
351 }
352
353 // Otherwise store the single pointer inline.
354 if (HasPreInstrSymbol)
355 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
356 else if (HasPostInstrSymbol)
357 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
358 else
359 Info.set<EIIK_MMO>(MMOs[0]);
360}
361
362void MachineInstr::dropMemRefs(MachineFunction &MF) {
363 if (memoperands_empty())
364 return;
365
366 setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
367 getHeapAllocMarker(), getPCSections(), getCFIType(),
368 getMMRAMetadata());
369}
370
371void MachineInstr::setMemRefs(MachineFunction &MF,
372 ArrayRef<MachineMemOperand *> MMOs) {
373 if (MMOs.empty()) {
374 dropMemRefs(MF);
375 return;
376 }
377
378 setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
379 getHeapAllocMarker(), getPCSections(), getCFIType(),
380 getMMRAMetadata());
381}
382
383void MachineInstr::addMemOperand(MachineFunction &MF,
384 MachineMemOperand *MO) {
385 SmallVector<MachineMemOperand *, 2> MMOs;
386 MMOs.append(memoperands_begin(), memoperands_end());
387 MMOs.push_back(MO);
388 setMemRefs(MF, MMOs);
389}
390
391void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
392 if (this == &MI)
393 // Nothing to do for a self-clone!
394 return;
395
396 assert(&MF == MI.getMF() &&
397 "Invalid machine functions when cloning memory references!");
398 // See if we can just steal the extra info already allocated for the
399 // instruction. We can do this whenever the pre- and post-instruction symbols
400 // are the same (including null).
401 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
402 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
403 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
404 getPCSections() == MI.getPCSections() && getMMRAMetadata() &&
405 MI.getMMRAMetadata()) {
406 Info = MI.Info;
407 return;
408 }
409
410 // Otherwise, fall back on a copy-based clone.
411 setMemRefs(MF, MI.memoperands());
412}
413
414/// Check to see if the MMOs pointed to by the two MemRefs arrays are
415/// identical.
416static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
417 ArrayRef<MachineMemOperand *> RHS) {
418 if (LHS.size() != RHS.size())
419 return false;
420
421 auto LHSPointees = make_pointee_range(LHS);
422 auto RHSPointees = make_pointee_range(RHS);
423 return std::equal(LHSPointees.begin(), LHSPointees.end(),
424 RHSPointees.begin());
425}
426
427void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
428 ArrayRef<const MachineInstr *> MIs) {
429 // Try handling easy numbers of MIs with simpler mechanisms.
430 if (MIs.empty()) {
431 dropMemRefs(MF);
432 return;
433 }
434 if (MIs.size() == 1) {
435 cloneMemRefs(MF, *MIs[0]);
436 return;
437 }
438 // Because an empty memoperands list provides *no* information and must be
439 // handled conservatively (assuming the instruction can do anything), the only
440 // way to merge with it is to drop all other memoperands.
441 if (MIs[0]->memoperands_empty()) {
442 dropMemRefs(MF);
443 return;
444 }
445
446 // Handle the general case.
447 SmallVector<MachineMemOperand *, 2> MergedMMOs;
448 // Start with the first instruction.
449 assert(&MF == MIs[0]->getMF() &&
450 "Invalid machine functions when cloning memory references!");
451 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
452 // Now walk all the other instructions and accumulate any different MMOs.
453 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
454 assert(&MF == MI.getMF() &&
455 "Invalid machine functions when cloning memory references!");
456
457 // Skip MIs with identical operands to the first. This is a somewhat
458 // arbitrary hack but will catch common cases without being quadratic.
459 // TODO: We could fully implement merge semantics here if needed.
460 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
461 continue;
462
463 // Because an empty memoperands list provides *no* information and must be
464 // handled conservatively (assuming the instruction can do anything), the
465 // only way to merge with it is to drop all other memoperands.
466 if (MI.memoperands_empty()) {
467 dropMemRefs(MF);
468 return;
469 }
470
471 // Otherwise accumulate these into our temporary buffer of the merged state.
472 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
473 }
474
475 setMemRefs(MF, MergedMMOs);
476}
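// Illustrative sketch (not part of the upstream file): a combiner that folds
// two memory instructions into one wider access would transfer the union of
// their memory operands so later alias queries stay conservative. NewMI,
// LoadA and LoadB are placeholder names.
//
//   NewMI->cloneMergedMemRefs(MF, {LoadA, LoadB});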
477
478void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
479 // Do nothing if old and new symbols are the same.
480 if (Symbol == getPreInstrSymbol())
481 return;
482
483 // If there was only one symbol and we're removing it, just clear info.
484 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
485 Info.clear();
486 return;
487 }
488
489 setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
490 getHeapAllocMarker(), getPCSections(), getCFIType(),
491 getMMRAMetadata());
492}
493
494void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
495 // Do nothing if old and new symbols are the same.
496 if (Symbol == getPostInstrSymbol())
497 return;
498
499 // If there was only one symbol and we're removing it, just clear info.
500 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
501 Info.clear();
502 return;
503 }
504
505 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
506 getHeapAllocMarker(), getPCSections(), getCFIType(),
507 getMMRAMetadata());
508}
509
510void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
511 // Do nothing if old and new markers are the same.
512 if (Marker == getHeapAllocMarker())
513 return;
514
515 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
516 Marker, getPCSections(), getCFIType(), getMMRAMetadata());
517}
518
519void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
520 // Do nothing if old and new metadata are the same.
521 if (PCSections == getPCSections())
522 return;
523
524 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
525 getHeapAllocMarker(), PCSections, getCFIType(),
526 getMMRAMetadata());
527}
528
529void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
530 // Do nothing if old and new types are the same.
531 if (Type == getCFIType())
532 return;
533
534 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
535 getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata());
536}
537
538void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
539 // Do nothing if old and new metadata are the same.
540 if (MMRAs == getMMRAMetadata())
541 return;
542
543 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
544 getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs);
545}
546
547void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
548 const MachineInstr &MI) {
549 if (this == &MI)
550 // Nothing to do for a self-clone!
551 return;
552
553 assert(&MF == MI.getMF() &&
554 "Invalid machine functions when cloning instruction symbols!");
555
556 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
557 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
558 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
559 setPCSections(MF, MI.getPCSections());
560 setMMRAMetadata(MF, MI.getMMRAMetadata());
561}
562
563uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
564 // For now, just return the union of the flags. If the flags get more
565 // complicated over time, we might need more logic here.
566 return getFlags() | Other.getFlags();
567}
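// Illustrative sketch (not part of the upstream file): a transform that
// replaces two instructions MIa and MIb with a single NewMI can carry their
// flags over with the union computed above:
//
//   NewMI->setFlags(MIa.mergeFlagsWith(MIb));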
568
569uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
570 uint32_t MIFlags = 0;
571 // Copy the wrapping flags.
572 if (const OverflowingBinaryOperator *OB =
573 dyn_cast<OverflowingBinaryOperator>(&I)) {
574 if (OB->hasNoSignedWrap())
575 MIFlags |= MachineInstr::MIFlag::NoSWrap;
576 if (OB->hasNoUnsignedWrap())
577 MIFlags |= MachineInstr::MIFlag::NoUWrap;
578 } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
579 if (TI->hasNoSignedWrap())
580 MIFlags |= MachineInstr::MIFlag::NoSWrap;
581 if (TI->hasNoUnsignedWrap())
582 MIFlags |= MachineInstr::MIFlag::NoUWrap;
583 } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
584 if (GEP->hasNoUnsignedSignedWrap())
585 MIFlags |= MachineInstr::MIFlag::NoUSWrap;
586 if (GEP->hasNoUnsignedWrap())
587 MIFlags |= MachineInstr::MIFlag::NoUWrap;
588 if (GEP->isInBounds())
589 MIFlags |= MachineInstr::MIFlag::InBounds;
590 }
591
592 // Copy the nonneg flag.
593 if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
594 if (PNI->hasNonNeg())
595 MIFlags |= MachineInstr::MIFlag::NonNeg;
596 // Copy the disjoint flag.
597 } else if (const PossiblyDisjointInst *PD =
598 dyn_cast<PossiblyDisjointInst>(&I)) {
599 if (PD->isDisjoint())
600 MIFlags |= MachineInstr::MIFlag::Disjoint;
601 }
602
603 // Copy the samesign flag.
604 if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
605 if (ICmp->hasSameSign())
606 MIFlags |= MachineInstr::MIFlag::SameSign;
607
608 // Copy the exact flag.
609 if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
610 if (PE->isExact())
611 MIFlags |= MachineInstr::MIFlag::IsExact;
612
613 // Copy the fast-math flags.
614 if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
615 const FastMathFlags Flags = FP->getFastMathFlags();
616 if (Flags.noNaNs())
617 MIFlags |= MachineInstr::MIFlag::FmNoNans;
618 if (Flags.noInfs())
619 MIFlags |= MachineInstr::MIFlag::FmNoInfs;
620 if (Flags.noSignedZeros())
621 MIFlags |= MachineInstr::MIFlag::FmNsz;
622 if (Flags.allowReciprocal())
623 MIFlags |= MachineInstr::MIFlag::FmArcp;
624 if (Flags.allowContract())
625 MIFlags |= MachineInstr::MIFlag::FmContract;
626 if (Flags.approxFunc())
627 MIFlags |= MachineInstr::MIFlag::FmAfn;
628 if (Flags.allowReassoc())
629 MIFlags |= MachineInstr::MIFlag::FmReassoc;
630 }
631
632 if (I.getMetadata(LLVMContext::MD_unpredictable))
633 MIFlags |= MachineInstr::MIFlag::Unpredictable;
634
635 return MIFlags;
636}
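// Illustrative sketch (not part of the upstream file): an instruction
// selector can transfer the IR-level wrapping, exactness and fast-math flags
// of an IR instruction onto the machine instruction it emits; IRInst and MIB
// are placeholder names.
//
//   MIB->setFlags(MachineInstr::copyFlagsFromInstruction(IRInst));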
637
641
642bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
643 assert(!isBundledWithPred() && "Must be called on bundle header");
644 for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
645 if (MII->getDesc().getFlags() & Mask) {
646 if (Type == AnyInBundle)
647 return true;
648 } else {
649 if (Type == AllInBundle && !MII->isBundle())
650 return false;
651 }
652 // This was the last instruction in the bundle.
653 if (!MII->isBundledWithSucc())
654 return Type == AllInBundle;
655 }
656}
657
658bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
659 MICheckType Check) const {
660 // If opcodes or number of operands are not the same then the two
661 // instructions are obviously not identical.
662 if (Other.getOpcode() != getOpcode() ||
663 Other.getNumOperands() != getNumOperands())
664 return false;
665
666 if (isBundle()) {
667 // We have passed the test above that both instructions have the same
668 // opcode, so we know that both instructions are bundles here. Let's compare
669 // MIs inside the bundle.
670 assert(Other.isBundle() && "Expected that both instructions are bundles.");
671 MachineBasicBlock::const_instr_iterator I1 = getIterator();
672 MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
673 // Loop until we have analyzed the last instruction inside at least one of the
674 // bundles.
675 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
676 ++I1;
677 ++I2;
678 if (!I1->isIdenticalTo(*I2, Check))
679 return false;
680 }
681 // If we've reached the end of just one of the two bundles, but not both,
682 // the instructions are not identical.
683 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
684 return false;
685 }
686
687 // Check operands to make sure they match.
688 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
689 const MachineOperand &MO = getOperand(i);
690 const MachineOperand &OMO = Other.getOperand(i);
691 if (!MO.isReg()) {
692 if (!MO.isIdenticalTo(OMO))
693 return false;
694 continue;
695 }
696
697 // Clients may or may not want to ignore defs when testing for equality.
698 // For example, machine CSE pass only cares about finding common
699 // subexpressions, so it's safe to ignore virtual register defs.
700 if (MO.isDef()) {
701 if (Check == IgnoreDefs)
702 continue;
703 else if (Check == IgnoreVRegDefs) {
704 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
705 if (!MO.isIdenticalTo(OMO))
706 return false;
707 } else {
708 if (!MO.isIdenticalTo(OMO))
709 return false;
710 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
711 return false;
712 }
713 } else {
714 if (!MO.isIdenticalTo(OMO))
715 return false;
716 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
717 return false;
718 }
719 }
720 // If DebugLoc does not match then two debug instructions are not identical.
721 if (isDebugInstr())
722 if (getDebugLoc() && Other.getDebugLoc() &&
723 getDebugLoc() != Other.getDebugLoc())
724 return false;
725 // If pre- or post-instruction symbols do not match then the two instructions
726 // are not identical.
727 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
728 getPostInstrSymbol() != Other.getPostInstrSymbol())
729 return false;
730 // Call instructions with different CFI types are not identical.
731 if (isCall() && getCFIType() != Other.getCFIType())
732 return false;
733
734 return true;
735}
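// Illustrative sketch (not part of the upstream file): a CSE-style client
// compares candidate instructions while ignoring virtual register defs, so
// two computations of the same value into different vregs still match:
//
//   if (CandMI->isIdenticalTo(*PrevMI, MachineInstr::IgnoreVRegDefs)) {
//     // ... reuse PrevMI's result and erase CandMI ...
//   }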
736
737bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
738 if (!isDebugValueLike() || !Other.isDebugValueLike())
739 return false;
740 if (getDebugLoc() != Other.getDebugLoc())
741 return false;
742 if (getDebugVariable() != Other.getDebugVariable())
743 return false;
744 if (getNumDebugOperands() != Other.getNumDebugOperands())
745 return false;
746 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
747 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
748 return false;
749 if (!DIExpression::isEqualExpression(
750 getDebugExpression(), isIndirectDebugValue(),
751 Other.getDebugExpression(), Other.isIndirectDebugValue()))
752 return false;
753 return true;
754}
755
756const MachineFunction *MachineInstr::getMF() const {
757 return getParent()->getParent();
758}
759
760MachineInstr *MachineInstr::removeFromParent() {
761 assert(getParent() && "Not embedded in a basic block!");
762 return getParent()->remove(this);
763}
764
765MachineInstr *MachineInstr::removeFromBundle() {
766 assert(getParent() && "Not embedded in a basic block!");
767 return getParent()->remove_instr(this);
768}
769
770void MachineInstr::eraseFromParent() {
771 assert(getParent() && "Not embedded in a basic block!");
772 getParent()->erase(this);
773}
774
775void MachineInstr::eraseFromBundle() {
776 assert(getParent() && "Not embedded in a basic block!");
777 getParent()->erase_instr(this);
778}
779
781 if (!isCall(Type))
782 return false;
783 switch (getOpcode()) {
784 case TargetOpcode::PATCHPOINT:
785 case TargetOpcode::STACKMAP:
786 case TargetOpcode::STATEPOINT:
787 case TargetOpcode::FENTRY_CALL:
788 return false;
789 }
790 return true;
791}
792
798
799template <typename Operand, typename Instruction>
800static iterator_range<
801 filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
802getDebugOperandsForReg(Instruction *MI, Register Reg) {
803 std::function<bool(Operand & Op)> OpUsesReg(
804 [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
805 return make_filter_range(MI->debug_operands(), OpUsesReg);
806}
807
808iterator_range<filter_iterator<const MachineOperand *,
809 std::function<bool(const MachineOperand &Op)>>>
810MachineInstr::getDebugOperandsForReg(Register Reg) const {
811 return getDebugOperandsForReg<const MachineOperand, const MachineInstr>(
812 this, Reg);
813}
814
815iterator_range<filter_iterator<MachineOperand *,
816 std::function<bool(MachineOperand &Op)>>>
817MachineInstr::getDebugOperandsForReg(Register Reg) {
818 return getDebugOperandsForReg<MachineOperand, MachineInstr>(this, Reg);
819}
820
821unsigned MachineInstr::getNumExplicitOperands() const {
822 unsigned NumOperands = MCID->getNumOperands();
823 if (!MCID->isVariadic())
824 return NumOperands;
825
826 for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
827 // The operands must always be in the following order:
828 // - explicit reg defs,
829 // - other explicit operands (reg uses, immediates, etc.),
830 // - implicit reg defs
831 // - implicit reg uses
832 if (MO.isReg() && MO.isImplicit())
833 break;
834 ++NumOperands;
835 }
836 return NumOperands;
837}
838
839unsigned MachineInstr::getNumExplicitDefs() const {
840 unsigned NumDefs = MCID->getNumDefs();
841 if (!MCID->isVariadic())
842 return NumDefs;
843
844 for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
845 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
846 break;
847 ++NumDefs;
848 }
849 return NumDefs;
850}
851
852void MachineInstr::bundleWithPred() {
853 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
854 setFlag(BundledPred);
855 MachineBasicBlock::instr_iterator Pred = getIterator();
856 --Pred;
857 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
858 Pred->setFlag(BundledSucc);
859}
860
861void MachineInstr::bundleWithSucc() {
862 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
863 setFlag(BundledSucc);
864 MachineBasicBlock::instr_iterator Succ = getIterator();
865 ++Succ;
866 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
867 Succ->setFlag(BundledPred);
868}
869
870void MachineInstr::unbundleFromPred() {
871 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
872 clearFlag(BundledPred);
873 MachineBasicBlock::instr_iterator Pred = getIterator();
874 --Pred;
875 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
876 Pred->clearFlag(BundledSucc);
877}
878
879void MachineInstr::unbundleFromSucc() {
880 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
881 clearFlag(BundledSucc);
882 MachineBasicBlock::instr_iterator Succ = getIterator();
883 ++Succ;
884 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
885 Succ->clearFlag(BundledPred);
886}
887
888bool MachineInstr::isStackAligningInlineAsm() const {
889 if (isInlineAsm()) {
890 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
891 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
892 return true;
893 }
894 return false;
895}
896
897InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
898 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
899 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
900 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
901}
902
903int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
904 unsigned *GroupNo) const {
905 assert(isInlineAsm() && "Expected an inline asm instruction");
906 assert(OpIdx < getNumOperands() && "OpIdx out of range");
907
908 // Ignore queries about the initial operands.
909 if (OpIdx < InlineAsm::MIOp_FirstOperand)
910 return -1;
911
912 unsigned Group = 0;
913 unsigned NumOps;
914 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
915 i += NumOps) {
916 const MachineOperand &FlagMO = getOperand(i);
917 // If we reach the implicit register operands, stop looking.
918 if (!FlagMO.isImm())
919 return -1;
920 const InlineAsm::Flag F(FlagMO.getImm());
921 NumOps = 1 + F.getNumOperandRegisters();
922 if (i + NumOps > OpIdx) {
923 if (GroupNo)
924 *GroupNo = Group;
925 return i;
926 }
927 ++Group;
928 }
929 return -1;
930}
931
932const DILabel *MachineInstr::getDebugLabel() const {
933 assert(isDebugLabel() && "not a DBG_LABEL");
934 return cast<DILabel>(getOperand(0).getMetadata());
935}
936
937const MachineOperand &MachineInstr::getDebugVariableOp() const {
938 assert((isDebugValueLike()) && "not a DBG_VALUE*");
939 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
940 return getOperand(VariableOp);
941}
942
943MachineOperand &MachineInstr::getDebugVariableOp() {
944 assert((isDebugValueLike()) && "not a DBG_VALUE*");
945 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
946 return getOperand(VariableOp);
947}
948
949const DILocalVariable *MachineInstr::getDebugVariable() const {
950 return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
951}
952
953const MachineOperand &MachineInstr::getDebugExpressionOp() const {
954 assert((isDebugValueLike()) && "not a DBG_VALUE*");
955 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
956 return getOperand(ExpressionOp);
957}
958
959MachineOperand &MachineInstr::getDebugExpressionOp() {
960 assert((isDebugValueLike()) && "not a DBG_VALUE*");
961 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
962 return getOperand(ExpressionOp);
963}
964
965const DIExpression *MachineInstr::getDebugExpression() const {
966 return cast<DIExpression>(getDebugExpressionOp().getMetadata());
967}
968
969bool MachineInstr::isDebugEntryValue() const {
970 return isDebugValue() && getDebugExpression()->isEntryValue();
971}
972
973const TargetRegisterClass *
974MachineInstr::getRegClassConstraint(unsigned OpIdx,
975 const TargetInstrInfo *TII,
976 const TargetRegisterInfo *TRI) const {
977 assert(getParent() && "Can't have an MBB reference here!");
978 assert(getMF() && "Can't have an MF reference here!");
979 // Most opcodes have fixed constraints in their MCInstrDesc.
980 if (!isInlineAsm())
981 return TII->getRegClass(getDesc(), OpIdx, TRI);
982
983 if (!getOperand(OpIdx).isReg())
984 return nullptr;
985
986 // For tied uses on inline asm, get the constraint from the def.
987 unsigned DefIdx;
988 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
989 OpIdx = DefIdx;
990
991 // Inline asm stores register class constraints in the flag word.
992 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
993 if (FlagIdx < 0)
994 return nullptr;
995
996 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
997 unsigned RCID;
998 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
999 F.hasRegClassConstraint(RCID))
1000 return TRI->getRegClass(RCID);
1001
1002 // Assume that all registers in a memory operand are pointers.
1003 if (F.isMemKind())
1004 return TRI->getPointerRegClass();
1005
1006 return nullptr;
1007}
1008
1009const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
1010 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
1011 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
1012 // Check every operand inside the bundle if we have
1013 // been asked to.
1014 if (ExploreBundle)
1015 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
1016 ++OpndIt)
1017 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
1018 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
1019 else
1020 // Otherwise, just check the current operands.
1021 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
1022 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
1023 return CurRC;
1024}
1025
1026const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1027 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1028 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1029 assert(CurRC && "Invalid initial register class");
1030 // Check if Reg is constrained by some of its use/def from MI.
1031 const MachineOperand &MO = getOperand(OpIdx);
1032 if (!MO.isReg() || MO.getReg() != Reg)
1033 return CurRC;
1034 // If yes, accumulate the constraints through the operand.
1035 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1036}
1037
1038const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
1039 unsigned OpIdx, const TargetRegisterClass *CurRC,
1040 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1041 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
1042 const MachineOperand &MO = getOperand(OpIdx);
1043 assert(MO.isReg() &&
1044 "Cannot get register constraints for non-register operand");
1045 assert(CurRC && "Invalid initial register class");
1046 if (unsigned SubIdx = MO.getSubReg()) {
1047 if (OpRC)
1048 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1049 else
1050 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1051 } else if (OpRC)
1052 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1053 return CurRC;
1054}
1055
1056/// Return the number of instructions inside the MI bundle, not counting the
1057/// header instruction.
1060 unsigned Size = 0;
1061 while (I->isBundledWithSucc()) {
1062 ++Size;
1063 ++I;
1064 }
1065 return Size;
1066}
1067
1068/// Returns true if the MachineInstr has an implicit-use operand of exactly
1069/// the given register (not considering sub/super-registers).
1070bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
1071 for (const MachineOperand &MO : implicit_operands()) {
1072 if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1073 return true;
1074 }
1075 return false;
1076}
1077
1078/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
1079/// the specific register or -1 if it is not found. It further tightens
1080/// the search criteria to a use that kills the register if isKill is true.
1081int MachineInstr::findRegisterUseOperandIdx(Register Reg,
1082 const TargetRegisterInfo *TRI,
1083 bool isKill) const {
1084 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1085 const MachineOperand &MO = getOperand(i);
1086 if (!MO.isReg() || !MO.isUse())
1087 continue;
1088 Register MOReg = MO.getReg();
1089 if (!MOReg)
1090 continue;
1091 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1092 if (!isKill || MO.isKill())
1093 return i;
1094 }
1095 return -1;
1096}
1097
1098/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1099/// indicating if this instruction reads or writes Reg. This also considers
1100/// partial defines.
1101std::pair<bool,bool>
1102MachineInstr::readsWritesVirtualRegister(Register Reg,
1103 SmallVectorImpl<unsigned> *Ops) const {
1104 bool PartDef = false; // Partial redefine.
1105 bool FullDef = false; // Full define.
1106 bool Use = false;
1107
1108 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1109 const MachineOperand &MO = getOperand(i);
1110 if (!MO.isReg() || MO.getReg() != Reg)
1111 continue;
1112 if (Ops)
1113 Ops->push_back(i);
1114 if (MO.isUse())
1115 Use |= !MO.isUndef();
1116 else if (MO.getSubReg() && !MO.isUndef())
1117 // A partial def undef doesn't count as reading the register.
1118 PartDef = true;
1119 else
1120 FullDef = true;
1121 }
1122 // A partial redefine uses Reg unless there is also a full define.
1123 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1124}
1125
1126/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1127/// the specified register or -1 if it is not found. If isDead is true, defs
1128/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1129/// also checks if there is a def of a super-register.
1130int MachineInstr::findRegisterDefOperandIdx(Register Reg,
1131 const TargetRegisterInfo *TRI,
1132 bool isDead, bool Overlap) const {
1133 bool isPhys = Reg.isPhysical();
1134 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1135 const MachineOperand &MO = getOperand(i);
1136 // Accept regmask operands when Overlap is set.
1137 // Ignore them when looking for a specific def operand (Overlap == false).
1138 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1139 return i;
1140 if (!MO.isReg() || !MO.isDef())
1141 continue;
1142 Register MOReg = MO.getReg();
1143 bool Found = (MOReg == Reg);
1144 if (!Found && TRI && isPhys && MOReg.isPhysical()) {
1145 if (Overlap)
1146 Found = TRI->regsOverlap(MOReg, Reg);
1147 else
1148 Found = TRI->isSubRegister(MOReg, Reg);
1149 }
1150 if (Found && (!isDead || MO.isDead()))
1151 return i;
1152 }
1153 return -1;
1154}
1155
1156/// findFirstPredOperandIdx() - Find the index of the first operand in the
1157/// operand list that is used to represent the predicate. It returns -1 if
1158/// none is found.
1159int MachineInstr::findFirstPredOperandIdx() const {
1160 // Don't call MCID.findFirstPredOperandIdx() because this variant
1161 // is sometimes called on an instruction that's not yet complete, and
1162 // so the number of operands is less than the MCID indicates. In
1163 // particular, the PTX target does this.
1164 const MCInstrDesc &MCID = getDesc();
1165 if (MCID.isPredicable()) {
1166 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1167 if (MCID.operands()[i].isPredicate())
1168 return i;
1169 }
1170
1171 return -1;
1172}
1173
1174// MachineOperand::TiedTo is 4 bits wide.
1175const unsigned TiedMax = 15;
1176
1177/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1178///
1179/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1180/// field. TiedTo can have these values:
1181///
1182/// 0: Operand is not tied to anything.
1183/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1184/// TiedMax: Tied to an operand >= TiedMax-1.
1185///
1186/// The tied def must be one of the first TiedMax operands on a normal
1187/// instruction. INLINEASM instructions allow more tied defs.
1188///
1189void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1190 MachineOperand &DefMO = getOperand(DefIdx);
1191 MachineOperand &UseMO = getOperand(UseIdx);
1192 assert(DefMO.isDef() && "DefIdx must be a def operand");
1193 assert(UseMO.isUse() && "UseIdx must be a use operand");
1194 assert(!DefMO.isTied() && "Def is already tied to another use");
1195 assert(!UseMO.isTied() && "Use is already tied to another def");
1196
1197 if (DefIdx < TiedMax) {
1198 UseMO.TiedTo = DefIdx + 1;
1199 } else {
1200 // Inline asm can use the group descriptors to find tied operands,
1201 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1202 // but on normal instruction, the tied def must be within the first TiedMax
1203 // operands.
1204 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1205 "DefIdx out of range");
1206 UseMO.TiedTo = TiedMax;
1207 }
1208
1209 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1210 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1211}
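// Illustrative sketch (not part of the upstream file): most ties come from the
// TIED_TO constraint handled in addOperand(), but code that builds variadic
// instructions such as INLINEASM or STATEPOINT ties operands explicitly;
// DefIdx and UseIdx are placeholder indices into the operand list.
//
//   MI->tieOperands(DefIdx, UseIdx);
//   assert(MI->getOperand(UseIdx).isTied());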
1212
1213/// Given the index of a tied register operand, find the operand it is tied to.
1214/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1215/// which must exist.
1216unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1217 const MachineOperand &MO = getOperand(OpIdx);
1218 assert(MO.isTied() && "Operand isn't tied");
1219
1220 // Normally TiedTo is in range.
1221 if (MO.TiedTo < TiedMax)
1222 return MO.TiedTo - 1;
1223
1224 // Uses on normal instructions can be out of range.
1225 if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
1226 // Normal tied defs must be in the 0..TiedMax-1 range.
1227 if (MO.isUse())
1228 return TiedMax - 1;
1229 // MO is a def. Search for the tied use.
1230 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1231 const MachineOperand &UseMO = getOperand(i);
1232 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1233 return i;
1234 }
1235 llvm_unreachable("Can't find tied use");
1236 }
1237
1238 if (getOpcode() == TargetOpcode::STATEPOINT) {
1239 // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
1240 // on registers.
1241 StatepointOpers SO(this);
1242 unsigned CurUseIdx = SO.getFirstGCPtrIdx();
1243 assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
1244 unsigned NumDefs = getNumDefs();
1245 for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
1246 while (!getOperand(CurUseIdx).isReg())
1247 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1248 if (OpIdx == CurDefIdx)
1249 return CurUseIdx;
1250 if (OpIdx == CurUseIdx)
1251 return CurDefIdx;
1252 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1253 }
1254 llvm_unreachable("Can't find tied use");
1255 }
1256
1257 // Now deal with inline asm by parsing the operand group descriptor flags.
1258 // Find the beginning of each operand group.
1259 SmallVector<unsigned, 8> GroupIdx;
1260 unsigned OpIdxGroup = ~0u;
1261 unsigned NumOps;
1262 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1263 i += NumOps) {
1264 const MachineOperand &FlagMO = getOperand(i);
1265 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1266 unsigned CurGroup = GroupIdx.size();
1267 GroupIdx.push_back(i);
1268 const InlineAsm::Flag F(FlagMO.getImm());
1269 NumOps = 1 + F.getNumOperandRegisters();
1270 // OpIdx belongs to this operand group.
1271 if (OpIdx > i && OpIdx < i + NumOps)
1272 OpIdxGroup = CurGroup;
1273 unsigned TiedGroup;
1274 if (!F.isUseOperandTiedToDef(TiedGroup))
1275 continue;
1276 // Operands in this group are tied to operands in TiedGroup which must be
1277 // earlier. Find the number of operands between the two groups.
1278 unsigned Delta = i - GroupIdx[TiedGroup];
1279
1280 // OpIdx is a use tied to TiedGroup.
1281 if (OpIdxGroup == CurGroup)
1282 return OpIdx - Delta;
1283
1284 // OpIdx is a def tied to this use group.
1285 if (OpIdxGroup == TiedGroup)
1286 return OpIdx + Delta;
1287 }
1288 llvm_unreachable("Invalid tied operand on inline asm");
1289}
1290
1291/// clearKillInfo - Clears kill flags on all operands.
1292///
1293void MachineInstr::clearKillInfo() {
1294 for (MachineOperand &MO : operands()) {
1295 if (MO.isReg() && MO.isUse())
1296 MO.setIsKill(false);
1297 }
1298}
1299
1300void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1301 unsigned SubIdx,
1302 const TargetRegisterInfo &RegInfo) {
1303 if (ToReg.isPhysical()) {
1304 if (SubIdx)
1305 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1306 for (MachineOperand &MO : operands()) {
1307 if (!MO.isReg() || MO.getReg() != FromReg)
1308 continue;
1309 MO.substPhysReg(ToReg, RegInfo);
1310 }
1311 } else {
1312 for (MachineOperand &MO : operands()) {
1313 if (!MO.isReg() || MO.getReg() != FromReg)
1314 continue;
1315 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1316 }
1317 }
1318}
1319
1320/// isSafeToMove - Return true if it is safe to move this instruction. If
1321/// SawStore is set to true, it means that there is a store (or call) between
1322/// the instruction's location and its intended destination.
1323bool MachineInstr::isSafeToMove(bool &SawStore) const {
1324 // Ignore stuff that we obviously can't move.
1325 //
1326 // Treat volatile loads as stores. This is not strictly necessary for
1327 // volatiles, but it is required for atomic loads. It is not allowed to move
1328 // a load across an atomic load with Ordering > Monotonic.
1329 if (mayStore() || isCall() || isPHI() ||
1330 (mayLoad() && hasOrderedMemoryRef())) {
1331 SawStore = true;
1332 return false;
1333 }
1334
1335 // Don't touch instructions that have non-trivial invariants. For example,
1336 // terminators have to be at the end of a basic block.
1337 if (isPosition() || isDebugInstr() || isTerminator() ||
1338 isJumpTableDebugInfo())
1339 return false;
1340
1341 // Don't touch instructions which can have non-load/store effects.
1342 //
1343 // Inline asm has a "sideeffect" marker to indicate whether the asm has
1344 // intentional side-effects. Even if an inline asm is not "sideeffect",
1345 // though, it still can't be speculatively executed: the operation might
1346 // not be valid on the current target, or for some combinations of operands.
1347 // (Some transforms that move an instruction don't speculatively execute it;
1348 // we currently don't try to handle that distinction here.)
1349 //
1350 // Other instructions handled here include those that can raise FP
1351 // exceptions, x86 "DIV" instructions which trap on divide by zero, and
1352 // stack adjustments.
1353 if (mayRaiseFPException() || hasUnmodeledSideEffects() ||
1354 isInlineAsm())
1355 return false;
1356
1357 // See if this instruction does a load. If so, we have to guarantee that the
1358 // loaded value doesn't change between the load and its intended
1359 // destination. The check for isInvariantLoad gives the target the chance to
1360 // classify the load as always returning a constant, e.g. a constant pool
1361 // load.
1362 if (mayLoad() && !isDereferenceableInvariantLoad())
1363 // Otherwise, this is a real load. If there is a store between the load and
1364 // end of block, we can't move it.
1365 return !SawStore;
1366
1367 return true;
1368}
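// Illustrative sketch (not part of the upstream file): code-motion clients
// thread SawStore through a scan of the block so that a load is only
// considered movable while no store or call has been seen; Candidates is a
// placeholder worklist.
//
//   bool SawStore = false;
//   for (MachineInstr &MI : *MBB)
//     if (MI.isSafeToMove(SawStore))
//       Candidates.push_back(&MI);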
1369
1370bool MachineInstr::wouldBeTriviallyDead() const {
1371 // Don't delete frame allocation labels.
1372 // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
1373 if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
1374 return false;
1375
1376 // Don't delete FAKE_USE.
1377 // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
1378 if (isFakeUse())
1379 return false;
1380
1381 // LIFETIME markers should be preserved.
1382 // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
1383 if (isLifetimeMarker())
1384 return false;
1385
1386 // If we can move an instruction, we can remove it. Otherwise, it has
1387 // a side-effect of some sort.
1388 bool SawStore = false;
1389 return isPHI() || isSafeToMove(SawStore);
1390}
1391
1392bool MachineInstr::isDead(const MachineRegisterInfo &MRI,
1393 LiveRegUnits *LivePhysRegs) const {
1394 // Instructions without side-effects are dead iff they only define dead regs.
1395 // This function is hot and this loop returns early in the common case,
1396 // so only perform additional checks before this if absolutely necessary.
1397 for (const MachineOperand &MO : all_defs()) {
1398 Register Reg = MO.getReg();
1399 if (Reg.isPhysical()) {
1400 // Don't delete live physreg defs, or any reserved register defs.
1401 if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
1402 return false;
1403 } else {
1404 if (MO.isDead())
1405 continue;
1406 for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
1407 if (&Use != this)
1408 // This def has a non-debug use. Don't delete the instruction!
1409 return false;
1410 }
1411 }
1412 }
1413
1414 // Technically speaking inline asm without side effects and no defs can still
1415 // be deleted. But there is so much bad inline asm code out there, we should
1416 // let them be.
1417 if (isInlineAsm())
1418 return false;
1419
1420 // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
1421 if (isLifetimeMarker())
1422 return true;
1423
1424 // If there are no defs with uses, then we call the instruction dead so long
1425 // as we do not suspect it may have sideeffects.
1426 return wouldBeTriviallyDead();
1427}
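// Illustrative sketch (not part of the upstream file): a simple dead-code
// sweep walks a block bottom-up and erases instructions whose defs are unused;
// LiveUnits is a placeholder LiveRegUnits, and its per-instruction liveness
// updates are omitted for brevity.
//
//   for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB)))
//     if (MI.isDead(MRI, &LiveUnits))
//       MI.eraseFromParent();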
1428
1429static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI,
1430 BatchAAResults *AA, bool UseTBAA,
1431 const MachineMemOperand *MMOa,
1432 const MachineMemOperand *MMOb) {
1433 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1434 // operates with MachineMemOperand offset with some important assumptions:
1435 // - LLVM fundamentally assumes flat address spaces.
1436 // - MachineOperand offset can *only* result from legalization and cannot
1437 // affect queries other than the trivial case of overlap checking.
1438 // - These offsets never wrap and never step outside of allocated objects.
1439 // - There should never be any negative offsets here.
1440 //
1441 // FIXME: Modify API to hide this math from "user"
1442 // Even before we go to AA we can reason locally about some memory objects. It
1443 // can save compile time, and possibly catch some corner cases not currently
1444 // covered.
1445
1446 int64_t OffsetA = MMOa->getOffset();
1447 int64_t OffsetB = MMOb->getOffset();
1448 int64_t MinOffset = std::min(OffsetA, OffsetB);
1449
1450 LocationSize WidthA = MMOa->getSize();
1451 LocationSize WidthB = MMOb->getSize();
1452 bool KnownWidthA = WidthA.hasValue();
1453 bool KnownWidthB = WidthB.hasValue();
1454 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1455
1456 const Value *ValA = MMOa->getValue();
1457 const Value *ValB = MMOb->getValue();
1458 bool SameVal = (ValA && ValB && (ValA == ValB));
1459 if (!SameVal) {
1460 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1461 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1462 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1463 return false;
1464 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1465 return false;
1466 if (PSVa && PSVb && (PSVa == PSVb))
1467 SameVal = true;
1468 }
1469
1470 if (SameVal && BothMMONonScalable) {
1471 if (!KnownWidthA || !KnownWidthB)
1472 return true;
1473 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1474 int64_t LowWidth = (MinOffset == OffsetA)
1475 ? WidthA.getValue().getKnownMinValue()
1476 : WidthB.getValue().getKnownMinValue();
1477 return (MinOffset + LowWidth > MaxOffset);
1478 }
1479
1480 if (!AA)
1481 return true;
1482
1483 if (!ValA || !ValB)
1484 return true;
1485
1486 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1487 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1488
1489 // If Scalable Location Size has non-zero offset, Width + Offset does not work
1490 // at the moment
1491 if ((WidthA.isScalable() && OffsetA > 0) ||
1492 (WidthB.isScalable() && OffsetB > 0))
1493 return true;
1494
1495 int64_t OverlapA =
1496 KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1497 : MemoryLocation::UnknownSize;
1498 int64_t OverlapB =
1499 KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1500 : MemoryLocation::UnknownSize;
1501
1502 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1503 ? WidthA
1504 : LocationSize::precise(OverlapA);
1505 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1506 ? WidthB
1507 : LocationSize::precise(OverlapB);
1508
1509 return !AA->isNoAlias(
1510 MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1511 MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1512}
1513
1514bool MachineInstr::mayAlias(BatchAAResults *AA, const MachineInstr &Other,
1515 bool UseTBAA) const {
1516 const MachineFunction *MF = getMF();
1517 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1518 const MachineFrameInfo &MFI = MF->getFrameInfo();
1519
1520 // Exclude call instructions, which may alter memory but cannot be handled
1521 // by this function.
1522 if (isCall() || Other.isCall())
1523 return true;
1524
1525 // If neither instruction stores to memory, they can't alias in any
1526 // meaningful way, even if they read from the same address.
1527 if (!mayStore() && !Other.mayStore())
1528 return false;
1529
1530 // Both instructions must be memory operations to be able to alias.
1531 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1532 return false;
1533
1534 // Let the target decide if memory accesses cannot possibly overlap.
1535 if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
1536 return false;
1537
1538 // Memory operations without memory operands may access anything. Be
1539 // conservative and assume `MayAlias`.
1540 if (memoperands_empty() || Other.memoperands_empty())
1541 return true;
1542
1543 // Skip if there are too many memory operands.
1544 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1545 if (NumChecks > TII->getMemOperandAACheckLimit())
1546 return true;
1547
1548 // Check each pair of memory operands from both instructions, which can't
1549 // alias only if all pairs won't alias.
1550 for (auto *MMOa : memoperands()) {
1551 for (auto *MMOb : Other.memoperands()) {
1552 if (!MMOa->isStore() && !MMOb->isStore())
1553 continue;
1554 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1555 return true;
1556 }
1557 }
1558
1559 return false;
1560}
1561
1562bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1563 bool UseTBAA) const {
1564 if (AA) {
1565 BatchAAResults BAA(*AA);
1566 return mayAlias(&BAA, Other, UseTBAA);
1567 }
1568 return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1569}
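// Illustrative sketch (not part of the upstream file): schedulers and
// memory-optimization passes use mayAlias() to decide whether two memory
// instructions may be reordered; AA may be null, in which case only the
// MachineMemOperand-based reasoning above is used.
//
//   if (!LoadMI->mayAlias(AA, *StoreMI, /*UseTBAA=*/true)) {
//     // LoadMI and StoreMI access disjoint memory; reordering is allowed.
//   }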
1570
1571/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1572/// or volatile memory reference, or if the information describing the memory
1573/// reference is not available. Return false if it is known to have no ordered
1574/// memory references.
1575bool MachineInstr::hasOrderedMemoryRef() const {
1576 // An instruction known never to access memory won't have a volatile access.
1577 if (!mayStore() &&
1578 !mayLoad() &&
1579 !isCall() &&
1580 !hasUnmodeledSideEffects())
1581 return false;
1582
1583 // Otherwise, if the instruction has no memory reference information,
1584 // conservatively assume it wasn't preserved.
1585 if (memoperands_empty())
1586 return true;
1587
1588 // Check if any of our memory operands are ordered.
1589 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1590 return !MMO->isUnordered();
1591 });
1592}
1593
1594/// isDereferenceableInvariantLoad - Return true if this instruction will never
1595/// trap and is loading from a location whose value is invariant across a run of
1596/// this function.
1598 // If the instruction doesn't load at all, it isn't an invariant load.
1599 if (!mayLoad())
1600 return false;
1601
1602 // If the instruction has lost its memoperands, conservatively assume that
1603 // it may not be an invariant load.
1604 if (memoperands_empty())
1605 return false;
1606
1607 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1608
1609 for (MachineMemOperand *MMO : memoperands()) {
1610 if (!MMO->isUnordered())
1611 // If the memory operand has ordering side effects, we can't move the
1612 // instruction. Such an instruction is technically an invariant load,
1613 // but the caller code would need to be updated to expect that.
1614 return false;
1615 if (MMO->isStore()) return false;
1616 if (MMO->isInvariant() && MMO->isDereferenceable())
1617 continue;
1618
1619 // A load from a constant PseudoSourceValue is invariant.
1620 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1621 if (PSV->isConstant(&MFI))
1622 continue;
1623 }
1624
1625 // Otherwise assume conservatively.
1626 return false;
1627 }
1628
1629 // Everything checks out.
1630 return true;
1631}
1632
1633Register MachineInstr::isConstantValuePHI() const {
1634 if (!isPHI())
1635 return {};
1636 assert(getNumOperands() >= 3 &&
1637 "It's illegal to have a PHI without source operands");
1638
1639 Register Reg = getOperand(1).getReg();
1640 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1641 if (getOperand(i).getReg() != Reg)
1642 return {};
1643 return Reg;
1644}
1645
1646bool MachineInstr::hasUnmodeledSideEffects() const {
1647 if (hasProperty(MCID::UnmodeledSideEffects))
1648 return true;
1649 if (isInlineAsm()) {
1650 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1651 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1652 return true;
1653 }
1654
1655 return false;
1656}
1657
1658bool MachineInstr::isLoadFoldBarrier() const {
1659 return mayStore() || isCall() ||
1660 hasUnmodeledSideEffects();
1661}
1662
1663/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1664///
1665bool MachineInstr::allDefsAreDead() const {
1666 for (const MachineOperand &MO : operands()) {
1667 if (!MO.isReg() || MO.isUse())
1668 continue;
1669 if (!MO.isDead())
1670 return false;
1671 }
1672 return true;
1673}
1674
1675bool MachineInstr::allImplicitDefsAreDead() const {
1676 for (const MachineOperand &MO : implicit_operands()) {
1677 if (!MO.isReg() || MO.isUse())
1678 continue;
1679 if (!MO.isDead())
1680 return false;
1681 }
1682 return true;
1683}
1684
1685/// copyImplicitOps - Copy implicit register operands from specified
1686/// instruction to this instruction.
1687void MachineInstr::copyImplicitOps(MachineFunction &MF,
1688 const MachineInstr &MI) {
1689 for (const MachineOperand &MO :
1690 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1691 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1692 addOperand(MF, MO);
1693}
1694
1695bool MachineInstr::hasComplexRegisterTies() const {
1696 const MCInstrDesc &MCID = getDesc();
1697 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1698 return true;
1699 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1700 const auto &Operand = getOperand(I);
1701 if (!Operand.isReg() || Operand.isDef())
1702 // Ignore the defined registers as MCID marks only the uses as tied.
1703 continue;
1704 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1705 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1706 if (ExpectedTiedIdx != TiedIdx)
1707 return true;
1708 }
1709 return false;
1710}
1711
1712LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1713 const MachineRegisterInfo &MRI) const {
1714 const MachineOperand &Op = getOperand(OpIdx);
1715 if (!Op.isReg())
1716 return LLT{};
1717
1718 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1719 return MRI.getType(Op.getReg());
1720
1721 auto &OpInfo = getDesc().operands()[OpIdx];
1722 if (!OpInfo.isGenericType())
1723 return MRI.getType(Op.getReg());
1724
1725 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1726 return LLT{};
1727
1728 LLT TypeToPrint = MRI.getType(Op.getReg());
1729 // Don't mark the type index printed if it wasn't actually printed: maybe
1730 // another operand with the same type index has an actual type attached:
1731 if (TypeToPrint.isValid())
1732 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1733 return TypeToPrint;
1734}
1735
1736#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1737LLVM_DUMP_METHOD void MachineInstr::dump() const {
1738 dbgs() << "  ";
1739 print(dbgs());
1740}
1741
1742LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1743 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1744 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1745 if (Depth >= MaxDepth)
1746 return;
1747 if (!AlreadySeenInstrs.insert(this).second)
1748 return;
1749 // PadToColumn always inserts at least one space.
1750 // Don't mess up the alignment if we don't want any space.
1751 if (Depth)
1752 fdbgs().PadToColumn(Depth * 2);
1753 print(fdbgs());
1754 for (const MachineOperand &MO : operands()) {
1755 if (!MO.isReg() || MO.isDef())
1756 continue;
1757 Register Reg = MO.getReg();
1758 if (Reg.isPhysical())
1759 continue;
1760 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1761 if (NewMI == nullptr)
1762 continue;
1763 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1764 }
1765}
1766
1767LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1768 unsigned MaxDepth) const {
1769 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1770 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1771}
1772#endif
1773
1774void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1775 bool SkipDebugLoc, bool AddNewLine,
1776 const TargetInstrInfo *TII) const {
1777 const Module *M = nullptr;
1778 const Function *F = nullptr;
1779 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1780 F = &MF->getFunction();
1781 M = F->getParent();
1782 if (!TII)
1783 TII = MF->getSubtarget().getInstrInfo();
1784 }
1785
1786 ModuleSlotTracker MST(M);
1787 if (F)
1788 MST.incorporateFunction(*F);
1789 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1790}
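// Illustrative sketch (not part of the upstream file): rendering an
// instruction into a string, e.g. for a diagnostic or optimization remark:
//
//   std::string Str;
//   llvm::raw_string_ostream OS(Str);
//   MI.print(OS, /*IsStandalone=*/true, /*SkipOpers=*/false,
//            /*SkipDebugLoc=*/false, /*AddNewLine=*/false);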
1791
1792void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1793 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1794 bool AddNewLine, const TargetInstrInfo *TII) const {
1795 // We can be a bit tidier if we know the MachineFunction.
1796 const TargetRegisterInfo *TRI = nullptr;
1797 const MachineRegisterInfo *MRI = nullptr;
1798 tryToGetTargetInfo(*this, TRI, MRI, TII);
1799
1800 if (isCFIInstruction())
1801 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1802
1803 SmallBitVector PrintedTypes(8);
1804 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1805 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1806 if (!ShouldPrintRegisterTies)
1807 return 0U;
1808 const MachineOperand &MO = getOperand(OpIdx);
1809 if (MO.isReg() && MO.isTied() && !MO.isDef())
1810 return findTiedOperandIdx(OpIdx);
1811 return 0U;
1812 };
1813 unsigned StartOp = 0;
1814 unsigned e = getNumOperands();
1815
1816 // Print explicitly defined operands on the left of an assignment syntax.
1817 while (StartOp < e) {
1818 const MachineOperand &MO = getOperand(StartOp);
1819 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1820 break;
1821
1822 if (StartOp != 0)
1823 OS << ", ";
1824
1825 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1826 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1827 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1828 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1829 ++StartOp;
1830 }
1831
1832 if (StartOp != 0)
1833 OS << " = ";
1834
1835 if (getFlag(MachineInstr::FrameSetup))
1836 OS << "frame-setup ";
1837 if (getFlag(MachineInstr::FrameDestroy))
1838 OS << "frame-destroy ";
1839 if (getFlag(MachineInstr::FmNoNans))
1840 OS << "nnan ";
1841 if (getFlag(MachineInstr::FmNoInfs))
1842 OS << "ninf ";
1843 if (getFlag(MachineInstr::FmNsz))
1844 OS << "nsz ";
1845 if (getFlag(MachineInstr::FmArcp))
1846 OS << "arcp ";
1847 if (getFlag(MachineInstr::FmContract))
1848 OS << "contract ";
1849 if (getFlag(MachineInstr::FmAfn))
1850 OS << "afn ";
1851 if (getFlag(MachineInstr::FmReassoc))
1852 OS << "reassoc ";
1853 if (getFlag(MachineInstr::NoUWrap))
1854 OS << "nuw ";
1855 if (getFlag(MachineInstr::NoSWrap))
1856 OS << "nsw ";
1857 if (getFlag(MachineInstr::IsExact))
1858 OS << "exact ";
1859 if (getFlag(MachineInstr::NoFPExcept))
1860 OS << "nofpexcept ";
1861 if (getFlag(MachineInstr::NoMerge))
1862 OS << "nomerge ";
1863 if (getFlag(MachineInstr::NonNeg))
1864 OS << "nneg ";
1865 if (getFlag(MachineInstr::Disjoint))
1866 OS << "disjoint ";
1867 if (getFlag(MachineInstr::NoUSWrap))
1868 OS << "nusw ";
1869 if (getFlag(MachineInstr::SameSign))
1870 OS << "samesign ";
1871 if (getFlag(MachineInstr::InBounds))
1872 OS << "inbounds ";
1873
1874 // Print the opcode name.
1875 if (TII)
1876 OS << TII->getName(getOpcode());
1877 else
1878 OS << "UNKNOWN";
1879
1880 if (SkipOpers)
1881 return;
1882
1883 // Print the rest of the operands.
1884 bool FirstOp = true;
1885 unsigned AsmDescOp = ~0u;
1886 unsigned AsmOpCount = 0;
1887
1888 if (isInlineAsm()) {
1889 // Print asm string.
1890 OS << " ";
1891 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1892 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1893 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1894 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
1895 IsStandalone, ShouldPrintRegisterTies,
1896 TiedOperandIdx, TRI);
1897
1898 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1899 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1900 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1901 OS << " [sideeffect]";
1902 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1903 OS << " [mayload]";
1904 if (ExtraInfo & InlineAsm::Extra_MayStore)
1905 OS << " [maystore]";
1906 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1907 OS << " [isconvergent]";
1908 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1909 OS << " [alignstack]";
1910 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1911 OS << " [attdialect]";
1912 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1913 OS << " [inteldialect]";
1914
1915 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1916 FirstOp = false;
1917 }
1918
1919 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1920 const MachineOperand &MO = getOperand(i);
1921
1922 if (FirstOp) FirstOp = false; else OS << ",";
1923 OS << " ";
1924
1925 if (isDebugValueLike() && MO.isMetadata()) {
1926 // Pretty print DBG_VALUE* instructions.
1927 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1928 if (DIV && !DIV->getName().empty())
1929 OS << "!\"" << DIV->getName() << '\"';
1930 else {
1931 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1932 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1933 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1934 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1935 }
1936 } else if (isDebugLabel() && MO.isMetadata()) {
1937 // Pretty print DBG_LABEL instructions.
1938 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1939 if (DIL && !DIL->getName().empty())
1940 OS << "\"" << DIL->getName() << '\"';
1941 else {
1942 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1943 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1944 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1945 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1946 }
1947 } else if (i == AsmDescOp && MO.isImm()) {
1948 // Pretty print the inline asm operand descriptor.
1949 OS << '$' << AsmOpCount++;
1950 unsigned Flag = MO.getImm();
1951 const InlineAsm::Flag F(Flag);
1952 OS << ":[";
1953 OS << F.getKindName();
1954
1955 unsigned RCID;
1956 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1957 if (TRI) {
1958 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1959 } else
1960 OS << ":RC" << RCID;
1961 }
1962
1963 if (F.isMemKind()) {
1964 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1965 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1966 }
1967
1968 unsigned TiedTo;
1969 if (F.isUseOperandTiedToDef(TiedTo))
1970 OS << " tiedto:$" << TiedTo;
1971
1972 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1973 F.isRegUseKind()) &&
1974 F.getRegMayBeFolded()) {
1975 OS << " foldable";
1976 }
1977
1978 OS << ']';
1979
1980 // Compute the index of the next operand descriptor.
1981 AsmDescOp += 1 + F.getNumOperandRegisters();
1982 } else {
1983 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1984 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1985 if (MO.isImm() && isOperandSubregIdx(i))
1986 MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1987 else
1988 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1989 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
1990 }
1991 }
1992
1993 // Print any optional symbols attached to this instruction as-if they were
1994 // operands.
1995 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1996 if (!FirstOp) {
1997 FirstOp = false;
1998 OS << ',';
1999 }
2000 OS << " pre-instr-symbol ";
2001 MachineOperand::printSymbol(OS, *PreInstrSymbol);
2002 }
2003 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
2004 if (!FirstOp) {
2005 FirstOp = false;
2006 OS << ',';
2007 }
2008 OS << " post-instr-symbol ";
2009 MachineOperand::printSymbol(OS, *PostInstrSymbol);
2010 }
2011 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
2012 if (!FirstOp) {
2013 FirstOp = false;
2014 OS << ',';
2015 }
2016 OS << " heap-alloc-marker ";
2017 HeapAllocMarker->printAsOperand(OS, MST);
2018 }
2019 if (MDNode *PCSections = getPCSections()) {
2020 if (!FirstOp) {
2021 FirstOp = false;
2022 OS << ',';
2023 }
2024 OS << " pcsections ";
2025 PCSections->printAsOperand(OS, MST);
2026 }
2027 if (MDNode *MMRA = getMMRAMetadata()) {
2028 if (!FirstOp) {
2029 FirstOp = false;
2030 OS << ',';
2031 }
2032 OS << " mmra ";
2033 MMRA->printAsOperand(OS, MST);
2034 }
2035 if (uint32_t CFIType = getCFIType()) {
2036 if (!FirstOp)
2037 OS << ',';
2038 OS << " cfi-type " << CFIType;
2039 }
2040
2041 if (DebugInstrNum) {
2042 if (!FirstOp)
2043 OS << ",";
2044 OS << " debug-instr-number " << DebugInstrNum;
2045 }
2046
2047 if (!SkipDebugLoc) {
2048 if (const DebugLoc &DL = getDebugLoc()) {
2049 if (!FirstOp)
2050 OS << ',';
2051 OS << " debug-location ";
2052 DL->printAsOperand(OS, MST);
2053 }
2054 }
2055
2056 if (!memoperands_empty()) {
2057 SmallVector<StringRef, 0> SSNs;
2058 const LLVMContext *Context = nullptr;
2059 std::unique_ptr<LLVMContext> CtxPtr;
2060 const MachineFrameInfo *MFI = nullptr;
2061 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
2062 MFI = &MF->getFrameInfo();
2063 Context = &MF->getFunction().getContext();
2064 } else {
2065 CtxPtr = std::make_unique<LLVMContext>();
2066 Context = CtxPtr.get();
2067 }
2068
2069 OS << " :: ";
2070 bool NeedComma = false;
2071 for (const MachineMemOperand *Op : memoperands()) {
2072 if (NeedComma)
2073 OS << ", ";
2074 Op->print(OS, MST, SSNs, *Context, MFI, TII);
2075 NeedComma = true;
2076 }
2077 }
2078
2079 if (SkipDebugLoc)
2080 return;
2081
2082 bool HaveSemi = false;
2083
2084 // Print debug location information.
2085 if (const DebugLoc &DL = getDebugLoc()) {
2086 if (!HaveSemi) {
2087 OS << ';';
2088 HaveSemi = true;
2089 }
2090 OS << ' ';
2091 DL.print(OS);
2092 }
2093
2094 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2095 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2096 (isDebugValueList() && getNumOperands() >= 2) ||
2097 (isDebugRef() && getNumOperands() >= 3)) {
2098 if (getDebugVariableOp().isMetadata()) {
2099 if (!HaveSemi) {
2100 OS << ";";
2101 HaveSemi = true;
2102 }
2103 auto *DV = getDebugVariable();
2104 OS << " line no:" << DV->getLine();
2105 if (isIndirectDebugValue())
2106 OS << " indirect";
2107 }
2108 }
2109 // TODO: DBG_LABEL
2110
2111 if (PrintMIAddrs)
2112 OS << " ; " << this;
2113
2114 if (AddNewLine)
2115 OS << '\n';
2116}
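// Illustration (values are hypothetical): the routine above emits explicit
// defs, " = ", any MI flags, the opcode, the remaining operands, and trailing
// annotations, so a typical line looks roughly like
//   %3:gpr64 = ADDXrr %1, %2, debug-location !15
// while inline asm operand descriptors print in a "$0:[regdef:GR64]"-like
// form.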
2117
2118bool MachineInstr::addRegisterKilled(Register IncomingReg,
2119 const TargetRegisterInfo *RegInfo,
2120 bool AddIfNotFound) {
2121 bool isPhysReg = IncomingReg.isPhysical();
2122 bool hasAliases = isPhysReg &&
2123 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2124 bool Found = false;
2125 SmallVector<unsigned, 4> DeadOps;
2126 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2127 MachineOperand &MO = getOperand(i);
2128 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2129 continue;
2130
2131 // DEBUG_VALUE nodes do not contribute to code generation and should
2132 // always be ignored. Failure to do so may result in trying to modify
2133 // KILL flags on DEBUG_VALUE nodes.
2134 if (MO.isDebug())
2135 continue;
2136
2137 Register Reg = MO.getReg();
2138 if (!Reg)
2139 continue;
2140
2141 if (Reg == IncomingReg) {
2142 if (!Found) {
2143 if (MO.isKill())
2144 // The register is already marked kill.
2145 return true;
2146 if (isPhysReg && isRegTiedToDefOperand(i))
2147 // Two-address uses of physregs must not be marked kill.
2148 return true;
2149 MO.setIsKill();
2150 Found = true;
2151 }
2152 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2153 // A super-register kill already exists.
2154 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2155 return true;
2156 if (RegInfo->isSubRegister(IncomingReg, Reg))
2157 DeadOps.push_back(i);
2158 }
2159 }
2160
2161 // Trim unneeded kill operands.
2162 while (!DeadOps.empty()) {
2163 unsigned OpIdx = DeadOps.back();
2164 if (getOperand(OpIdx).isImplicit() &&
2165 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2166 removeOperand(OpIdx);
2167 else
2168 getOperand(OpIdx).setIsKill(false);
2169 DeadOps.pop_back();
2170 }
2171
2172 // If not found, this means an alias of one of the operands is killed. Add a
2173 // new implicit operand if required.
2174 if (!Found && AddIfNotFound) {
2175 addOperand(MachineOperand::CreateReg(IncomingReg,
2176 false /*IsDef*/,
2177 true /*IsImp*/,
2178 true /*IsKill*/));
2179 return true;
2180 }
2181 return Found;
2182}
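// Note: when IncomingReg itself never appears as a use operand (only an
// aliasing register does), the AddIfNotFound path above appends a new
// implicit killed use of IncomingReg instead of modifying an existing
// operand.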
2183
2184void MachineInstr::clearRegisterKills(Register Reg,
2185 const TargetRegisterInfo *RegInfo) {
2186 if (!Reg.isPhysical())
2187 RegInfo = nullptr;
2188 for (MachineOperand &MO : operands()) {
2189 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2190 continue;
2191 Register OpReg = MO.getReg();
2192 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2193 MO.setIsKill(false);
2194 }
2195}
2196
2197bool MachineInstr::addRegisterDead(Register Reg,
2198 const TargetRegisterInfo *RegInfo,
2199 bool AddIfNotFound) {
2200 bool isPhysReg = Reg.isPhysical();
2201 bool hasAliases = isPhysReg &&
2202 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2203 bool Found = false;
2204 SmallVector<unsigned, 4> DeadOps;
2205 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2206 MachineOperand &MO = getOperand(i);
2207 if (!MO.isReg() || !MO.isDef())
2208 continue;
2209 Register MOReg = MO.getReg();
2210 if (!MOReg)
2211 continue;
2212
2213 if (MOReg == Reg) {
2214 MO.setIsDead();
2215 Found = true;
2216 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2217 // There exists a super-register that's marked dead.
2218 if (RegInfo->isSuperRegister(Reg, MOReg))
2219 return true;
2220 if (RegInfo->isSubRegister(Reg, MOReg))
2221 DeadOps.push_back(i);
2222 }
2223 }
2224
2225 // Trim unneeded dead operands.
2226 while (!DeadOps.empty()) {
2227 unsigned OpIdx = DeadOps.back();
2228 if (getOperand(OpIdx).isImplicit() &&
2229 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2230 removeOperand(OpIdx);
2231 else
2232 getOperand(OpIdx).setIsDead(false);
2233 DeadOps.pop_back();
2234 }
2235
2236 // If not found, this means an alias of one of the operands is dead. Add a
2237 // new implicit operand if required.
2238 if (Found || !AddIfNotFound)
2239 return Found;
2240
2241 addOperand(MachineOperand::CreateReg(Reg,
2242 true /*IsDef*/,
2243 true /*IsImp*/,
2244 false /*IsKill*/,
2245 true /*IsDead*/));
2246 return true;
2247}
2248
2249void MachineInstr::clearRegisterDeads(Register Reg) {
2250 for (MachineOperand &MO : all_defs())
2251 if (MO.getReg() == Reg)
2252 MO.setIsDead(false);
2253}
2254
2255void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2256 for (MachineOperand &MO : all_defs())
2257 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2258 MO.setIsUndef(IsUndef);
2259}
2260
2261void MachineInstr::addRegisterDefined(Register Reg,
2262 const TargetRegisterInfo *RegInfo) {
2263 if (Reg.isPhysical()) {
2264 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2265 if (MO)
2266 return;
2267 } else {
2268 for (const MachineOperand &MO : all_defs()) {
2269 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2270 return;
2271 }
2272 }
2273 addOperand(MachineOperand::CreateReg(Reg,
2274 true /*IsDef*/,
2275 true /*IsImp*/));
2276}
2277
2278void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2279 const TargetRegisterInfo &TRI) {
2280 bool HasRegMask = false;
2281 for (MachineOperand &MO : operands()) {
2282 if (MO.isRegMask()) {
2283 HasRegMask = true;
2284 continue;
2285 }
2286 if (!MO.isReg() || !MO.isDef()) continue;
2287 Register Reg = MO.getReg();
2288 if (!Reg.isPhysical())
2289 continue;
2290 // If there are no uses, including partial uses, the def is dead.
2291 if (llvm::none_of(UsedRegs,
2292 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2293 MO.setIsDead();
2294 }
2295
2296 // This is a call with a register mask operand.
2297 // Mask clobbers are always dead, so add defs for the non-dead defines.
2298 if (HasRegMask)
2299 for (const Register &UsedReg : UsedRegs)
2300 addRegisterDefined(UsedReg, &TRI);
2301}
2302
2303unsigned
2304MachineInstrExpressionTrait::getHashValue(const MachineInstr *const &MI) {
2305 // Build up a buffer of hash code components.
2306 SmallVector<size_t, 16> HashComponents;
2307 HashComponents.reserve(MI->getNumOperands() + 1);
2308 HashComponents.push_back(MI->getOpcode());
2309 for (const MachineOperand &MO : MI->operands()) {
2310 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2311 continue; // Skip virtual register defs.
2312
2313 HashComponents.push_back(hash_value(MO));
2314 }
2315 return hash_combine_range(HashComponents);
2316}
2317
2318const MDNode *MachineInstr::getLocCookieMD() const {
2319 // Find the source location cookie.
2320 const MDNode *LocMD = nullptr;
2321 for (unsigned i = getNumOperands(); i != 0; --i) {
2322 if (getOperand(i-1).isMetadata() &&
2323 (LocMD = getOperand(i-1).getMetadata()) &&
2324 LocMD->getNumOperands() != 0) {
2325 if (mdconst::hasa<ConstantInt>(LocMD->getOperand(0)))
2326 return LocMD;
2327 }
2328 }
2329
2330 return nullptr;
2331}
2332
2333void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2334 assert(isInlineAsm() && "Not an inline asm instruction!");
2335 const MDNode *LocMD = getLocCookieMD();
2336 uint64_t LocCookie =
2337 LocMD
2338 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2339 : 0;
2340 LLVMContext &Ctx = getMF()->getFunction().getContext();
2341 Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2342}
2343
2344void MachineInstr::emitGenericError(const Twine &Msg) const {
2345 const Function &Fn = getMF()->getFunction();
2346 Fn.getContext().diagnose(
2347 DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2348}
2349
2350MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2351 const MCInstrDesc &MCID, bool IsIndirect,
2352 Register Reg, const MDNode *Variable,
2353 const MDNode *Expr) {
2354 assert(isa<DILocalVariable>(Variable) && "not a variable");
2355 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2356 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2357 "Expected inlined-at fields to agree");
2358 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2359 if (IsIndirect)
2360 MIB.addImm(0U);
2361 else
2362 MIB.addReg(0U);
2363 return MIB.addMetadata(Variable).addMetadata(Expr);
2364}
2365
2366MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2367 const MCInstrDesc &MCID, bool IsIndirect,
2368 ArrayRef<MachineOperand> DebugOps,
2369 const MDNode *Variable, const MDNode *Expr) {
2370 assert(isa<DILocalVariable>(Variable) && "not a variable");
2371 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2372 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2373 "Expected inlined-at fields to agree");
2374 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2375 assert(DebugOps.size() == 1 &&
2376 "DBG_VALUE must contain exactly one debug operand");
2377 MachineOperand DebugOp = DebugOps[0];
2378 if (DebugOp.isReg())
2379 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2380 Expr);
2381
2382 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2383 if (IsIndirect)
2384 MIB.addImm(0U);
2385 else
2386 MIB.addReg(0U);
2387 return MIB.addMetadata(Variable).addMetadata(Expr);
2388 }
2389
2390 auto MIB = BuildMI(MF, DL, MCID);
2391 MIB.addMetadata(Variable).addMetadata(Expr);
2392 for (const MachineOperand &DebugOp : DebugOps)
2393 if (DebugOp.isReg())
2394 MIB.addReg(DebugOp.getReg());
2395 else
2396 MIB.add(DebugOp);
2397 return MIB;
2398}
2399
2400MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2401 MachineBasicBlock::iterator I,
2402 const DebugLoc &DL, const MCInstrDesc &MCID,
2403 bool IsIndirect, Register Reg,
2404 const MDNode *Variable, const MDNode *Expr) {
2405 MachineFunction &MF = *BB.getParent();
2406 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2407 BB.insert(I, MI);
2408 return MachineInstrBuilder(MF, MI);
2409}
2410
2411MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2412 MachineBasicBlock::iterator I,
2413 const DebugLoc &DL, const MCInstrDesc &MCID,
2414 bool IsIndirect,
2415 ArrayRef<MachineOperand> DebugOps,
2416 const MDNode *Variable, const MDNode *Expr) {
2417 MachineFunction &MF = *BB.getParent();
2418 MachineInstr *MI =
2419 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2420 BB.insert(I, MI);
2421 return MachineInstrBuilder(MF, *MI);
2422}
2423
2424/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2425/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2426static const DIExpression *computeExprForSpill(
2427 const MachineInstr &MI,
2428 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2429 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2430 "Expected inlined-at fields to agree");
2431
2432 const DIExpression *Expr = MI.getDebugExpression();
2433 if (MI.isIndirectDebugValue()) {
2434 assert(MI.getDebugOffset().getImm() == 0 &&
2435 "DBG_VALUE with nonzero offset");
2436 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2437 } else if (MI.isDebugValueList()) {
2438 // We will replace the spilled register with a frame index, so
2439 // immediately deref all references to the spilled register.
2440 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2441 for (const MachineOperand *Op : SpilledOperands) {
2442 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2443 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2444 }
2445 }
2446 return Expr;
2447}
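// Example (illustrative): spilling an indirect DBG_VALUE whose expression is
// empty turns !DIExpression() into !DIExpression(DW_OP_deref); for a
// DBG_VALUE_LIST, DW_OP_deref is instead appended after each DW_OP_LLVM_arg
// that refers to a spilled operand.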
2448static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2449 Register SpillReg) {
2450 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2451 SmallVector<const MachineOperand *> SpillOperands(
2452 llvm::make_pointer_range(MI.getDebugOperandsForReg(SpillReg)));
2453 return computeExprForSpill(MI, SpillOperands);
2454}
2455
2456MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2457 MachineBasicBlock::iterator I,
2458 const MachineInstr &Orig,
2459 int FrameIndex, Register SpillReg) {
2460 assert(!Orig.isDebugRef() &&
2461 "DBG_INSTR_REF should not reference a virtual register.");
2462 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2463 MachineInstrBuilder NewMI =
2464 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2465 // Non-Variadic Operands: Location, Offset, Variable, Expression
2466 // Variadic Operands: Variable, Expression, Locations...
2467 if (Orig.isNonListDebugValue())
2468 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2469 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2470 if (Orig.isDebugValueList()) {
2471 for (const MachineOperand &Op : Orig.debug_operands())
2472 if (Op.isReg() && Op.getReg() == SpillReg)
2473 NewMI.addFrameIndex(FrameIndex);
2474 else
2475 NewMI.add(MachineOperand(Op));
2476 }
2477 return NewMI;
2478}
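// Illustrative before/after for the non-variadic case (operand names are
// hypothetical, layout per the comment above):
//   DBG_VALUE $rbx, 0, !"x", !DIExpression()
// becomes, once $rbx is spilled to %stack.0:
//   DBG_VALUE %stack.0, 0, !"x", !DIExpression(DW_OP_deref)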
2479MachineInstr *llvm::buildDbgValueForSpill(
2480 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2481 const MachineInstr &Orig, int FrameIndex,
2482 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2483 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2484 MachineInstrBuilder NewMI =
2485 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2486 // Non-Variadic Operands: Location, Offset, Variable, Expression
2487 // Variadic Operands: Variable, Expression, Locations...
2488 if (Orig.isNonListDebugValue())
2489 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2490 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2491 if (Orig.isDebugValueList()) {
2492 for (const MachineOperand &Op : Orig.debug_operands())
2493 if (is_contained(SpilledOperands, &Op))
2494 NewMI.addFrameIndex(FrameIndex);
2495 else
2496 NewMI.add(MachineOperand(Op));
2497 }
2498 return NewMI;
2499}
2500
2501void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2502 Register Reg) {
2503 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2504 if (Orig.isNonListDebugValue())
2505 Orig.getDebugOffset().ChangeToImmediate(0U);
2506 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2507 Op.ChangeToFrameIndex(FrameIndex);
2508 Orig.getDebugExpressionOp().setMetadata(Expr);
2509}
2510
2511void MachineInstr::collectDebugValues(
2512 SmallVectorImpl<MachineInstr *> &DbgValues) {
2513 MachineInstr &MI = *this;
2514 if (!MI.getOperand(0).isReg())
2515 return;
2516
2517 MachineBasicBlock::iterator DI = MI; ++DI;
2518 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2519 DI != DE; ++DI) {
2520 if (!DI->isDebugValue())
2521 return;
2522 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2523 DbgValues.push_back(&*DI);
2524 }
2525}
2526
2527void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2528 // Collect matching debug values.
2529 SmallVector<MachineInstr *, 2> DbgValues;
2530
2531 if (!getOperand(0).isReg())
2532 return;
2533
2534 Register DefReg = getOperand(0).getReg();
2535 auto *MRI = getRegInfo();
2536 for (auto &MO : MRI->use_operands(DefReg)) {
2537 auto *DI = MO.getParent();
2538 if (!DI->isDebugValue())
2539 continue;
2540 if (DI->hasDebugOperandForReg(DefReg)) {
2541 DbgValues.push_back(DI);
2542 }
2543 }
2544
2545 // Propagate Reg to debug value instructions.
2546 for (auto *DBI : DbgValues)
2547 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2548 Op.setReg(Reg);
2549}
2550
2551using MMOList = SmallVector<const MachineMemOperand *, 2>;
2552
2553static LocationSize getSpillSlotSize(const MMOList &Accesses,
2554 const MachineFrameInfo &MFI) {
2555 std::optional<TypeSize> Size;
2556 for (const auto *A : Accesses) {
2557 if (MFI.isSpillSlotObjectIndex(
2558 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2559 ->getFrameIndex())) {
2560 LocationSize S = A->getSize();
2561 if (!S.hasValue())
2562 return LocationSize::beforeOrAfterPointer();
2563 if (!Size)
2564 Size = S.getValue();
2565 else
2566 Size = *Size + S.getValue();
2567 }
2568 }
2569 if (!Size)
2570 return LocationSize::precise(0);
2571 return LocationSize::precise(*Size);
2572}
2573
2574std::optional<LocationSize>
2575MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2576 int FI;
2577 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2578 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2579 if (MFI.isSpillSlotObjectIndex(FI))
2580 return (*memoperands_begin())->getSize();
2581 }
2582 return std::nullopt;
2583}
2584
2585std::optional<LocationSize>
2586MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2587 MMOList Accesses;
2588 if (TII->hasStoreToStackSlot(*this, Accesses))
2589 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2590 return std::nullopt;
2591}
2592
2593std::optional<LocationSize>
2594MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2595 int FI;
2596 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2597 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2598 if (MFI.isSpillSlotObjectIndex(FI))
2599 return (*memoperands_begin())->getSize();
2600 }
2601 return std::nullopt;
2602}
2603
2604std::optional<LocationSize>
2605MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2606 MMOList Accesses;
2607 if (TII->hasLoadFromStackSlot(*this, Accesses))
2608 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2609 return std::nullopt;
2610}
2611
2612unsigned MachineInstr::getDebugInstrNum() {
2613 if (DebugInstrNum == 0)
2614 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2615 return DebugInstrNum;
2616}
2617
2618unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2619 if (DebugInstrNum == 0)
2620 DebugInstrNum = MF.getNewDebugInstrNum();
2621 return DebugInstrNum;
2622}
2623
2624std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2625 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2626 getRegInfo()->getType(getOperand(1).getReg()));
2627}
2628
2629std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2630 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2631 getRegInfo()->getType(getOperand(1).getReg()),
2632 getRegInfo()->getType(getOperand(2).getReg()));
2633}
2634
2635std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2636 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2637 getRegInfo()->getType(getOperand(1).getReg()),
2638 getRegInfo()->getType(getOperand(2).getReg()),
2639 getRegInfo()->getType(getOperand(3).getReg()));
2640}
2641
2642std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2643 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2644 getRegInfo()->getType(getOperand(1).getReg()),
2645 getRegInfo()->getType(getOperand(2).getReg()),
2646 getRegInfo()->getType(getOperand(3).getReg()),
2647 getRegInfo()->getType(getOperand(4).getReg()));
2648}
2649
2650std::tuple<Register, LLT, Register, LLT>
2651MachineInstr::getFirst2RegLLTs() const {
2652 Register Reg0 = getOperand(0).getReg();
2653 Register Reg1 = getOperand(1).getReg();
2654 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2655 getRegInfo()->getType(Reg1));
2656}
2657
2658std::tuple<Register, LLT, Register, LLT, Register, LLT>
2659MachineInstr::getFirst3RegLLTs() const {
2660 Register Reg0 = getOperand(0).getReg();
2661 Register Reg1 = getOperand(1).getReg();
2662 Register Reg2 = getOperand(2).getReg();
2663 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2664 getRegInfo()->getType(Reg1), Reg2,
2665 getRegInfo()->getType(Reg2));
2666}
2667
2668std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2669MachineInstr::getFirst4RegLLTs() const {
2670 Register Reg0 = getOperand(0).getReg();
2671 Register Reg1 = getOperand(1).getReg();
2672 Register Reg2 = getOperand(2).getReg();
2673 Register Reg3 = getOperand(3).getReg();
2674 return std::tuple(
2675 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2676 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2677}
2678
2679std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2680 LLT>
2681MachineInstr::getFirst5RegLLTs() const {
2682 Register Reg0 = getOperand(0).getReg();
2683 Register Reg1 = getOperand(1).getReg();
2684 Register Reg2 = getOperand(2).getReg();
2685 Register Reg3 = getOperand(3).getReg();
2686 Register Reg4 = getOperand(4).getReg();
2687 return std::tuple(
2688 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2689 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2690 Reg4, getRegInfo()->getType(Reg4));
2691}
2692
2693void MachineInstr::insert(mop_iterator InsertBefore,
2694 ArrayRef<MachineOperand> Ops) {
2695 assert(InsertBefore != nullptr && "invalid iterator");
2696 assert(InsertBefore->getParent() == this &&
2697 "iterator points to operand of other inst");
2698 if (Ops.empty())
2699 return;
2700
2701 // Do one pass to untie operands.
2702 SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2703 for (const MachineOperand &MO : operands()) {
2704 if (MO.isReg() && MO.isTied()) {
2705 unsigned OpNo = getOperandNo(&MO);
2706 unsigned TiedTo = findTiedOperandIdx(OpNo);
2707 TiedOpIndices[OpNo] = TiedTo;
2708 untieRegOperand(OpNo);
2709 }
2710 }
2711
2712 unsigned OpIdx = getOperandNo(InsertBefore);
2713 unsigned NumOperands = getNumOperands();
2714 unsigned OpsToMove = NumOperands - OpIdx;
2715
2716 SmallVector<MachineOperand> MovingOps;
2717 MovingOps.reserve(OpsToMove);
2718
2719 for (unsigned I = 0; I < OpsToMove; ++I) {
2720 MovingOps.emplace_back(getOperand(OpIdx));
2721 removeOperand(OpIdx);
2722 }
2723 for (const MachineOperand &MO : Ops)
2724 addOperand(MO);
2725 for (const MachineOperand &OpMoved : MovingOps)
2726 addOperand(OpMoved);
2727
2728 // Re-tie operands.
2729 for (auto [Tie1, Tie2] : TiedOpIndices) {
2730 if (Tie1 >= OpIdx)
2731 Tie1 += Ops.size();
2732 if (Tie2 >= OpIdx)
2733 Tie2 += Ops.size();
2734 tieOperands(Tie1, Tie2);
2735 }
2736}
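// Usage sketch (operand values hypothetical): insert two immediates before
// the third operand; any register ties crossing the insertion point are
// untied and then re-tied with shifted indices by the code above.
//   MI.insert(&MI.getOperand(2), {MachineOperand::CreateImm(0),
//                                 MachineOperand::CreateImm(1)});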
2737
2738bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2739 assert(OpId && "expected non-zero operand id");
2740 assert(isInlineAsm() && "should only be used on inline asm");
2741
2742 if (!getOperand(OpId).isReg())
2743 return false;
2744
2745 const MachineOperand &MD = getOperand(OpId - 1);
2746 if (!MD.isImm())
2747 return false;
2748
2749 InlineAsm::Flag F(MD.getImm());
2750 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2751 return F.getRegMayBeFolded();
2752 return false;
2753}
2754
2755unsigned MachineInstr::removePHIIncomingValueFor(const MachineBasicBlock &MBB) {
2756 assert(isPHI());
2757
2758 // Phi might have multiple entries for MBB. Need to remove them all.
2759 unsigned RemovedCount = 0;
2760 for (unsigned N = getNumOperands(); N > 2; N -= 2) {
2761 if (getOperand(N - 1).getMBB() == &MBB) {
2762 removeOperand(N - 1);
2763 removeOperand(N - 2);
2764 RemovedCount += 2;
2765 }
2766 }
2767 return RemovedCount;
2768}
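// Illustration: PHI operands come in (value, predecessor block) pairs after
// the def, e.g. "%2:gpr32 = PHI %0, %bb.1, %1, %bb.2", which is why the loop
// above walks backwards two operands at a time and never touches operand 0.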
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:638
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
A set of register units.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static cl::opt< bool > PrintMIAddrs("print-mi-addrs", cl::Hidden, cl::desc("Print addresses of MachineInstrs when dumping"))
static LocationSize getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, const SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, BatchAAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForRegHelper(Instruction *MI, Register Reg)
SmallVector< const MachineMemOperand *, 2 > MMOList
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetInstrInfo *&TII)
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
Register Reg
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
MachineInstr unsigned OpIdx
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file contains some templates that are useful if you are working with the STL at all.
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
Capacity getNext() const
Get the next larger capacity.
size_t getSize() const
Get the number of elements in an array with this capacity.
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:143
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:138
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:187
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
A debug info location.
Definition DebugLoc.h:124
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition DebugLoc.h:244
Diagnostic information for inline asm reporting.
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static StringRef getMemConstraintName(ConstraintCode C)
Definition InlineAsm.h:470
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCRegister Reg) const
Returns true if register Reg and no aliasing register is in the set.
A set of register units used to track register liveness.
bool hasValue() const
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isScalable() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
ArrayRef< MCOperandInfo > operands() const
unsigned short Opcode
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isValid() const
isValid - Returns true until all the operands have been visited.
LLVM_ABI MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
bool isDebugValueList() const
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
LLVM_ABI iterator_range< filter_iterator< const MachineOperand *, std::function< bool(const MachineOperand &Op)> > > getDebugOperandsForReg(Register Reg) const
Returns a range of all of the operands that correspond to a debug use of Reg.
mop_range debug_operands()
Returns all operands that are used to determine the variable location for this DBG_VALUE instruction.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
LLVM_ABI MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
const MachineBasicBlock * getParent() const
MDNode * getMMRAMetadata() const
Helper to extract mmra.op metadata.
LLVM_ABI void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
bool isDebugLabel() const
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
LLVM_ABI bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
QueryType
API for querying MachineInstr properties.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
LLVM_ABI const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
LLVM_ABI std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
LLVM_ABI Register isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
LLVM_ABI bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
LLVM_ABI void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI bool mayAlias(BatchAAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
LLVM_ABI void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which described the memory access done by this instr...
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
bool isDebugRef() const
LLVM_ABI void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
LLVM_ABI std::optional< LocationSize > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
LLVM_ABI void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
LLVM_ABI bool wouldBeTriviallyDead() const
Return true if this instruction would be trivially dead if all of its defined registers were dead.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
LLVM_ABI std::tuple< LLT, LLT > getFirst2LLTs() const
LLVM_ABI std::optional< LocationSize > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
LLVM_ABI void unbundleFromPred()
Break bundle above this instruction.
LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool isStackAligningInlineAsm() const
LLVM_ABI void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
LLVM_ABI int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
bool isCFIInstruction() const
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
LLVM_ABI void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
mop_range operands()
LLVM_ABI bool isCandidateForAdditionalCallInfo(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an additional information associated with it.
LLVM_ABI std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
LLVM_ABI std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
LLVM_ABI std::optional< LocationSize > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
LLVM_ABI const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
LLVM_ABI InlineAsm::AsmDialect getInlineAsmDialect() const
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
LLVM_ABI bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
LLVM_ABI const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI unsigned removePHIIncomingValueFor(const MachineBasicBlock &MBB)
Remove all incoming values of Phi instruction for the given block.
LLVM_ABI void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops BEFORE It. Can untie/retie tied operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
LLVM_ABI void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
LLVM_ABI const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLVM_ABI LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging supportDetermine the generic type to be printed (if needed) on uses and defs.
bool isLifetimeMarker() const
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
LLVM_ABI unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
LLVM_ABI void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI void emitGenericError(const Twine &ErrMsg) const
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ABI const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
LLVM_ABI void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
MachineOperand * mop_iterator
iterator/begin/end - Iterate over all operands of a machine instruction.
LLVM_ABI bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI bool isDead(const MachineRegisterInfo &MRI, LiveRegUnits *LivePhysRegs=nullptr) const
Check whether an MI is dead.
LLVM_ABI std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
LLVM_ABI const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
friend class MachineFunction
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
LLVM_ABI void dump() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
MachineOperand & getDebugOperand(unsigned Index)
LLVM_ABI std::optional< LocationSize > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
LLVM_ABI std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
LLVM_ABI void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
LLVM_ABI void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void emitInlineAsmError(const Twine &ErrMsg) const
Emit an error referring to the source location of this instruction.
uint32_t getFlags() const
Return the MI flags bitvector.
bool isPseudoProbe() const
LLVM_ABI bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
LLVM_ABI void unbundleFromSucc()
Break bundle below this instruction.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
LLVM_ABI bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
LLVM_ABI void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
LLVM_ABI const MDNode * getLocCookieMD() const
For inline asm, get the !srcloc metadata node if we have it, and decode the loc cookie from it.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isFakeUse() const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
LLVM_ABI int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
LLVM_ABI bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
LLVM_ABI void setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs)
LLVM_ABI const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
LLVM_ABI void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
LLVM_ABI void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
LLVM_ABI bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
LLVM_ABI bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
LLVM_ABI void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static LLVM_ABI void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
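A small illustrative sketch; the operand index and the constant are arbitrary assumptions:
// Rewrite a register use of MI into an immediate, e.g. after constant folding.
MachineOperand &MO = MI.getOperand(1);
if (MO.isReg() && !MO.isDef())
  MO.ChangeToImmediate(42);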
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
LLVM_ABI void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
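A sketch of the typical query against a call's register-mask operand, assuming MI and PhysReg are in scope:
// A regmask operand clobbers PhysReg if the corresponding mask bit is clear.
bool Clobbered = false;
for (const MachineOperand &MO : MI.operands())
  if (MO.isRegMask() &&
      MachineOperand::clobbersPhysReg(MO.getRegMask(), PhysReg))
    Clobbered = true;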
static LLVM_ABI void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
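A sketch mirroring how implicit operands are typically materialized; MF, MI, and PhysReg are assumed names:
// Append an implicit def of PhysReg to MI.
MI.addOperand(MF, MachineOperand::CreateReg(PhysReg, /*isDef=*/true,
                                            /*isImp=*/true));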
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Representation for a specific memory location.
LLVM_ABI void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
An or instruction, which can be marked as "disjoint", indicating that the inputs don't have a 1 in th...
Definition InstrTypes.h:404
A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact", indicating that no bits are ...
Definition Operator.h:154
Instruction that can have a nneg flag (zext/uitofp).
Definition InstrTypes.h:639
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
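A sketch of using the insert() return value for deduplication, assuming MBB is a MachineBasicBlock in scope:
SmallPtrSet<const MachineInstr *, 8> Seen;
for (const MachineInstr &MI : MBB) {
  // insert() returns {iterator, inserted}; the bool is true only the first time.
  if (!Seen.insert(&MI).second)
    continue;
  // ... MI is visited exactly once here ...
}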
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static LLVM_ABI unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
MI-level Statepoint operands.
Definition StackMaps.h:159
LLVM_ABI int getFirstGCPtrIdx()
Get index of first GC pointer operand, or -1 if there are none.
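placeholder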
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
std::enable_if_t< detail::IsValidPointer< X, Y >::value, bool > hasa(Y &&MD)
Check whether Metadata has a Value.
Definition Metadata.h:650
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
constexpr double e
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
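A hedged sketch of the builder; MI, TII, DstReg, and SrcReg are assumed to exist in the surrounding pass:
// Insert a COPY DstReg <- SrcReg immediately before MI, reusing its DebugLoc.
MachineInstr *Copy =
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
            TII->get(TargetOpcode::COPY), DstReg)
        .addReg(SrcReg)
        .getInstr();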
LLVM_ABI void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
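A sketch of the range form, assuming MI is in scope:
// True if any operand of MI is an undef register use.
bool HasUndefUse = llvm::any_of(MI.operands(), [](const MachineOperand &MO) {
  return MO.isReg() && MO.isUse() && MO.isUndef();
});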
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition iterator.h:336
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
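A sketch of a typical range check on an immediate; MO is assumed, and the 16-bit width is an arbitrary example:
// isUInt<N> is a template; here it checks that Imm fits in 16 unsigned bits.
int64_t Imm = MO.getImm();
bool FitsU16 = Imm >= 0 && llvm::isUInt<16>(static_cast<uint64_t>(Imm));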
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
Definition STLExtras.h:550
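A sketch filtering an operand range, assuming MI is in scope:
// Visit only the register defs of MI.
for (const MachineOperand &MO : llvm::make_filter_range(
         MI.operands(),
         [](const MachineOperand &Op) { return Op.isReg() && Op.isDef(); })) {
  // ... MO is always a register def here ...
}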
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Other
Any other memory.
Definition ModRef.h:68
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1867
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
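A hedged sketch of the spill case; Orig, InsertPt, FrameIndex, and SpilledReg are assumed names from the surrounding spill code:
// After spilling SpilledReg, clone the DBG_VALUE so it refers to the stack slot.
if (Orig.isDebugValue())
  buildDbgValueForSpill(*Orig.getParent(), InsertPt, Orig, FrameIndex,
                        SpilledReg);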
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
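A sketch of membership testing on a small container, assuming Reg is in scope:
// Record Reg only once.
SmallVector<Register, 4> SeenRegs;
if (!llvm::is_contained(SeenRegs, Reg))
  SeenRegs.push_back(Reg);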
filter_iterator_impl< WrappedIteratorT, PredicateT, detail::fwd_or_bidi_tag< WrappedIteratorT > > filter_iterator
Defines filter_iterator to a suitable specialization of filter_iterator_impl, based on the underlying...
Definition STLExtras.h:537
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
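A sketch of hashing a sequence of precomputed components, similar in spirit to how MachineInstr hashes are formed; filling HashComponents is assumed to happen elsewhere:
SmallVector<size_t, 8> HashComponents;
// ... push one hash per operand, e.g. hash_value(MO), into HashComponents ...
hash_code Hash =
    hash_combine_range(HashComponents.begin(), HashComponents.end());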
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:867
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:761
static LLVM_ABI unsigned getHashValue(const MachineInstr *const &MI)