#define DEBUG_TYPE "amdgpu-lower-vgpr-encoding"
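// Lower VGPR operand encodings on subtargets with 1024 addressable VGPRs: an
// instruction encodes only the low 8 bits of a VGPR index, and the two most
// significant bits of each VGPR operand field come from a mode programmed
// with S_SET_VGPR_MSB. The pass computes the MSBs each instruction needs,
// inserts or updates S_SET_VGPR_MSB instructions, resets the mode around
// calls, terminators, and VGPR-using inline asm, and keeps S_CLAUSE lengths
// consistent. (Overview inferred from the code below.)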
class AMDGPULowerVGPREncoding {
  // Four tracked VGPR operand fields, two MSB bits each: an 8-bit mode.
  static constexpr unsigned OpNum = 4;
  static constexpr unsigned BitsPerField = 2;
  static constexpr unsigned NumFields = 4;
  static constexpr unsigned FieldMask = (1 << BitsPerField) - 1;
  static constexpr unsigned ModeWidth = NumFields * BitsPerField;
  static constexpr unsigned ModeMask = (1 << ModeWidth) - 1;

  using ModeType = PackedVector<unsigned, BitsPerField,
                                std::bitset<BitsPerField * NumFields>>;

  // One MSB value per tracked operand field.
  class ModeTy : public ModeType {
  public:
    ModeTy() : ModeType(0) {}
    operator int64_t() const { return raw_bits().to_ulong(); }
    static ModeTy fullMask() {
      ModeTy M;
      M.raw_bits().set();
      return M;
    }
  };

  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineBasicBlock *MBB;

  // Most recent S_SET_VGPR_MSB, the mode it programs, and the fields whose
  // programmed values later instructions already rely on.
  MachineInstr *MostRecentModeSet;
  ModeTy CurrentMode, CurrentMask;

  // Currently open S_CLAUSE and its decoded length/break state.
  MachineInstr *Clause;
  unsigned ClauseLen;
  unsigned ClauseRemaining;
  unsigned ClauseBreaks;

  bool setMode(ModeTy NewMode, ModeTy Mask,
               MachineBasicBlock::instr_iterator I);
  void resetMode(MachineBasicBlock::instr_iterator I) {
    setMode(ModeTy(), ModeTy::fullMask(), I);
  }
  std::optional<unsigned> getMSBs(const MachineOperand &Op) const;
  void computeMode(ModeTy &NewMode, ModeTy &Mask, MachineInstr &MI,
                   const AMDGPU::OpName Ops[OpNum],
                   const AMDGPU::OpName *Ops2 = nullptr);
  bool runOnMachineInstr(MachineInstr &MI);

public:
  bool run(MachineFunction &MF);
};
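/// Program the VGPR MSB mode \p NewMode for the fields selected by \p Mask
/// ahead of \p I. The request is folded into the most recent S_SET_VGPR_MSB
/// when that is safe, otherwise a new one is emitted. Returns true if an
/// instruction was created or updated.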
bool AMDGPULowerVGPREncoding::setMode(ModeTy NewMode, ModeTy Mask,
                                      MachineBasicBlock::instr_iterator I) {
  assert((NewMode.raw_bits() & ~Mask.raw_bits()).none());

  auto Delta = NewMode.raw_bits() ^ CurrentMode.raw_bits();

  // Nothing changes within the requested fields; just record that they are
  // now relied upon.
  if ((Delta & Mask.raw_bits()).none()) {
    CurrentMask |= Mask;
    return false;
  }

  // The change does not conflict with any field instructions after the most
  // recent S_SET_VGPR_MSB already rely on, so fold it into that instruction.
  if (MostRecentModeSet && (Delta & CurrentMask.raw_bits()).none()) {
    CurrentMode |= NewMode;
    CurrentMask |= Mask;

    MachineOperand &Op = MostRecentModeSet->getOperand(0);
    // Keep the previous mode stored in the high half of the immediate.
    int64_t OldModeBits = Op.getImm() & (ModeMask << ModeWidth);
    Op.setImm(CurrentMode | OldModeBits);
    return true;
  }

  // Otherwise emit a new S_SET_VGPR_MSB before I, stashing the outgoing mode
  // in the high half of the immediate.
  int64_t OldModeBits = CurrentMode << ModeWidth;
  MostRecentModeSet = BuildMI(*MBB, I, {}, TII->get(AMDGPU::S_SET_VGPR_MSB))
                          .addImm(NewMode | OldModeBits);

  CurrentMode = NewMode;
  CurrentMask = Mask;
  return true;
}
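/// Return the most significant bits of the hardware VGPR index of \p Op, or
/// std::nullopt if the operand is not a VGPR.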
std::optional<unsigned>
AMDGPULowerVGPREncoding::getMSBs(const MachineOperand &Op) const {
  if (!Op.isReg())
    return std::nullopt;

  Register Reg = Op.getReg();
  const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
  if (!RC || !TRI->isVGPRClass(RC))
    return std::nullopt;

  unsigned Idx = TRI->getHWRegIndex(Reg);
  return Idx >> 8;
}
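/// Compute the MSB mode \p MI requires: for each of the OpNum named operands
/// in \p Ops (with \p Ops2 as an optional secondary table), record the
/// operand's VGPR MSBs in \p NewMode and mark the corresponding field in
/// \p Mask.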
void AMDGPULowerVGPREncoding::computeMode(ModeTy &NewMode, ModeTy &Mask,
                                          MachineInstr &MI,
                                          const AMDGPU::OpName Ops[OpNum],
                                          const AMDGPU::OpName *Ops2) {
  for (unsigned I = 0; I < OpNum; ++I) {
    MachineOperand *Op = TII->getNamedOperand(MI, Ops[I]);

    std::optional<unsigned> MSBits;
    if (Op)
      MSBits = getMSBs(*Op);

    // When a secondary operand table is given, the paired operand has to
    // agree on its MSBs.
    if (MSBits.has_value() && Ops2) {
      auto Op2 = TII->getNamedOperand(MI, Ops2[I]);

      std::optional<unsigned> MSBits2;
      MSBits2 = getMSBs(*Op2);
      if (MSBits2.has_value() && MSBits != MSBits2)
        llvm_unreachable("Inconsistent VGPR MSBs for paired operands");
    }

    // Fall back to the secondary table if the primary operand is absent or
    // is not a VGPR.
    if (!MSBits.has_value() && Ops2) {
      Op = TII->getNamedOperand(MI, Ops2[I]);
      if (Op)
        MSBits = getMSBs(*Op);
    }

    if (!MSBits.has_value())
      continue;

    // A tied src2 read of an instruction with a 32-bit VALU encoding (VOP2,
    // or VOP3 that can be shrunk) is not encoded separately, so it does not
    // get an MSB field of its own.
    if (Ops[I] == AMDGPU::OpName::src2 && !Op->isDef() && Op->isTied() &&
        (SIInstrInfo::isVOP2(MI) ||
         (SIInstrInfo::isVOP3(MI.getDesc()) &&
          TII->hasVALU32BitEncoding(MI.getOpcode()))))
      continue;

    NewMode[I] = MSBits.value();
    Mask[I] = FieldMask;
  }
}
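/// Lower a single instruction: if its opcode has VGPR-lowering operand
/// tables, compute the required MSB mode and switch to it right before the
/// instruction.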
bool AMDGPULowerVGPREncoding::runOnMachineInstr(MachineInstr &MI) {
  auto Ops = AMDGPU::getVGPRLoweringOperandTables(MI.getDesc());
  if (Ops.first) {
    ModeTy NewMode, Mask;
    computeMode(NewMode, Mask, MI, Ops.first, Ops.second);
    return setMode(NewMode, Mask, MI.getIterator());
  }

  // Every instruction with VGPR uses is expected to be covered by the
  // operand tables above.
  assert(!TII->hasVGPRUses(MI) || MI.isMetaInstruction() || MI.isPseudo());
  return false;
}
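// When a mode switch has to be inserted while a hardware clause is open, the
// insertion point is adjusted relative to the enclosing S_CLAUSE, and the
// S_CLAUSE itself is either re-encoded with an updated length and break
// count or removed.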
  if (!ClauseRemaining)
    return false;

  // No instruction of the clause body has been processed yet.
  if (ClauseRemaining == ClauseLen) {
    I = Clause->getPrevNode()->getIterator();
    // ...
  }
  // ...
  Clause->eraseFromBundle();
  // ...
  Clause->getOperand(0).setImm(ClauseLen | (ClauseBreaks << 8));
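/// Per-function driver: walks each block, resets the mode at calls,
/// terminators, and VGPR-using inline asm, records S_CLAUSE state, and
/// lowers every remaining instruction.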
bool AMDGPULowerVGPREncoding::run(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.has1024AddressableVGPRs())
    return false;

  TII = ST.getInstrInfo();
  TRI = ST.getRegisterInfo();

  bool Changed = false;
  ClauseLen = ClauseRemaining = 0;

  for (auto &MBB : MF) {
    this->MBB = &MBB;
    MostRecentModeSet = nullptr;

    for (MachineInstr &MI : make_early_inc_range(MBB.instrs())) {
      if (MI.isMetaInstruction())
        continue;

      if (MI.isTerminator() || MI.isCall()) {
        // No mode reset is needed in front of a program end.
        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED)
          continue;
        resetMode(MI.getIterator());
        continue;
      }

      if (MI.isInlineAsm()) {
        if (TII->hasVGPRUses(MI))
          resetMode(MI.getIterator());
        continue;
      }

      if (MI.getOpcode() == AMDGPU::S_CLAUSE) {
        assert(!ClauseRemaining && "Nested clauses are not supported");
        // SIMM16: bits 5:0 hold the clause length minus one, bits 11:8 the
        // break count.
        ClauseLen = MI.getOperand(0).getImm();
        ClauseBreaks = (ClauseLen >> 8) & 15;
        ClauseLen = ClauseRemaining = (ClauseLen & 63) + 1;
        Clause = &MI;
        continue;
      }

      Changed |= runOnMachineInstr(MI);
      if (ClauseRemaining)
        --ClauseRemaining;
    }

    resetMode(MBB.instr_end());
  }

  return Changed;
}
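// Pass plumbing: the legacy pass and the new pass manager pass both delegate
// to AMDGPULowerVGPREncoding::run(). (The new-PM wrapper's class name is
// assumed to follow the usual *Pass naming convention.)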
bool AMDGPULowerVGPREncodingLegacy::runOnMachineFunction(MachineFunction &MF) {
  return AMDGPULowerVGPREncoding().run(MF);
}

char AMDGPULowerVGPREncodingLegacy::ID = 0;

INITIALIZE_PASS(AMDGPULowerVGPREncodingLegacy, DEBUG_TYPE,
                "AMDGPU Lower VGPR Encoding", false, false)
PreservedAnalyses
AMDGPULowerVGPREncodingPass::run(MachineFunction &MF,
                                 MachineFunctionAnalysisManager &) {
  if (!AMDGPULowerVGPREncoding().run(MF))
    return PreservedAnalyses::all();
  return getMachineFunctionPassPreservedAnalyses().preserveSet<CFGAnalyses>();
}
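// A sketch of the intended effect (register numbers and the printed form of
// the S_SET_VGPR_MSB immediate are illustrative, not taken from a real test):
//
//   v_add_f32 v300, v20, v400          s_set_vgpr_msb <MSBs for vdst/src0/src1>
//   v_mov_b32 v301, v21          -->    v_add_f32 v300, v20, v400
//                                       v_mov_b32 v301, v21
//
// One mode switch covers both instructions because the second one's MSB
// requirements agree with what is already programmed (setMode folds or skips
// compatible requests); the register operands themselves are unchanged, and
// the encoder later emits only the low 8 bits of each VGPR index.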