AMDGPUIGroupLP.cpp
//===--- AMDGPUIGroupLP.cpp - AMDGPU IGroupLP ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file This file defines a set of schedule DAG mutations that can be used to
// override default scheduler behavior to enforce specific scheduling patterns.
// They should be used in cases where runtime performance considerations, such
// as inter-wavefront interactions, mean that compile-time heuristics cannot
// predict the optimal instruction ordering, or in kernels where optimum
// instruction scheduling is important enough to warrant manual intervention.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUIGroupLP.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/DenseMap.h"

#include <type_traits>

using namespace llvm;

#define DEBUG_TYPE "igrouplp"

namespace {

static cl::opt<bool> EnableExactSolver(
    "amdgpu-igrouplp-exact-solver", cl::Hidden,
    cl::desc("Whether to use the exponential time solver to fit "
             "the instructions to the pipeline as closely as "
             "possible."),
    cl::init(false));

static cl::opt<unsigned> CutoffForExact(
    "amdgpu-igrouplp-exact-solver-cutoff", cl::init(0), cl::Hidden,
    cl::desc("The maximum number of scheduling group conflicts "
             "which we attempt to solve with the exponential time "
             "exact solver. Problem sizes greater than this will "
             "be solved by the less accurate greedy algorithm. Selecting "
             "the solver by size is superseded by manually selecting "
             "the solver (e.g. by amdgpu-igrouplp-exact-solver)."));

static cl::opt<uint64_t> MaxBranchesExplored(
    "amdgpu-igrouplp-exact-solver-max-branches", cl::init(0), cl::Hidden,
    cl::desc("The number of branches that we are willing to explore with "
             "the exact algorithm before giving up."));

static cl::opt<bool> UseCostHeur(
    "amdgpu-igrouplp-exact-solver-cost-heur", cl::init(true), cl::Hidden,
    cl::desc("Whether to use the cost heuristic to make choices as we "
             "traverse the search space using the exact solver. Defaulted "
             "to on, and if turned off, we will use the node order -- "
             "attempting to put the later nodes in the later sched groups. "
             "Experimentally, results are mixed, so this should be set on a "
             "case-by-case basis."));

// Components of the mask that determines which instruction types may be
// classified into a SchedGroup.
enum class SchedGroupMask {
  NONE = 0u,
  ALU = 1u << 0,
  VALU = 1u << 1,
  SALU = 1u << 2,
  MFMA = 1u << 3,
  VMEM = 1u << 4,
  VMEM_READ = 1u << 5,
  VMEM_WRITE = 1u << 6,
  DS = 1u << 7,
  DS_READ = 1u << 8,
  DS_WRITE = 1u << 9,
  TRANS = 1u << 10,
  ALL = ALU | VALU | SALU | MFMA | VMEM | VMEM_READ | VMEM_WRITE | DS |
        DS_READ | DS_WRITE | TRANS,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ ALL)
};
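
// Since this is a bitmask enum, masks compose with the usual bitwise
// operators. As a purely illustrative (hypothetical) example, a group that
// accepted either kind of LDS access could be described by:
//   SchedGroupMask M = SchedGroupMask::DS_READ | SchedGroupMask::DS_WRITE;
// The bit positions mirror the mask immediate of SCHED_BARRIER and
// SCHED_GROUP_BARRIER, e.g. DS corresponds to 0x80 and MFMA to 0x8.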

class SchedGroup;

// InstructionRule class is used to enact a filter which determines whether or
// not an SU maps to a given SchedGroup. It contains complementary data
// structures (e.g. Cache) to help those filters.
class InstructionRule {
protected:
  const SIInstrInfo *TII;
  unsigned SGID;
  // A cache made available to the Filter to store SUnits for subsequent
  // invocations of the Filter
  std::optional<SmallVector<SUnit *, 4>> Cache;

public:
  virtual bool
  apply(const SUnit *, const ArrayRef<SUnit *>,
        SmallVectorImpl<SchedGroup> &) {
    return true;
  };

  InstructionRule(const SIInstrInfo *TII, unsigned SGID,
                  bool NeedsCache = false)
      : TII(TII), SGID(SGID) {
    if (NeedsCache) {
      Cache = SmallVector<SUnit *, 4>();
    }
  }

  virtual ~InstructionRule() = default;
};

using SUnitsToCandidateSGsMap = DenseMap<SUnit *, SmallVector<int, 4>>;

// Classify instructions into groups to enable fine tuned control over the
// scheduler. These groups may be more specific than current SchedModel
// instruction classes.
class SchedGroup {
private:
  // Mask that defines which instruction types can be classified into this
  // SchedGroup. The instruction types correspond to the mask from
  // SCHED_BARRIER and SCHED_GROUP_BARRIER.
  SchedGroupMask SGMask;

  // Maximum number of SUnits that can be added to this group.
  std::optional<unsigned> MaxSize;

  // SchedGroups will only synchronize with other SchedGroups that have the
  // same SyncID.
  int SyncID = 0;

  // SGID is used to map instructions to candidate SchedGroups
  unsigned SGID;

  // The different rules each instruction in this SchedGroup must conform to
  SmallVector<std::shared_ptr<InstructionRule>, 4> Rules;

  // Count of the number of created SchedGroups, used to initialize SGID.
  static unsigned NumSchedGroups;

  // Use SGMask to determine whether we can classify MI as a member of this
  // SchedGroup object.
  bool canAddMI(const MachineInstr &MI) const;

public:
  // Collection of SUnits that are classified as members of this group.
  SmallVector<SUnit *, 32> Collection;

  ScheduleDAGInstrs *DAG;
  const SIInstrInfo *TII;

  // Try to add an edge from SU A to SU B.
  bool tryAddEdge(SUnit *A, SUnit *B);

  // Returns true if SU can be added to this SchedGroup.
  bool canAddSU(SUnit &SU) const;

  // Add DAG dependencies between all SUnits in this SchedGroup and this SU.
  // If MakePred is true, SU will be a predecessor of the SUnits in this
  // SchedGroup, otherwise SU will be a successor.
  void link(SUnit &SU, bool MakePred = false);

  // Add DAG dependencies and track which edges are added, and the count of
  // missed edges.
  int link(SUnit &SU, bool MakePred,
           std::list<std::pair<SUnit *, SUnit *>> &AddedEdges);

  // Add DAG dependencies between all SUnits in this SchedGroup and this SU.
  // Use the predicate to determine whether SU should be a predecessor (P =
  // true) or a successor (P = false) of this SchedGroup.
  void link(SUnit &SU, function_ref<bool(const SUnit *A, const SUnit *B)> P);

  // Add DAG dependencies such that SUnits in this group shall be ordered
  // before SUnits in OtherGroup.
  void link(SchedGroup &OtherGroup);

  // Returns true if no more instructions may be added to this group.
  bool isFull() const { return MaxSize && Collection.size() >= *MaxSize; }

  // Append a constraint that SUs must meet in order to fit into this
  // SchedGroup. Since many rules involve the relationship between a SchedGroup
  // and the SUnits in other SchedGroups, rules are checked at Pipeline Solve
  // time (rather than SchedGroup init time).
  void addRule(std::shared_ptr<InstructionRule> NewRule) {
    Rules.push_back(NewRule);
  }

  // Returns true if the SU matches all rules.
  bool allowedByRules(const SUnit *SU,
                      SmallVectorImpl<SchedGroup> &SyncPipe) const {
    for (auto &Rule : Rules) {
      if (!Rule->apply(SU, Collection, SyncPipe))
        return false;
    }
    return true;
  }

  // Add SU to the SchedGroup.
  void add(SUnit &SU) {
    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
                      << format_hex((int)SGMask, 10, true) << " adding "
                      << *SU.getInstr());
    Collection.push_back(&SU);
  }

  // Remove the last element in the SchedGroup.
  void pop() { Collection.pop_back(); }

  template <class T>
  void findCandidateSUnits(T Begin, T End,
                           SUnitsToCandidateSGsMap &SyncedInstrs);

  /// Find each SUnit in the DAG that could potentially be added to
  /// this SchedGroup and add the SGID to the candidate SchedGroups
  /// for SU in \p SyncedInstrs.
  void findCandidateSUnits(SUnitsToCandidateSGsMap &SyncedInstrs);

  int getSyncID() { return SyncID; }

  int getSGID() { return SGID; }

  SchedGroupMask getMask() { return SGMask; }

  SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize,
             ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
      : SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {
    SGID = NumSchedGroups++;
  }

  SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize, int SyncID,
             ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {
    SGID = NumSchedGroups++;
  }
};
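
// Usage sketch, using only names from this file: the strategies below
// construct groups and gather their candidate SUnits via
//   SG = &SyncedSchedGroups[SyncID].emplace_back(SchedGroupMask::MFMA, 1,
//                                                SyncID, DAG, TII);
//   SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
// and the PipelineSolver below then turns the candidate sets into DAG edges.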

using SUToCandSGsPair = std::pair<SUnit *, SmallVector<int, 4>>;
using SUsToCandSGsVec = SmallVector<SUToCandSGsPair, 4>;

// The PipelineSolver is used to assign SUnits to SchedGroups in a pipeline
// in non-trivial cases. For example, if the requested pipeline is
// {VMEM_READ, VALU, MFMA, VMEM_READ} and we encounter a VMEM_READ instruction
// in the DAG, then we will have an instruction that cannot be trivially
// assigned to a SchedGroup. The PipelineSolver class implements two algorithms
// to find a good solution to the pipeline -- a greedy algorithm and an exact
// algorithm. The exact algorithm has an exponential time complexity and should
// only be used for small or medium sized problems where an exact solution is
// highly desired.
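//
// To make the ambiguity concrete: with the pipeline above, a VMEM_READ SU has
// two candidate SchedGroups (index 0 and index 3). The greedy algorithm
// commits it to whichever candidate misses the fewest edges at that moment,
// while the exact algorithm branch-and-bounds over both assignments (and over
// omitting the SU entirely, at a cost of MissPenalty) and keeps the cheapest
// complete assignment it finds.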
class PipelineSolver {
  [[maybe_unused]] ScheduleDAGMI *DAG;

  // Instructions that can be assigned to multiple SchedGroups
  DenseMap<int, SUnitsToCandidateSGsMap> SyncedInstrs;
  SmallVector<SUsToCandSGsVec, 4> PipelineInstrs;
  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
  // The current working pipeline
  SmallVector<SmallVector<SchedGroup, 4>, 4> CurrPipeline;
  // The pipeline that has the best solution found so far
  SmallVector<SmallVector<SchedGroup, 4>, 4> BestPipeline;

  // Whether or not we actually have any SyncedInstrs to try to solve.
  bool NeedsSolver = false;

  // Compute an estimate of the size of the search tree -- the true size is
  // the product of each conflictedInst.Matches.size() across all SyncPipelines
  unsigned computeProblemSize();

  // The cost penalty of not assigning a SU to a SchedGroup
  int MissPenalty = 0;

  // Costs in terms of the number of edges we are unable to add
  int BestCost = -1;
  int CurrCost = 0;

  // Index pointing to the conflicting instruction that is currently being
  // fitted
  int CurrConflInstNo = 0;
  // Index to the pipeline that is currently being fitted
  int CurrSyncGroupIdx = 0;
  // The first non-trivial pipeline
  int BeginSyncGroupIdx = 0;

  // How many branches we have explored
  uint64_t BranchesExplored = 0;

  // The direction in which we process the candidate SchedGroups per SU
  bool IsBottomUp = true;

  // Update indices to fit the next conflicting instruction
  void advancePosition();
  // Recede indices to attempt to find a better fit for the previous
  // conflicting instruction
  void retreatPosition();

  // The exponential time algorithm which finds the provably best fit
  bool solveExact();
  // The polynomial time algorithm which attempts to find a good fit
  bool solveGreedy();
  // Find the best SchedGroup for the current SU using the heuristic given all
  // current information. One step in the greedy algorithm. Templated against
  // the SchedGroup iterator (either reverse or forward).
  template <typename T>
  void greedyFind(std::list<std::pair<SUnit *, SUnit *>> &AddedEdges, T I, T E);
  // Whether or not the current solution is optimal
  bool checkOptimal();
  // Populate the ready list, prioritizing fewest missed edges first.
  // Templated against the SchedGroup iterator (either reverse or forward).
  template <typename T>
  void populateReadyList(SmallVectorImpl<std::pair<int, int>> &ReadyList, T I,
                         T E);
  // Add edges corresponding to the SchedGroups as assigned by the solver
  void makePipeline();
  // Link the SchedGroups in the best found pipeline.
  // Templated against the SchedGroup iterator (either reverse or forward).
  template <typename T> void linkSchedGroups(T I, T E);
  // Add the edges from the SU to the other SchedGroups in the pipeline, and
  // return the number of edges missed.
  int addEdges(SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
               std::list<std::pair<SUnit *, SUnit *>> &AddedEdges);
  /// Link the pipeline as if \p SU was in the SchedGroup with ID \p SGID. It
  /// returns the cost (in terms of missed pipeline edges), and tracks the
  /// edges added in \p AddedEdges
  template <typename T>
  int linkSUnit(SUnit *SU, int SGID,
                std::list<std::pair<SUnit *, SUnit *>> &AddedEdges, T I, T E);
  /// Remove the edges passed via \p AddedEdges
  void removeEdges(const std::list<std::pair<SUnit *, SUnit *>> &AddedEdges);
  // Convert the passed-in maps to arrays for bidirectional iterators
  void convertSyncMapsToArrays();

  void reset();

public:
  // Invoke the solver to map instructions to instruction groups. The command
  // line options and the problem-size heuristic determine whether the exact
  // or the greedy algorithm is used.
  void solve();

  PipelineSolver(DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
                 DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
                 ScheduleDAGMI *DAG, bool IsBottomUp = true)
      : DAG(DAG), SyncedInstrs(SyncedInstrs),
        SyncedSchedGroups(SyncedSchedGroups), IsBottomUp(IsBottomUp) {

    for (auto &PipelineInstrs : SyncedInstrs) {
      if (!PipelineInstrs.second.empty()) {
        NeedsSolver = true;
        break;
      }
    }

    if (!NeedsSolver)
      return;

    convertSyncMapsToArrays();

    CurrPipeline = BestPipeline;

    while (static_cast<size_t>(BeginSyncGroupIdx) < PipelineInstrs.size() &&
           PipelineInstrs[BeginSyncGroupIdx].empty())
      ++BeginSyncGroupIdx;

    if (static_cast<size_t>(BeginSyncGroupIdx) >= PipelineInstrs.size())
      return;
  }
};

void PipelineSolver::reset() {

  for (auto &SyncPipeline : CurrPipeline) {
    for (auto &SG : SyncPipeline) {
      SmallVector<SUnit *, 32> TempCollection = SG.Collection;
      SG.Collection.clear();
      auto *SchedBarr = llvm::find_if(TempCollection, [](SUnit *SU) {
        return SU->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER;
      });
      if (SchedBarr != TempCollection.end())
        SG.Collection.push_back(*SchedBarr);
    }
  }

  CurrSyncGroupIdx = BeginSyncGroupIdx;
  CurrConflInstNo = 0;
  CurrCost = 0;
}

void PipelineSolver::convertSyncMapsToArrays() {
  for (auto &SyncPipe : SyncedSchedGroups) {
    BestPipeline.insert(BestPipeline.begin(), SyncPipe.second);
  }

  int PipelineIDx = SyncedInstrs.size() - 1;
  PipelineInstrs.resize(SyncedInstrs.size());
  for (auto &SyncInstrMap : SyncedInstrs) {
    for (auto &SUsToCandSGs : SyncInstrMap.second) {
      if (PipelineInstrs[PipelineIDx].empty()) {
        PipelineInstrs[PipelineIDx].push_back(
            std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
        continue;
      }
      auto *SortPosition = PipelineInstrs[PipelineIDx].begin();
      // Insert them in sorted order -- this allows for good parsing order in
      // the greedy algorithm
      while (SortPosition != PipelineInstrs[PipelineIDx].end() &&
             SUsToCandSGs.first->NodeNum > SortPosition->first->NodeNum)
        ++SortPosition;
      PipelineInstrs[PipelineIDx].insert(
          SortPosition, std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
    }
    --PipelineIDx;
  }
}

template <typename T> void PipelineSolver::linkSchedGroups(T I, T E) {
  for (; I != E; ++I) {
    auto &GroupA = *I;
    for (auto J = std::next(I); J != E; ++J) {
      auto &GroupB = *J;
      GroupA.link(GroupB);
    }
  }
}
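
// For example, given SchedGroups [SG0, SG1, SG2] in iteration order, the loop
// above links every ordered pair -- SG0->SG1, SG0->SG2, SG1->SG2 -- so each
// group's SUnits are ordered before those of every later group.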

void PipelineSolver::makePipeline() {
  // Preserve the order of barrier for subsequent SchedGroupBarrier mutations
  for (auto &SyncPipeline : BestPipeline) {
    LLVM_DEBUG(dbgs() << "Printing SchedGroups\n");
    for (auto &SG : SyncPipeline) {
      LLVM_DEBUG(dbgs() << "SchedGroup with SGID " << SG.getSGID()
                        << " has: \n");
      SUnit *SGBarr = nullptr;
      for (auto &SU : SG.Collection) {
        if (SU->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
          SGBarr = SU;
        LLVM_DEBUG(dbgs() << "SU(" << SU->NodeNum << ")\n");
      }
      // Command line requested IGroupLP doesn't have SGBarr
      if (!SGBarr)
        continue;
      SG.link(*SGBarr, false);
    }
  }

  for (auto &SyncPipeline : BestPipeline) {
    IsBottomUp ? linkSchedGroups(SyncPipeline.rbegin(), SyncPipeline.rend())
               : linkSchedGroups(SyncPipeline.begin(), SyncPipeline.end());
  }
}

template <typename T>
int PipelineSolver::linkSUnit(
    SUnit *SU, int SGID, std::list<std::pair<SUnit *, SUnit *>> &AddedEdges,
    T I, T E) {
  bool MakePred = false;
  int AddedCost = 0;
  for (; I < E; ++I) {
    if (I->getSGID() == SGID) {
      MakePred = true;
      continue;
    }
    auto Group = *I;
    AddedCost += Group.link(*SU, MakePred, AddedEdges);
    assert(AddedCost >= 0);
  }
  return AddedCost;
}

int PipelineSolver::addEdges(
    SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
    std::list<std::pair<SUnit *, SUnit *>> &AddedEdges) {

  // For IsBottomUp, the first SchedGroup in SyncPipeline contains the
  // instructions that are the ultimate successors in the resultant mutation.
  // Therefore, in such a configuration, the SchedGroups occurring before the
  // candidate SGID are successors of the candidate SchedGroup, thus the
  // current SU should be linked as a predecessor to SUs in those SchedGroups.
  // The opposite is true if !IsBottomUp. IsBottomUp occurs in the case of
  // multiple SCHED_GROUP_BARRIERS, or if a user specifies IGLP_OPT SchedGroups
  // using IsBottomUp (in reverse).
  return IsBottomUp ? linkSUnit(SU, SGID, AddedEdges, SyncPipeline.rbegin(),
                                SyncPipeline.rend())
                    : linkSUnit(SU, SGID, AddedEdges, SyncPipeline.begin(),
                                SyncPipeline.end());
}

void PipelineSolver::removeEdges(
    const std::list<std::pair<SUnit *, SUnit *>> &EdgesToRemove) {
  // Only remove the edges that we have added when testing
  // the fit.
  for (auto &PredSuccPair : EdgesToRemove) {
    SUnit *Pred = PredSuccPair.first;
    SUnit *Succ = PredSuccPair.second;

    auto *Match = llvm::find_if(
        Succ->Preds, [&Pred](SDep &P) { return P.getSUnit() == Pred; });
    if (Match != Succ->Preds.end()) {
      assert(Match->isArtificial());
      Succ->removePred(*Match);
    }
  }
}

void PipelineSolver::advancePosition() {
  ++CurrConflInstNo;

  if (static_cast<size_t>(CurrConflInstNo) >=
      PipelineInstrs[CurrSyncGroupIdx].size()) {
    CurrConflInstNo = 0;
    ++CurrSyncGroupIdx;
    // Advance to next non-trivial pipeline
    while (static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size() &&
           PipelineInstrs[CurrSyncGroupIdx].empty())
      ++CurrSyncGroupIdx;
  }
}

void PipelineSolver::retreatPosition() {
  assert(CurrConflInstNo >= 0);
  assert(CurrSyncGroupIdx >= 0);

  if (CurrConflInstNo > 0) {
    --CurrConflInstNo;
    return;
  }

  if (CurrConflInstNo == 0) {
    // If we return to the starting position, we have explored
    // the entire tree
    if (CurrSyncGroupIdx == BeginSyncGroupIdx)
      return;

    --CurrSyncGroupIdx;
    // Go to previous non-trivial pipeline
    while (PipelineInstrs[CurrSyncGroupIdx].empty())
      --CurrSyncGroupIdx;

    CurrConflInstNo = PipelineInstrs[CurrSyncGroupIdx].size() - 1;
  }
}

bool PipelineSolver::checkOptimal() {
  if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size()) {
    if (BestCost == -1 || CurrCost < BestCost) {
      BestPipeline = CurrPipeline;
      BestCost = CurrCost;
      LLVM_DEBUG(dbgs() << "Found Fit with cost " << BestCost << "\n");
    }
    assert(BestCost >= 0);
  }

  bool DoneExploring = false;
  if (MaxBranchesExplored > 0 && BranchesExplored >= MaxBranchesExplored)
    DoneExploring = true;

  return (DoneExploring || BestCost == 0);
}

template <typename T>
void PipelineSolver::populateReadyList(
    SmallVectorImpl<std::pair<int, int>> &ReadyList, T I, T E) {
  SUToCandSGsPair CurrSU = PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
  auto SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
  assert(CurrSU.second.size() >= 1);

  for (; I != E; ++I) {
    std::list<std::pair<SUnit *, SUnit *>> AddedEdges;
    int CandSGID = *I;
    SchedGroup *Match = llvm::find_if(SyncPipeline, [CandSGID](SchedGroup &SG) {
      return SG.getSGID() == CandSGID;
    });
    assert(Match);

    if (UseCostHeur) {
      if (Match->isFull()) {
        ReadyList.push_back(std::pair(*I, MissPenalty));
        continue;
      }

      int TempCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
      ReadyList.push_back(std::pair(*I, TempCost));
      removeEdges(AddedEdges);
    } else
      ReadyList.push_back(std::pair(*I, -1));
  }

  if (UseCostHeur)
    std::sort(ReadyList.begin(), ReadyList.end(), llvm::less_second());

  assert(ReadyList.size() == CurrSU.second.size());
}

bool PipelineSolver::solveExact() {
  if (checkOptimal())
    return true;

  if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size())
    return false;

  assert(static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size());
  assert(static_cast<size_t>(CurrConflInstNo) <
         PipelineInstrs[CurrSyncGroupIdx].size());
  SUToCandSGsPair CurrSU = PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
  LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSU.first->NodeNum
                    << ") in Pipeline # " << CurrSyncGroupIdx << "\n");

  // SchedGroup -> Cost pairs
  SmallVector<std::pair<int, int>, 4> ReadyList;
  // Prioritize the candidate sched groups in terms of lowest cost first
  IsBottomUp ? populateReadyList(ReadyList, CurrSU.second.rbegin(),
                                 CurrSU.second.rend())
             : populateReadyList(ReadyList, CurrSU.second.begin(),
                                 CurrSU.second.end());

  auto *I = ReadyList.begin();
  auto *E = ReadyList.end();
  for (; I != E; ++I) {
    // If we are trying SGs in least cost order, and the current SG is cost
    // infeasible, then all subsequent SGs will also be cost infeasible, so we
    // can prune.
    if (BestCost != -1 && (CurrCost + I->second > BestCost))
      return false;

    int CandSGID = I->first;
    int AddedCost = 0;
    std::list<std::pair<SUnit *, SUnit *>> AddedEdges;
    auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
    SchedGroup *Match;
    for (auto &SG : SyncPipeline) {
      if (SG.getSGID() == CandSGID)
        Match = &SG;
    }

    if (Match->isFull())
      continue;

    if (!Match->allowedByRules(CurrSU.first, SyncPipeline))
      continue;

    LLVM_DEBUG(dbgs() << "Assigning to SchedGroup with Mask "
                      << (int)Match->getMask() << " and ID " << CandSGID
                      << "\n");
    Match->add(*CurrSU.first);
    AddedCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
    LLVM_DEBUG(dbgs() << "Cost of Assignment: " << AddedCost << "\n");
    CurrCost += AddedCost;
    advancePosition();
    ++BranchesExplored;
    bool FinishedExploring = false;
    // If the Cost after adding edges is greater than a known solution,
    // backtrack
    if (CurrCost < BestCost || BestCost == -1) {
      if (solveExact()) {
        FinishedExploring = BestCost != 0;
        if (!FinishedExploring)
          return true;
      }
    }

    retreatPosition();
    CurrCost -= AddedCost;
    removeEdges(AddedEdges);
    Match->pop();
    CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
    if (FinishedExploring)
      return true;
  }

  // Try the pipeline where the current instruction is omitted.
  // Potentially, if we omit a problematic instruction from the pipeline,
  // all the other instructions can nicely fit.
  CurrCost += MissPenalty;
  advancePosition();

  LLVM_DEBUG(dbgs() << "NOT Assigned (" << CurrSU.first->NodeNum << ")\n");

  bool FinishedExploring = false;
  if (CurrCost < BestCost || BestCost == -1) {
    if (solveExact()) {
      FinishedExploring = BestCost != 0;
      if (!FinishedExploring)
        return true;
    }
  }

  retreatPosition();
  CurrCost -= MissPenalty;
  return FinishedExploring;
}

template <typename T>
void PipelineSolver::greedyFind(
    std::list<std::pair<SUnit *, SUnit *>> &AddedEdges, T I, T E) {
  SUToCandSGsPair CurrSU = PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
  int BestNodeCost = -1;
  int TempCost;
  SchedGroup *BestGroup = nullptr;
  int BestGroupID = -1;
  std::list<std::pair<SUnit *, SUnit *>> BestEdges;
  auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
  LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSU.first->NodeNum
                    << ") in Pipeline # " << CurrSyncGroupIdx << "\n");

  // Since we have added the potential SchedGroups from bottom up, but
  // traversed the DAG from top down, parse over the groups from last to
  // first. If we fail to do this for the greedy algorithm, the solution will
  // likely not be good in more complex cases.
  for (; I != E; ++I) {
    int CandSGID = *I;
    SchedGroup *Match = llvm::find_if(SyncPipeline, [CandSGID](SchedGroup &SG) {
      return SG.getSGID() == CandSGID;
    });
    assert(Match);

    LLVM_DEBUG(dbgs() << "Trying SGID # " << CandSGID << " with Mask "
                      << (int)Match->getMask() << "\n");

    if (Match->isFull()) {
      LLVM_DEBUG(dbgs() << "SGID # " << CandSGID << " is full\n");
      continue;
    }
    if (!Match->allowedByRules(CurrSU.first, SyncPipeline)) {
      LLVM_DEBUG(dbgs() << "SGID # " << CandSGID << " has conflicting rule\n");
      continue;
    }

    std::list<std::pair<SUnit *, SUnit *>> TempEdges;
    TempCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, TempEdges);
    LLVM_DEBUG(dbgs() << "Cost of Group " << TempCost << "\n");

    if (TempCost < BestNodeCost || BestNodeCost == -1) {
      BestEdges = TempEdges;
      BestGroup = Match;
      BestNodeCost = TempCost;
      BestGroupID = CandSGID;

      if (BestNodeCost == 0)
        break;
    }

    removeEdges(TempEdges);
  }

  if (BestGroupID != -1) {
    BestGroup->add(*CurrSU.first);
    if (AddedEdges.empty())
      AddedEdges = BestEdges;
    else
      AddedEdges.splice(std::prev(AddedEdges.cend()), BestEdges);

    for (const std::pair<SUnit *, SUnit *> &E : BestEdges) {
      if (!BestGroup->tryAddEdge(E.first, E.second))
        llvm_unreachable("Edges known to be insertable.");
    }

    LLVM_DEBUG(dbgs() << "Best Group has ID: " << BestGroupID << " and Mask "
                      << (int)BestGroup->getMask() << "\n");
    BestCost += TempCost;
  } else
    BestCost += MissPenalty;

  CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
}

bool PipelineSolver::solveGreedy() {
  BestCost = 0;
  std::list<std::pair<SUnit *, SUnit *>> AddedEdges;

  while (static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size()) {
    SUToCandSGsPair CurrSU = PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
    IsBottomUp
        ? greedyFind(AddedEdges, CurrSU.second.rbegin(), CurrSU.second.rend())
        : greedyFind(AddedEdges, CurrSU.second.begin(), CurrSU.second.end());
    advancePosition();
  }
  BestPipeline = CurrPipeline;
  removeEdges(AddedEdges);
  return false;
}

unsigned PipelineSolver::computeProblemSize() {
  unsigned ProblemSize = 0;
  for (auto &PipeConflicts : PipelineInstrs) {
    ProblemSize += PipeConflicts.size();
  }

  return ProblemSize;
}

void PipelineSolver::solve() {
  if (!NeedsSolver)
    return;

  unsigned ProblemSize = computeProblemSize();
  assert(ProblemSize > 0);

  bool BelowCutoff = (CutoffForExact > 0) && ProblemSize <= CutoffForExact;
  MissPenalty = (ProblemSize / 2) + 1;

  LLVM_DEBUG(DAG->dump());
  if (EnableExactSolver || BelowCutoff) {
    LLVM_DEBUG(dbgs() << "Starting Greedy pipeline solver\n");
    solveGreedy();
    reset();
    LLVM_DEBUG(dbgs() << "Greedy produced best cost of " << BestCost << "\n");
    if (BestCost > 0) {
      LLVM_DEBUG(dbgs() << "Starting EXACT pipeline solver\n");
      solveExact();
      LLVM_DEBUG(dbgs() << "Exact produced best cost of " << BestCost << "\n");
    }
  } else { // Use the Greedy Algorithm by default
    LLVM_DEBUG(dbgs() << "Starting GREEDY pipeline solver\n");
    solveGreedy();
  }

  makePipeline();
  LLVM_DEBUG(dbgs() << "After applying mutation\n");
  LLVM_DEBUG(DAG->dump());
}
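
// Note on the flow above: the exact path (enabled by the flag or by a problem
// size within the cutoff) still runs the greedy solver first. Its result
// seeds BestCost, so the branch-and-bound in solveExact can prune against a
// known solution, and the exact solver only runs if greedy was imperfect.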

enum IGLPStrategyID : int {
  MFMASmallGemmOptID = 0,
  MFMASmallGemmSingleWaveOptID = 1,
  MFMAExpInterleaveID = 2,
  MFMAExpSimpleInterleaveID = 3
};

// Implement an IGLP scheduling strategy.
class IGLPStrategy {
protected:
  ScheduleDAGInstrs *DAG;

  const SIInstrInfo *TII;

public:
  /// Add SchedGroups to \p SyncedSchedGroups to implement this Strategy.
  virtual bool applyIGLPStrategy(
      DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
      DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
      AMDGPU::SchedulingPhase Phase) = 0;

  // Returns true if this strategy should be applied to a ScheduleDAG.
  virtual bool shouldApplyStrategy(ScheduleDAGInstrs *DAG,
                                   AMDGPU::SchedulingPhase Phase) = 0;

  bool IsBottomUp = true;

  IGLPStrategy(ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
      : DAG(DAG), TII(TII) {}

  virtual ~IGLPStrategy() = default;
};

class MFMASmallGemmOpt final : public IGLPStrategy {
private:
public:
  bool applyIGLPStrategy(
      DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
      DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
      AMDGPU::SchedulingPhase Phase) override;

  bool shouldApplyStrategy(ScheduleDAGInstrs *DAG,
                           AMDGPU::SchedulingPhase Phase) override {
    return true;
  }

  MFMASmallGemmOpt(ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
      : IGLPStrategy(DAG, TII) {
    IsBottomUp = true;
  }
};

bool MFMASmallGemmOpt::applyIGLPStrategy(
    DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
    DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
    AMDGPU::SchedulingPhase Phase) {
  // Count the number of MFMA instructions.
  unsigned MFMACount = 0;
  for (const MachineInstr &I : *DAG)
    if (TII->isMFMAorWMMA(I))
      ++MFMACount;

  const unsigned PipelineSyncID = 0;
  SchedGroup *SG = nullptr;
  for (unsigned I = 0; I < MFMACount * 3; ++I) {
    SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
        SchedGroupMask::DS, 2, PipelineSyncID, DAG, TII);
    SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);

    SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
        SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
    SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
  }

  return true;
}
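
// The strategy above requests, three times per MFMA, the repeating pipeline
// {DS(2), MFMA(1)} in sync group 0. In terms of the SCHED_GROUP_BARRIER masks
// from the SchedGroupMask enum (DS == 0x80, MFMA == 0x8), this is roughly the
// request a kernel could make directly; the IR spelling below is only an
// illustration:
//   call void @llvm.amdgcn.sched.group.barrier(i32 128, i32 2, i32 0)
//   call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)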

class MFMAExpInterleaveOpt final : public IGLPStrategy {
private:
  // The count of TRANS SUs involved in the interleaved pipeline
  static unsigned TransPipeCount;
  // The count of MFMA SUs involved in the interleaved pipeline
  static unsigned MFMAPipeCount;
  // The count of Add SUs involved in the interleaved pipeline
  static unsigned AddPipeCount;
  // The number of transitive MFMA successors for each TRANS SU
  static unsigned MFMAEnablement;
  // The number of transitive TRANS predecessors for each MFMA SU
  static unsigned ExpRequirement;
  // The count of independent "chains" of MFMA instructions in the pipeline
  static unsigned MFMAChains;
  // Whether or not the pipeline has V_CVT instructions
  static bool HasCvt;
  // Whether or not there are instructions between the TRANS instruction and
  // V_CVT
  static bool HasChainBetweenCvt;
  // The first occurring DS_READ which feeds an MFMA chain
  static std::optional<unsigned> FirstPipeDSR;
  // The MFMAPipe SUs with no MFMA predecessors
  SmallVector<SUnit *, 4> MFMAChainSeeds;
  // Compute the heuristics for the pipeline, returning whether or not the DAG
  // is well formatted for the mutation
  bool analyzeDAG(const SIInstrInfo *TII);

  /// Whether or not the instruction is a transitive predecessor of an MFMA
  /// instruction
  class IsPipeExp final : public InstructionRule {
  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {

      auto *DAG = SyncPipe[0].DAG;

      if (Cache->empty()) {
        auto I = DAG->SUnits.rbegin();
        auto E = DAG->SUnits.rend();
        for (; I != E; I++) {
          if (TII->isMFMAorWMMA(*I->getInstr()))
            Cache->push_back(&*I);
        }
        if (Cache->empty())
          return false;
      }

      auto Reaches = any_of(*Cache, [&SU, &DAG](SUnit *TargetSU) {
        return DAG->IsReachable(TargetSU, const_cast<SUnit *>(SU));
      });

      return Reaches;
    }
    IsPipeExp(const SIInstrInfo *TII, unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache) {}
  };

  /// Whether or not the instruction is a transitive predecessor of the
  /// \p Number th MFMA of the MFMAs occurring after a TRANS instruction
  class EnablesNthMFMA final : public InstructionRule {
  private:
    unsigned Number = 1;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      bool FoundTrans = false;
      unsigned Counter = 1;
      auto *DAG = SyncPipe[0].DAG;

      if (Cache->empty()) {
        auto I = DAG->SUnits.begin();
        auto E = DAG->SUnits.end();
        for (; I != E; I++) {
          if (FoundTrans && TII->isMFMAorWMMA(*I->getInstr())) {
            if (Counter == Number) {
              Cache->push_back(&*I);
              break;
            }
            ++Counter;
          }
          if (!FoundTrans && TII->isTRANS(I->getInstr()->getOpcode()))
            FoundTrans = true;
        }
        if (Cache->empty())
          return false;
      }

      return DAG->IsReachable((*Cache)[0], const_cast<SUnit *>(SU));
    }

    EnablesNthMFMA(unsigned Number, const SIInstrInfo *TII, unsigned SGID,
                   bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Number(Number) {}
  };

  /// Whether or not the instruction enables the exact MFMA that is the \p
  /// Number th MFMA in the chain starting with \p ChainSeed
  class EnablesNthMFMAInChain final : public InstructionRule {
  private:
    unsigned Number = 1;
    SUnit *ChainSeed;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      auto *DAG = SyncPipe[0].DAG;

      if (!SU || !TII->isMFMAorWMMA(*ChainSeed->getInstr()))
        return false;

      if (Cache->empty()) {
        auto *TempSU = ChainSeed;
        auto Depth = Number;
        while (Depth > 0) {
          --Depth;
          bool Found = false;
          for (auto &Succ : TempSU->Succs) {
            if (TII->isMFMAorWMMA(*Succ.getSUnit()->getInstr())) {
              TempSU = Succ.getSUnit();
              Found = true;
              break;
            }
          }
          if (!Found)
            return false;
        }

        Cache->push_back(TempSU);
      }
      // If we failed to find the instruction to be placed into the cache, we
      // would have already exited.
      assert(!Cache->empty());

      return DAG->IsReachable((*Cache)[0], const_cast<SUnit *>(SU));
    }

    EnablesNthMFMAInChain(unsigned Number, SUnit *ChainSeed,
                          const SIInstrInfo *TII, unsigned SGID,
                          bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Number(Number),
          ChainSeed(ChainSeed) {}
  };

  /// Whether or not the instruction has fewer than \p Size immediate
  /// successors. If \p HasIntermediary is true, this tests also whether all
  /// successors of the SUnit have fewer than \p Size successors.
  class LessThanNSuccs final : public InstructionRule {
  private:
    unsigned Size = 1;
    bool HasIntermediary = false;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      if (!SyncPipe.size())
        return false;

      unsigned SuccSize = llvm::count_if(SU->Succs, [](const SDep &Succ) {
        return Succ.getKind() == SDep::Data;
      });
      if (SuccSize >= Size)
        return false;

      if (HasIntermediary) {
        for (auto Succ : SU->Succs) {
          unsigned SuccSize =
              llvm::count_if(Succ.getSUnit()->Succs, [](const SDep &SuccSucc) {
                return SuccSucc.getKind() == SDep::Data;
              });
          if (SuccSize >= Size)
            return false;
        }
      }

      return true;
    }
    LessThanNSuccs(unsigned Size, const SIInstrInfo *TII, unsigned SGID,
                   bool HasIntermediary = false, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Size(Size),
          HasIntermediary(HasIntermediary) {}
  };

  /// Whether or not the instruction has greater than or equal to \p Size
  /// immediate successors. If \p HasIntermediary is true, this tests also
  /// whether all successors of the SUnit have greater than or equal to \p Size
  /// successors.
  class GreaterThanOrEqualToNSuccs final : public InstructionRule {
  private:
    unsigned Size = 1;
    bool HasIntermediary = false;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      if (!SyncPipe.size())
        return false;

      unsigned SuccSize = llvm::count_if(SU->Succs, [](const SDep &Succ) {
        return Succ.getKind() == SDep::Data;
      });
      if (SuccSize >= Size)
        return true;

      if (HasIntermediary) {
        for (auto Succ : SU->Succs) {
          unsigned SuccSize =
              llvm::count_if(Succ.getSUnit()->Succs, [](const SDep &SuccSucc) {
                return SuccSucc.getKind() == SDep::Data;
              });
          if (SuccSize >= Size)
            return true;
        }
      }

      return false;
    }
    GreaterThanOrEqualToNSuccs(unsigned Size, const SIInstrInfo *TII,
                               unsigned SGID, bool HasIntermediary = false,
                               bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Size(Size),
          HasIntermediary(HasIntermediary) {}
  };

  // Whether or not the instruction is a relevant V_CVT instruction.
  class IsCvt final : public InstructionRule {
  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      auto Opc = SU->getInstr()->getOpcode();
      return Opc == AMDGPU::V_CVT_F16_F32_e32 ||
             Opc == AMDGPU::V_CVT_I32_F32_e32;
    }
    IsCvt(const SIInstrInfo *TII, unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache) {}
  };

  // Whether or not the instruction is FMA_F32.
  class IsFMA final : public InstructionRule {
  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      return SU->getInstr()->getOpcode() == AMDGPU::V_FMA_F32_e64 ||
             SU->getInstr()->getOpcode() == AMDGPU::V_PK_FMA_F32;
    }
    IsFMA(const SIInstrInfo *TII, unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache) {}
  };

  // Whether or not the instruction is a V_ADD_F32 instruction.
  class IsPipeAdd final : public InstructionRule {
  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      return SU->getInstr()->getOpcode() == AMDGPU::V_ADD_F32_e32;
    }
    IsPipeAdd(const SIInstrInfo *TII, unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache) {}
  };

  /// Whether or not the instruction is an immediate RAW successor
  /// of the SchedGroup \p Distance steps before.
  class IsSuccOfPrevNthGroup final : public InstructionRule {
  private:
    unsigned Distance = 1;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      SchedGroup *OtherGroup = nullptr;
      if (!SyncPipe.size())
        return false;

      for (auto &PipeSG : SyncPipe) {
        if ((unsigned)PipeSG.getSGID() == SGID - Distance)
          OtherGroup = &PipeSG;
      }

      if (!OtherGroup)
        return false;
      if (!OtherGroup->Collection.size())
        return true;

      for (auto &OtherEle : OtherGroup->Collection) {
        for (auto &Succ : OtherEle->Succs) {
          if (Succ.getSUnit() == SU && Succ.getKind() == SDep::Data)
            return true;
        }
      }

      return false;
    }
    IsSuccOfPrevNthGroup(unsigned Distance, const SIInstrInfo *TII,
                         unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Distance(Distance) {}
  };

  /// Whether or not the instruction is a transitive successor of any
  /// instruction in the SchedGroup \p Distance steps before.
  class IsReachableFromPrevNthGroup final : public InstructionRule {
  private:
    unsigned Distance = 1;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      SchedGroup *OtherGroup = nullptr;
      if (!SyncPipe.size())
        return false;

      for (auto &PipeSG : SyncPipe) {
        if ((unsigned)PipeSG.getSGID() == SGID - Distance)
          OtherGroup = &PipeSG;
      }

      if (!OtherGroup)
        return false;
      if (!OtherGroup->Collection.size())
        return true;

      auto *DAG = SyncPipe[0].DAG;

      for (auto &OtherEle : OtherGroup->Collection)
        if (DAG->IsReachable(const_cast<SUnit *>(SU), OtherEle))
          return true;

      return false;
    }
    IsReachableFromPrevNthGroup(unsigned Distance, const SIInstrInfo *TII,
                                unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Distance(Distance) {}
  };

  /// Whether or not the instruction occurs at or after the SU with NodeNum
  /// \p Number
  class OccursAtOrAfterNode final : public InstructionRule {
  private:
    unsigned Number = 1;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {

      return SU->NodeNum >= Number;
    }
    OccursAtOrAfterNode(unsigned Number, const SIInstrInfo *TII, unsigned SGID,
                        bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Number(Number) {}
  };

  /// Whether or not the SU is exactly the \p Number th MFMA in the chain
  /// starting with \p ChainSeed
  class IsExactMFMA final : public InstructionRule {
  private:
    unsigned Number = 1;
    SUnit *ChainSeed;

  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {
      if (!SU || !TII->isMFMAorWMMA(*ChainSeed->getInstr()))
        return false;

      if (Cache->empty()) {
        auto *TempSU = ChainSeed;
        auto Depth = Number;
        while (Depth > 0) {
          --Depth;
          bool Found = false;
          for (auto &Succ : TempSU->Succs) {
            if (TII->isMFMAorWMMA(*Succ.getSUnit()->getInstr())) {
              TempSU = Succ.getSUnit();
              Found = true;
              break;
            }
          }
          if (!Found) {
            return false;
          }
        }
        Cache->push_back(TempSU);
      }
      // If we failed to find the instruction to be placed into the cache, we
      // would have already exited.
      assert(!Cache->empty());

      return (*Cache)[0] == SU;
    }

    IsExactMFMA(unsigned Number, SUnit *ChainSeed, const SIInstrInfo *TII,
                unsigned SGID, bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache), Number(Number),
          ChainSeed(ChainSeed) {}
  };

  // Whether the instruction occurs after the first TRANS instruction. This
  // implies the instruction cannot be a predecessor of the first TRANS
  // instruction.
  class OccursAfterExp final : public InstructionRule {
  public:
    bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
               SmallVectorImpl<SchedGroup> &SyncPipe) override {

      auto *DAG = SyncPipe[0].DAG;
      if (Cache->empty()) {
        for (auto &SU : DAG->SUnits)
          if (TII->isTRANS(SU.getInstr()->getOpcode())) {
            Cache->push_back(&SU);
            break;
          }
        if (Cache->empty())
          return false;
      }

      return SU->NodeNum > (*Cache)[0]->NodeNum;
    }

    OccursAfterExp(const SIInstrInfo *TII, unsigned SGID,
                   bool NeedsCache = false)
        : InstructionRule(TII, SGID, NeedsCache) {}
  };

public:
  bool applyIGLPStrategy(
      DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
      DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
      AMDGPU::SchedulingPhase Phase) override;

  bool shouldApplyStrategy(ScheduleDAGInstrs *DAG,
                           AMDGPU::SchedulingPhase Phase) override;

  MFMAExpInterleaveOpt(ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
      : IGLPStrategy(DAG, TII) {
    IsBottomUp = false;
  }
};

unsigned MFMAExpInterleaveOpt::TransPipeCount = 0;
unsigned MFMAExpInterleaveOpt::MFMAPipeCount = 0;
unsigned MFMAExpInterleaveOpt::AddPipeCount = 0;
unsigned MFMAExpInterleaveOpt::MFMAEnablement = 0;
unsigned MFMAExpInterleaveOpt::ExpRequirement = 0;
unsigned MFMAExpInterleaveOpt::MFMAChains = 0;
bool MFMAExpInterleaveOpt::HasCvt = false;
bool MFMAExpInterleaveOpt::HasChainBetweenCvt = false;
std::optional<unsigned> MFMAExpInterleaveOpt::FirstPipeDSR = std::nullopt;

bool MFMAExpInterleaveOpt::analyzeDAG(const SIInstrInfo *TII) {
  SmallVector<SUnit *, 10> ExpPipeCands;
  SmallVector<SUnit *, 10> MFMAPipeCands;
  SmallVector<SUnit *, 10> MFMAPipeSUs;
  SmallVector<SUnit *, 10> PackSUs;
  SmallVector<SUnit *, 10> CvtSUs;

  auto isBitPack = [](unsigned Opc) {
    return Opc == AMDGPU::V_PACK_B32_F16_e64 || Opc == AMDGPU::V_PERM_B32_e64;
  };

  auto isCvt = [](unsigned Opc) {
    return Opc == AMDGPU::V_CVT_F16_F32_e32 || Opc == AMDGPU::V_CVT_I32_F32_e32;
  };

  auto isAdd = [](unsigned Opc) { return Opc == AMDGPU::V_ADD_F32_e32; };

  AddPipeCount = 0;
  for (SUnit &SU : DAG->SUnits) {
    auto Opc = SU.getInstr()->getOpcode();
    if (TII->isTRANS(Opc)) {
      // Avoid counting a potential bonus V_EXP which all the MFMA depend on
      if (SU.Succs.size() >= 7)
        continue;
      for (auto &Succ : SU.Succs) {
        if (Succ.getSUnit()->Succs.size() >= 7)
          continue;
      }
      ExpPipeCands.push_back(&SU);
    }

    if (TII->isMFMAorWMMA(*SU.getInstr()))
      MFMAPipeCands.push_back(&SU);

    if (isBitPack(Opc))
      PackSUs.push_back(&SU);

    if (isCvt(Opc))
      CvtSUs.push_back(&SU);

    if (isAdd(Opc))
      ++AddPipeCount;
  }

  if (!(PackSUs.size() && MFMAPipeCands.size() && ExpPipeCands.size()))
    return false;

  TransPipeCount = 0;

  std::optional<SUnit *> TempMFMA;
  std::optional<SUnit *> TempExp;
  // Count the number of EXPs that reach an MFMA
  for (auto &PredSU : ExpPipeCands) {
    for (auto &SuccSU : MFMAPipeCands) {
      if (DAG->IsReachable(SuccSU, PredSU)) {
        if (!TempExp) {
          TempExp = PredSU;
          TempMFMA = SuccSU;
        }
        MFMAPipeSUs.push_back(SuccSU);
        ++TransPipeCount;
        break;
      }
    }
  }

  if (!(TempExp && TempMFMA))
    return false;

  HasChainBetweenCvt = none_of((*TempExp)->Succs, [&isCvt](SDep &Succ) {
    return isCvt(Succ.getSUnit()->getInstr()->getOpcode());
  });

  // Count the number of MFMAs that are reached by an EXP
  for (auto &SuccSU : MFMAPipeCands) {
    if (MFMAPipeSUs.size() &&
        any_of(MFMAPipeSUs, [&SuccSU](SUnit *PotentialMatch) {
          return PotentialMatch->NodeNum == SuccSU->NodeNum;
        }))
      continue;

    for (auto &PredSU : ExpPipeCands) {
      if (DAG->IsReachable(SuccSU, PredSU)) {
        MFMAPipeSUs.push_back(SuccSU);
        break;
      }
    }
  }

  MFMAPipeCount = MFMAPipeSUs.size();

  assert(TempExp && TempMFMA);
  assert(MFMAPipeCount > 0);

  std::optional<SUnit *> TempCvt;
  for (auto &SuccSU : CvtSUs) {
    if (DAG->IsReachable(SuccSU, *TempExp)) {
      TempCvt = SuccSU;
      break;
    }
  }

  HasCvt = false;
  if (TempCvt.has_value()) {
    for (auto &SuccSU : MFMAPipeSUs) {
      if (DAG->IsReachable(SuccSU, *TempCvt)) {
        HasCvt = true;
        break;
      }
    }
  }

  MFMAChains = 0;
  for (auto &MFMAPipeSU : MFMAPipeSUs) {
    if (is_contained(MFMAChainSeeds, MFMAPipeSU))
      continue;
    if (none_of(MFMAPipeSU->Preds, [&TII](SDep &Succ) {
          return TII->isMFMAorWMMA(*Succ.getSUnit()->getInstr());
        })) {
      MFMAChainSeeds.push_back(MFMAPipeSU);
      ++MFMAChains;
    }
  }

  if (!MFMAChains)
    return false;

  for (auto Pred : MFMAChainSeeds[0]->Preds) {
    if (TII->isDS(Pred.getSUnit()->getInstr()->getOpcode()) &&
        Pred.getSUnit()->getInstr()->mayLoad())
      FirstPipeDSR = Pred.getSUnit()->NodeNum;
  }

  // The number of bit pack operations that depend on a single V_EXP
  unsigned PackSuccCount =
      llvm::count_if(PackSUs, [this, &TempExp](SUnit *VPack) {
        return DAG->IsReachable(VPack, *TempExp);
      });

  // The number of bit pack operations an MFMA depends on
  unsigned PackPredCount =
      llvm::count_if((*TempMFMA)->Preds, [&isBitPack](SDep &Pred) {
        auto Opc = Pred.getSUnit()->getInstr()->getOpcode();
        return isBitPack(Opc);
      });

  auto *PackPred = llvm::find_if((*TempMFMA)->Preds, [&isBitPack](SDep &Pred) {
    auto Opc = Pred.getSUnit()->getInstr()->getOpcode();
    return isBitPack(Opc);
  });

  if (PackPred == (*TempMFMA)->Preds.end())
    return false;

  MFMAEnablement = 0;
  ExpRequirement = 0;
  // How many MFMAs depend on a single bit pack operation
  MFMAEnablement =
      llvm::count_if(PackPred->getSUnit()->Succs, [&TII](SDep &Succ) {
        return TII->isMFMAorWMMA(*Succ.getSUnit()->getInstr());
      });

  // The number of MFMAs that depend on a single V_EXP
  MFMAEnablement *= PackSuccCount;

  // The number of V_EXPs required to resolve all dependencies for an MFMA
  ExpRequirement =
      llvm::count_if(ExpPipeCands, [this, &PackPred](SUnit *ExpBase) {
        return DAG->IsReachable(PackPred->getSUnit(), ExpBase);
      });

  ExpRequirement *= PackPredCount;
  return true;
}
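
// A worked example of the two derived quantities above (numbers are
// hypothetical): if each V_EXP feeds two bit-pack ops (PackSuccCount == 2)
// and each bit-pack feeds two MFMAs, MFMAEnablement == 2 * 2 == 4. If four
// V_EXPs reach the bit-pack feeding TempMFMA and that MFMA has one bit-pack
// predecessor (PackPredCount == 1), ExpRequirement == 4 * 1 == 4.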

bool MFMAExpInterleaveOpt::shouldApplyStrategy(ScheduleDAGInstrs *DAG,
                                               AMDGPU::SchedulingPhase Phase) {
  const GCNSubtarget &ST = DAG->MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  MFMAChainSeeds.clear();
  if (Phase != AMDGPU::SchedulingPhase::PostRA && !analyzeDAG(TII))
    return false;

  return true;
}
1532
1533bool MFMAExpInterleaveOpt::applyIGLPStrategy(
1535 DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
1537
1538 bool IsSmallKernelType =
1539 MFMAEnablement == 2 && ExpRequirement == 4 && TransPipeCount == 32;
1540 bool IsLargeKernelType =
1541 MFMAEnablement == 4 && ExpRequirement == 4 && TransPipeCount == 64;
1542
1543 if (!(IsSmallKernelType || IsLargeKernelType))
1544 return false;
1545
1546 const GCNSubtarget &ST = DAG->MF.getSubtarget<GCNSubtarget>();
1547 const SIInstrInfo *TII = ST.getInstrInfo();
1548
1549 unsigned PipelineSyncID = 0;
1550 SchedGroup *SG = nullptr;
1551
1552 unsigned MFMAChain = 0;
1553 unsigned PositionInChain = 0;
1554 unsigned CurrMFMAForTransPosition = 0;
1555
1556 auto incrementTransPosition = [&MFMAChain, &PositionInChain,
1557 &CurrMFMAForTransPosition]() {
1558 CurrMFMAForTransPosition += MFMAEnablement;
1559 PositionInChain = (CurrMFMAForTransPosition / MFMAChains);
1560 MFMAChain = CurrMFMAForTransPosition % MFMAChains;
1561 };
1562
1563 auto getNextTransPositionInChain = [&CurrMFMAForTransPosition]() {
1564 auto TempMFMAForTrans = CurrMFMAForTransPosition + MFMAEnablement;
1565 return (TempMFMAForTrans / MFMAChains);
1566 };
1567
1568 auto getNextTransMFMAChain = [&CurrMFMAForTransPosition]() {
1569 auto TempMFMAForTrans = CurrMFMAForTransPosition + MFMAEnablement;
1570 return TempMFMAForTrans % MFMAChains;
1571 };
1572
1573 unsigned CurrMFMAPosition = 0;
1574 unsigned MFMAChainForMFMA = 0;
1575 unsigned PositionInChainForMFMA = 0;
1576
1577 auto incrementMFMAPosition = [&CurrMFMAPosition, &MFMAChainForMFMA,
1578 &PositionInChainForMFMA]() {
1579 ++CurrMFMAPosition;
1580 MFMAChainForMFMA = CurrMFMAPosition % MFMAChains;
1581 PositionInChainForMFMA = CurrMFMAPosition / MFMAChains;
1582 };
1583
1584 bool IsPostRA = Phase == AMDGPU::SchedulingPhase::PostRA;
1585 assert(IsPostRA || MFMAChainSeeds.size() == MFMAChains);
1586
1587 bool UsesFMA = IsSmallKernelType || !IsPostRA;
1588 bool UsesDSRead = IsLargeKernelType && !IsPostRA && FirstPipeDSR;
1589 bool UsesCvt = HasCvt && (IsSmallKernelType || !IsPostRA);
1590 bool UsesVALU = IsSmallKernelType;
1591
1592 // PHASE 1: "Prefetch"
1593 if (UsesFMA) {
1594 // First Round FMA
1595 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1596 SchedGroupMask::VALU, ExpRequirement, PipelineSyncID, DAG, TII);
1597 if (!IsPostRA && MFMAChains) {
1598 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1599 PositionInChain, MFMAChainSeeds[MFMAChain], TII, SG->getSGID(),
1600 true));
1601 } else
1602 SG->addRule(
1603 std::make_shared<EnablesNthMFMA>(1, TII, SG->getSGID(), true));
1604 SG->addRule(std::make_shared<IsFMA>(TII, SG->getSGID()));
1605 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1606
1607 // Second Round FMA
1608 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1609 SchedGroupMask::VALU, ExpRequirement, PipelineSyncID, DAG, TII);
1610 if (!IsPostRA && MFMAChains) {
1611 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1612 getNextTransPositionInChain(),
1613 MFMAChainSeeds[getNextTransMFMAChain()], TII, SG->getSGID(), true));
1614 } else
1615 SG->addRule(std::make_shared<EnablesNthMFMA>(MFMAEnablement + 1, TII,
1616 SG->getSGID(), true));
1617 SG->addRule(std::make_shared<IsFMA>(TII, SG->getSGID()));
1618 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1619 }
1620
1621 if (UsesDSRead) {
1622 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1623 SchedGroupMask::DS_READ, 2, PipelineSyncID, DAG, TII);
1624 SG->addRule(std::make_shared<OccursAtOrAfterNode>(*FirstPipeDSR, TII,
1625 SG->getSGID()));
1626 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1627 }
1628
1629 // First Round EXP
1630 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1631 SchedGroupMask::TRANS, ExpRequirement, PipelineSyncID, DAG, TII);
1632 if (!IsPostRA && MFMAChains)
1633 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1634 PositionInChain, MFMAChainSeeds[MFMAChain], TII, SG->getSGID(), true));
1635 else
1636 SG->addRule(std::make_shared<EnablesNthMFMA>(1, TII, SG->getSGID(), true));
1637 SG->addRule(std::make_shared<IsPipeExp>(TII, SG->getSGID(), true));
1638 SG->addRule(std::make_shared<LessThanNSuccs>(8, TII, SG->getSGID(),
1639 HasChainBetweenCvt));
1640 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1641
1642 incrementTransPosition();
1643
1644 // First Round CVT, Third Round FMA, Second Round EXP; interleaved
1645 for (unsigned I = 0; I < ExpRequirement; I++) {
1646 // First Round CVT
1647 if (UsesCvt) {
1648 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1649 SchedGroupMask::VALU, 1, PipelineSyncID, DAG, TII);
1650 SG->addRule(std::make_shared<IsCvt>(TII, SG->getSGID()));
1651 if (HasChainBetweenCvt)
1652 SG->addRule(std::make_shared<IsReachableFromPrevNthGroup>(
1653 1 + (2 + UsesFMA) * I, TII, SG->getSGID()));
1654 else
1655 SG->addRule(std::make_shared<IsSuccOfPrevNthGroup>(
1656 1 + (2 + UsesFMA) * I, TII, SG->getSGID()));
1657 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1658 }
1659
1660 // Third Round FMA
1661 if (UsesFMA) {
1662 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1663 SchedGroupMask::VALU, 1, PipelineSyncID, DAG, TII);
1664 if (!IsPostRA && MFMAChains) {
1665 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1666 getNextTransPositionInChain(),
1667 MFMAChainSeeds[getNextTransMFMAChain()], TII, SG->getSGID(), true));
1668 } else
1669 SG->addRule(std::make_shared<EnablesNthMFMA>(2 * MFMAEnablement + 1,
1670 TII, SG->getSGID(), true));
1671 SG->addRule(std::make_shared<IsFMA>(TII, SG->getSGID()));
1672 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1673 }
1674
1675 // Second Round EXP
1676 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1677 SchedGroupMask::TRANS, 1, PipelineSyncID, DAG, TII);
1678 if (!IsPostRA && MFMAChains)
1679 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1680 PositionInChain, MFMAChainSeeds[MFMAChain], TII, SG->getSGID(),
1681 true));
1682 else
1683 SG->addRule(std::make_shared<EnablesNthMFMA>(MFMAEnablement + 1, TII,
1684 SG->getSGID(), true));
1685 SG->addRule(std::make_shared<IsPipeExp>(TII, SG->getSGID(), true));
1686 SG->addRule(std::make_shared<LessThanNSuccs>(8, TII, SG->getSGID(),
1687 HasChainBetweenCvt));
1688 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1689 }
1690
1691 // The "extra" EXP which enables all MFMA
1692 // TODO: UsesExtraExp
1693 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1694 SchedGroupMask::TRANS, 1, PipelineSyncID, DAG, TII);
1695 SG->addRule(std::make_shared<IsPipeExp>(TII, SG->getSGID(), true));
1696 SG->addRule(std::make_shared<GreaterThanOrEqualToNSuccs>(
1697 8, TII, SG->getSGID(), HasChainBetweenCvt));
1698 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1699
1700 // PHASE 2: Main Interleave Loop
1701
1702 // The number of MFMAs per iteration
1703 unsigned MFMARatio =
1704 MFMAEnablement > ExpRequirement ? MFMAEnablement / ExpRequirement : 1;
1705 // The number of Exps per iteration
1706 unsigned ExpRatio =
1707 MFMAEnablement > ExpRequirement ? 1 : ExpRequirement / MFMAEnablement;
1708 // The remaining Exps
1709 unsigned RemainingExp = TransPipeCount > (2 * ExpRequirement)
1710 ? TransPipeCount - (2 * ExpRequirement)
1711 : 0;
1712 unsigned ExpLoopCount = RemainingExp / ExpRatio;
1713 // In-loop MFMAs
1714 unsigned MFMAInLoop = MFMAPipeCount > (MFMAEnablement * 2)
1715 ? MFMAPipeCount - (MFMAEnablement * 2)
1716 : 0;
1717 unsigned MFMALoopCount = MFMAInLoop / MFMARatio;
1718 unsigned VALUOps =
1719 AddPipeCount < MFMAPipeCount ? 1 : AddPipeCount / MFMAPipeCount;
1720 unsigned LoopSize = std::min(ExpLoopCount, MFMALoopCount);
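// Illustrative example with hypothetical counts (not from any particular
// kernel): MFMAEnablement = 4, ExpRequirement = 2, TransPipeCount = 20, and
// MFMAPipeCount = 16 give MFMARatio = 2, ExpRatio = 1, RemainingExp = 16,
// ExpLoopCount = 16, MFMAInLoop = 8, MFMALoopCount = 4, and so
// LoopSize = min(16, 4) = 4 interleaved iterations.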
1721
1722 for (unsigned I = 0; I < LoopSize; I++) {
1723 if (!(I * ExpRatio % ExpRequirement))
1724 incrementTransPosition();
1725
1726 // Round N MFMA
1727 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1728 SchedGroupMask::MFMA, MFMARatio, PipelineSyncID, DAG, TII);
1729 if (!IsPostRA && MFMAChains)
1730 SG->addRule(std::make_shared<IsExactMFMA>(
1731 PositionInChainForMFMA, MFMAChainSeeds[MFMAChainForMFMA], TII,
1732 SG->getSGID(), true));
1733 else
1734 SG->addRule(std::make_shared<OccursAfterExp>(TII, SG->getSGID(), true));
1735 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1736 incrementMFMAPosition();
1737
1738 if (UsesVALU) {
1739 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1740 SchedGroupMask::VALU, VALUOps, PipelineSyncID, DAG, TII);
1741 SG->addRule(std::make_shared<IsPipeAdd>(TII, SG->getSGID()));
1742 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1743 }
1744
1745 if (UsesDSRead && !(I % 4)) {
1746 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1747 SchedGroupMask::DS_READ, 2, PipelineSyncID, DAG, TII);
1748 SG->addRule(std::make_shared<OccursAtOrAfterNode>(*FirstPipeDSR, TII,
1749 SG->getSGID()));
1750 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1751 }
1752
1753 // CVT, EXP, FMA Interleaving
1754 for (unsigned J = 0; J < ExpRatio; J++) {
1755 auto MFMAOffset = (1 + UsesVALU) * MFMARatio * (I + 1);
1756 auto MaxMFMAOffset =
1757 (1 + UsesVALU) * ExpRequirement * MFMARatio / ExpRatio;
1758
1759 // Round N + 1 CVT
1760 if (UsesCvt) {
1761 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1762 SchedGroupMask::VALU, 1, PipelineSyncID, DAG, TII);
1763 SG->addRule(std::make_shared<IsCvt>(TII, SG->getSGID()));
1764 auto BaseDiff = (2 + UsesFMA) * (ExpRequirement - 1) + 1;
1765 auto DSROffset = I / 4 + 1;
1766 auto MaxDSROffset = MaxMFMAOffset / 4;
1767 // TODO: UsesExtraExp
1768 auto ExpOffset = I * ExpRatio + J >= ExpRequirement ? 0 : 1;
1769 auto CurrentOffset = UsesDSRead * std::min(MaxDSROffset, DSROffset) +
1770 std::min(MaxMFMAOffset, MFMAOffset) + BaseDiff +
1771 ExpOffset;
1772 if (HasChainBetweenCvt)
1773 SG->addRule(std::make_shared<IsReachableFromPrevNthGroup>(
1774 CurrentOffset, TII, SG->getSGID()));
1775 else
1776 SG->addRule(std::make_shared<IsSuccOfPrevNthGroup>(CurrentOffset, TII,
1777 SG->getSGID()));
1778 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1779 }
1780
1781 // Round N + 3 FMA
1782 if (UsesFMA) {
1783 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1784 SchedGroupMask::VALU, 1, PipelineSyncID, DAG, TII);
1785 if (!IsPostRA && MFMAChains)
1786 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1787 getNextTransPositionInChain(),
1788 MFMAChainSeeds[getNextTransMFMAChain()], TII, SG->getSGID(),
1789 true));
1790 else
1791 SG->addRule(std::make_shared<EnablesNthMFMA>(
1792 (((I * ExpRatio + J) / ExpRequirement) + 3) * MFMAEnablement + 1,
1793 TII, SG->getSGID(), true));
1794 SG->addRule(std::make_shared<IsFMA>(TII, SG->getSGID()));
1795 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1796 }
1797
1798 // Round N + 2 Exp
1799 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1800 SchedGroupMask::TRANS, 1, PipelineSyncID, DAG, TII);
1801 if (!IsPostRA && MFMAChains)
1802 SG->addRule(std::make_shared<EnablesNthMFMAInChain>(
1803 PositionInChain, MFMAChainSeeds[MFMAChain], TII, SG->getSGID(),
1804 true));
1805 else
1806 SG->addRule(std::make_shared<EnablesNthMFMA>(
1807 (((I * ExpRatio + J) / ExpRequirement) + 2) * MFMAEnablement + 1,
1808 TII, SG->getSGID(), true));
1809 SG->addRule(std::make_shared<IsPipeExp>(TII, SG->getSGID(), true));
1810 SG->addRule(std::make_shared<LessThanNSuccs>(8, TII, SG->getSGID(),
1811 HasChainBetweenCvt));
1812 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1813 }
1814 }
1815
1816 // PHASE 3: Remaining MFMAs
1817 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1818 SchedGroupMask::MFMA, MFMAEnablement * 2, PipelineSyncID, DAG, TII);
1819 SG->addRule(std::make_shared<OccursAfterExp>(TII, SG->getSGID(), true));
1820 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1821 return true;
1822}
1823
1824class MFMAExpSimpleInterleaveOpt final : public IGLPStrategy {
1825public:
1826 bool applyIGLPStrategy(
1827 DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
1828 DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
1829 AMDGPU::SchedulingPhase Phase) override;
1830
1831 bool shouldApplyStrategy(ScheduleDAGInstrs *DAG,
1832 AMDGPU::SchedulingPhase Phase) override {
1833 return true;
1834 }
1835
1836 MFMAExpSimpleInterleaveOpt(ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
1837 : IGLPStrategy(DAG, TII) {
1838 IsBottomUp = true;
1839 }
1840};
1841
1842bool MFMAExpSimpleInterleaveOpt::applyIGLPStrategy(
1843 DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
1844 DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
1845 AMDGPU::SchedulingPhase Phase) {
1846 // Count the number of MFMA instructions.
1847 unsigned MFMACount = 0;
1848 for (const MachineInstr &I : *DAG)
1849 if (TII->isMFMAorWMMA(I))
1850 ++MFMACount;
1851
1852 const unsigned PipelineSyncID = 0;
1853 for (unsigned I = 0; I < MFMACount * 3; ++I) {
1854 SchedGroup *SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1855 SchedGroupMask::TRANS, 1, PipelineSyncID, DAG, TII);
1856 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1857
1858 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
1859 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
1860 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
1861 }
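// The net effect is a simple alternating pipeline -- TRANS, MFMA, TRANS,
// MFMA, ... -- of MFMACount * 3 group pairs with no extra instruction rules.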
1862
1863 return true;
1864}
1865
1866class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
1867private:
1868 // Whether the DS_READ is a predecessor of the first four MFMAs in the region
1869 class EnablesInitialMFMA final : public InstructionRule {
1870 public:
1871 bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
1872 SmallVectorImpl<SchedGroup> &SyncPipe) override {
1873 if (!SyncPipe.size())
1874 return false;
1875 int MFMAsFound = 0;
1876 if (!Cache->size()) {
1877 for (auto &Elt : SyncPipe[0].DAG->SUnits) {
1878 if (TII->isMFMAorWMMA(*Elt.getInstr())) {
1879 ++MFMAsFound;
1880 if (MFMAsFound > 4)
1881 break;
1882 Cache->push_back(&Elt);
1883 }
1884 }
1885 }
1886
1887 auto *DAG = SyncPipe[0].DAG;
1888 for (auto &Elt : *Cache) {
1889 if (DAG->IsReachable(Elt, const_cast<SUnit *>(SU)))
1890 return true;
1891 }
1892 return false;
1893 }
1894
1895 EnablesInitialMFMA(const SIInstrInfo *TII, unsigned SGID,
1896 bool NeedsCache = false)
1897 : InstructionRule(TII, SGID, NeedsCache) {}
1898 };
1899
1900 // Whether the MI is a V_PERM and is a predecessor of a common DS_WRITE
1901 class IsPermForDSW final : public InstructionRule {
1902 public:
1903 bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
1904 SmallVectorImpl<SchedGroup> &SyncPipe) override {
1905 auto *MI = SU->getInstr();
1906 if (MI->getOpcode() != AMDGPU::V_PERM_B32_e64)
1907 return false;
1908
1909 bool FitsInGroup = false;
1910 // Does the VALU have a DS_WRITE successor
1911 if (!Collection.size()) {
1912 for (auto &Succ : SU->Succs) {
1913 SUnit *SuccUnit = Succ.getSUnit();
1914 if (TII->isDS(*SuccUnit->getInstr()) &&
1915 SuccUnit->getInstr()->mayStore()) {
1916 Cache->push_back(SuccUnit);
1917 FitsInGroup = true;
1918 }
1919 }
1920 return FitsInGroup;
1921 }
1922
1923 // Does the VALU have a DS_WRITE successor that is the same as another
1924 // VALU already in the group? The V_PERMs will all share one DS_WRITE succ.
1925 return llvm::any_of(*Cache, [&SU](SUnit *Elt) {
1926 return llvm::any_of(SU->Succs, [&Elt](const SDep &ThisSucc) {
1927 return ThisSucc.getSUnit() == Elt;
1928 });
1929 });
1930 }
1931
1932 IsPermForDSW(const SIInstrInfo *TII, unsigned SGID, bool NeedsCache = false)
1933 : InstructionRule(TII, SGID, NeedsCache) {}
1934 };
1935
1936 // Whether the SU is a successor of any element in the previous SchedGroup
1937 class IsSuccOfPrevGroup final : public InstructionRule {
1938 public:
1939 bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
1940 SmallVectorImpl<SchedGroup> &SyncPipe) override {
1941 SchedGroup *OtherGroup = nullptr;
1942 for (auto &PipeSG : SyncPipe) {
1943 if ((unsigned)PipeSG.getSGID() == SGID - 1) {
1944 OtherGroup = &PipeSG;
1945 }
1946 }
1947
1948 if (!OtherGroup)
1949 return false;
1950 if (!OtherGroup->Collection.size())
1951 return true;
1952
1953 // Does the previous VALU have this DS_Write as a successor
1954 return any_of(OtherGroup->Collection, [&SU](SUnit *Elt) {
1955 return any_of(Elt->Succs,
1956 [&SU](SDep &Succ) { return Succ.getSUnit() == SU; });
1957 });
1958 }
1959 IsSuccOfPrevGroup(const SIInstrInfo *TII, unsigned SGID,
1960 bool NeedsCache = false)
1961 : InstructionRule(TII, SGID, NeedsCache) {}
1962 };
1963
1964 // Whether adding this load keeps the combined load width of the group within 128 bits
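// (E.g., a group already holding three 32-bit loads can still accept a fourth
// 32-bit load, reaching exactly 128 bits, but would reject a 64-bit load.)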
1965 class VMEMSize final : public InstructionRule {
1966 public:
1967 bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
1968 SmallVectorImpl<SchedGroup> &SyncPipe) override {
1969 auto *MI = SU->getInstr();
1970 if (MI->getOpcode() == TargetOpcode::BUNDLE)
1971 return false;
1972 if (!Collection.size())
1973 return true;
1974
1975 int NumBits = 0;
1976
1977 auto TRI = TII->getRegisterInfo();
1978 auto &MRI = MI->getMF()->getRegInfo();
1979 for (auto &Elt : Collection) {
1980 auto Op = Elt->getInstr()->getOperand(0);
1981 auto Size =
1982 TRI.getRegSizeInBits(*TRI.getRegClassForOperandReg(MRI, Op));
1983 NumBits += Size;
1984 }
1985
1986 if (NumBits < 128) {
1987 assert(TII->isVMEM(*MI) && MI->mayLoad());
1988 if (NumBits + TRI.getRegSizeInBits(*TRI.getRegClassForOperandReg(
1989 MRI, MI->getOperand(0))) <=
1990 128)
1991 return true;
1992 }
1993
1994 return false;
1995 }
1996
1997 VMEMSize(const SIInstrInfo *TII, unsigned SGID, bool NeedsCache = false)
1998 : InstructionRule(TII, SGID, NeedsCache) {}
1999 };
2000
2001 /// Whether the SU shares a V_PERM predecessor with any SU in the SchedGroup
2002 /// that is \p Distance steps away
2003 class SharesPredWithPrevNthGroup final : public InstructionRule {
2004 private:
2005 unsigned Distance = 1;
2006
2007 public:
2008 bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
2009 SmallVectorImpl<SchedGroup> &SyncPipe) override {
2010 SchedGroup *OtherGroup = nullptr;
2011 if (!SyncPipe.size())
2012 return false;
2013
2014 if (!Cache->size()) {
2015
2016 for (auto &PipeSG : SyncPipe) {
2017 if ((unsigned)PipeSG.getSGID() == SGID - Distance) {
2018 OtherGroup = &PipeSG;
2019 }
2020 }
2021
2022 if (!OtherGroup)
2023 return false;
2024 if (!OtherGroup->Collection.size())
2025 return true;
2026
2027 for (auto &OtherEle : OtherGroup->Collection) {
2028 for (auto &Pred : OtherEle->Preds) {
2029 if (Pred.getSUnit()->getInstr()->getOpcode() ==
2030 AMDGPU::V_PERM_B32_e64)
2031 Cache->push_back(Pred.getSUnit());
2032 }
2033 }
2034
2035 // If the other group has no PERM preds, then this group won't share any
2036 if (!Cache->size())
2037 return false;
2038 }
2039
2040 auto *DAG = SyncPipe[0].DAG;
2041 // Does the previous DS_WRITE share a V_PERM predecessor with this
2042 // VMEM_READ
2043 return llvm::any_of(*Cache, [&SU, &DAG](SUnit *Elt) {
2044 return DAG->IsReachable(const_cast<SUnit *>(SU), Elt);
2045 });
2046 }
2047 SharesPredWithPrevNthGroup(unsigned Distance, const SIInstrInfo *TII,
2048 unsigned SGID, bool NeedsCache = false)
2049 : InstructionRule(TII, SGID, NeedsCache), Distance(Distance) {}
2050 };
2051
2052public:
2053 bool applyIGLPStrategy(
2054 DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
2055 DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
2056 AMDGPU::SchedulingPhase Phase) override;
2057
2058 bool shouldApplyStrategy(ScheduleDAGInstrs *DAG,
2059 AMDGPU::SchedulingPhase Phase) override {
2060 return true;
2061 }
2062
2063 MFMASmallGemmSingleWaveOpt(ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
2064 : IGLPStrategy(DAG, TII) {
2065 IsBottomUp = false;
2066 }
2067};
2068
2069static unsigned DSWCount = 0;
2070static unsigned DSWWithPermCount = 0;
2071static unsigned DSWWithSharedVMEMCount = 0;
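// These counters are computed once in the initial pre-RA pass and kept in
// statics so that later scheduling phases reentering this mutation reuse them
// (see the IsInitial handling below).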
2072
2073bool MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
2074 DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
2075 DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
2076 AMDGPU::SchedulingPhase Phase) {
2077 unsigned MFMACount = 0;
2078 unsigned DSRCount = 0;
2079
2080 bool IsInitial = Phase == AMDGPU::SchedulingPhase::Initial;
2081
2082 assert((!IsInitial || (DSWCount == 0 && DSWWithPermCount == 0 &&
2083 DSWWithSharedVMEMCount == 0)) &&
2084 "DSWCounters should be zero in pre-RA scheduling!");
2085 SmallVector<SUnit *, 6> DSWithPerms;
2086 for (auto &SU : DAG->SUnits) {
2087 auto *I = SU.getInstr();
2088 if (TII->isMFMAorWMMA(*I))
2089 ++MFMACount;
2090 else if (TII->isDS(*I)) {
2091 if (I->mayLoad())
2092 ++DSRCount;
2093 else if (I->mayStore() && IsInitial) {
2094 ++DSWCount;
2095 for (auto Pred : SU.Preds) {
2096 if (Pred.getSUnit()->getInstr()->getOpcode() ==
2097 AMDGPU::V_PERM_B32_e64) {
2098 DSWithPerms.push_back(&SU);
2099 break;
2100 }
2101 }
2102 }
2103 }
2104 }
2105
2106 if (IsInitial) {
2107 DSWWithPermCount = DSWithPerms.size();
2108 auto *I = DSWithPerms.begin();
2109 auto *E = DSWithPerms.end();
2110
2111 // Get the count of DS_WRITES with V_PERM predecessors which
2112 // have loop carried dependencies (WAR) on the same VMEM_READs.
2113 // We consider partial overlap as a miss -- in other words,
2114 // for a given DS_W, we only consider another DS_W as matching
2115 // if there is a corresponding (in terms of the VMEM_R it uses) V_PERM pred
2116 // for every V_PERM pred of this DS_W.
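// (Illustrative example: if DS_W0's V_PERM preds feed VMEM_R0 and VMEM_R1, a
// later DS_W1 matches only if its V_PERM preds feed those same VMEM_Rs; both
// DS_Ws are then counted toward DSWWithSharedVMEMCount.)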
2117 DenseMap<MachineInstr *, SUnit *> VMEMLookup;
2118 SmallVector<SUnit *, 6> Counted;
2119 for (; I != E; I++) {
2120 SUnit *Cand = nullptr;
2121 bool MissedAny = false;
2122 for (auto &Pred : (*I)->Preds) {
2123 if (Pred.getSUnit()->getInstr()->getOpcode() != AMDGPU::V_PERM_B32_e64)
2124 continue;
2125
2126 if (Cand && llvm::is_contained(Counted, Cand))
2127 break;
2128
2129 for (auto &Succ : Pred.getSUnit()->Succs) {
2130 auto *MI = Succ.getSUnit()->getInstr();
2131 if (!TII->isVMEM(*MI) || !MI->mayLoad())
2132 continue;
2133
2134 if (MissedAny || !VMEMLookup.size()) {
2135 MissedAny = true;
2136 VMEMLookup[MI] = *I;
2137 continue;
2138 }
2139
2140 auto [It, Inserted] = VMEMLookup.try_emplace(MI, *I);
2141 if (Inserted) {
2142 MissedAny = true;
2143 continue;
2144 }
2145
2146 Cand = It->second;
2147 if (llvm::is_contained(Counted, Cand)) {
2148 MissedAny = true;
2149 break;
2150 }
2151 }
2152 }
2153 if (!MissedAny && Cand) {
2154 DSWWithSharedVMEMCount += 2;
2155 Counted.push_back(Cand);
2156 Counted.push_back(*I);
2157 }
2158 }
2159 }
2160
2161 assert(DSWWithSharedVMEMCount <= DSWWithPermCount);
2162 SchedGroup *SG;
2163 unsigned PipelineSyncID = 0;
2164 // For kernels with V_PERM, there are enough VALU instructions to mix in between the MFMAs
2165 if (DSWWithPermCount) {
2166 for (unsigned I = 0; I < MFMACount; I++) {
2167 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2168 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2169 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2170
2171 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2172 SchedGroupMask::VALU, 2, PipelineSyncID, DAG, TII);
2173 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2174 }
2175 }
2176
2177 PipelineSyncID = 1;
2178 // Phase 1: Break up DS_READ and MFMA clusters.
2179 // Schedule DS_READs first to make the initial MFMAs ready, then interleave
2180 // MFMA with DS_READ prefetch.
2181
2182 // Make the initial MFMAs ready
2183 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2184 SchedGroupMask::DS_READ, 4, PipelineSyncID, DAG, TII);
2185 SG->addRule(std::make_shared<EnablesInitialMFMA>(TII, SG->getSGID(), true));
2186 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2187
2188 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2189 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2190 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2191
2192 // Interleave MFMA with DS_READ prefetch
2193 for (unsigned I = 4; I < DSRCount; ++I) {
2194 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2195 SchedGroupMask::DS_READ, 1, PipelineSyncID, DAG, TII);
2196 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2197
2198 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2199 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2200 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2201 }
2202
2203 // Phase 2a: Loop carried dependency with V_PERM
2204 // Schedule VPerm & DS_WRITE as closely as possible to the VMEM_READ they
2205 // depend on. Interleave MFMA to keep XDL unit busy throughout.
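// Each iteration emits the group sequence: VALU(4 x V_PERM), DS_WRITE(1),
// VMEM_READ(4), MFMA(1), VMEM_READ(4), MFMA(1).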
2206 for (unsigned I = DSWWithSharedVMEMCount; I < DSWWithPermCount; ++I) {
2207 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2208 SchedGroupMask::VALU, 4, PipelineSyncID, DAG, TII);
2209 SG->addRule(std::make_shared<IsPermForDSW>(TII, SG->getSGID(), true));
2210 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2211
2212 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2213 SchedGroupMask::DS_WRITE, 1, PipelineSyncID, DAG, TII);
2214 SG->addRule(std::make_shared<IsSuccOfPrevGroup>(TII, SG->getSGID()));
2215 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2216
2217 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2218 SchedGroupMask::VMEM_READ, 4, PipelineSyncID, DAG, TII);
2219 SG->addRule(std::make_shared<SharesPredWithPrevNthGroup>(
2220 1, TII, SG->getSGID(), true));
2221 SG->addRule(std::make_shared<VMEMSize>(TII, SG->getSGID()));
2222 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2223
2224 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2225 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2226 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2227
2228 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2229 SchedGroupMask::VMEM_READ, 4, PipelineSyncID, DAG, TII);
2230 SG->addRule(std::make_shared<SharesPredWithPrevNthGroup>(
2231 3, TII, SG->getSGID(), true));
2232 SG->addRule(std::make_shared<VMEMSize>(TII, SG->getSGID()));
2233 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2234
2235 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2236 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2237 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2238 }
2239
2240 // Phase 2b: Loop carried dependency without V_PERM
2241 // Schedule DS_WRITE as closely as possible to the VMEM_READ they depend on.
2242 // Interleave MFMA to keep XDL unit busy throughout.
2243 for (unsigned I = DSWWithPermCount; I < DSWCount; I++) {
2244 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2245 SchedGroupMask::DS_WRITE, 1, PipelineSyncID, DAG, TII);
2246 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2247
2248 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2249 SchedGroupMask::VMEM_READ, 4, PipelineSyncID, DAG, TII);
2250 SG->addRule(std::make_shared<VMEMSize>(TII, SG->getSGID()));
2251 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2252
2253 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2254 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2255 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2256 }
2257
2258 // Phase 2c: Loop carried dependency with V_PERM, VMEM_READs are
2259 // ultimately used by two DS_WRITEs.
2260 // Schedule VPerm & DS_WRITE as closely as possible to the VMEM_READ they
2261 // depend on. Interleave MFMA to keep XDL unit busy throughout.
2262
2263 for (unsigned I = 0; I < DSWWithSharedVMEMCount; ++I) {
2264 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2265 SchedGroupMask::VALU, 4, PipelineSyncID, DAG, TII);
2266 SG->addRule(std::make_shared<IsPermForDSW>(TII, SG->getSGID(), true));
2267 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2268
2269 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2270 SchedGroupMask::DS_WRITE, 1, PipelineSyncID, DAG, TII);
2271 SG->addRule(std::make_shared<IsSuccOfPrevGroup>(TII, SG->getSGID()));
2272 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2273
2274 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2275 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2276 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2277
2278 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2279 SchedGroupMask::VALU, 4, PipelineSyncID, DAG, TII);
2280 SG->addRule(std::make_shared<IsPermForDSW>(TII, SG->getSGID(), true));
2281 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2282
2283 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2284 SchedGroupMask::DS_WRITE, 1, PipelineSyncID, DAG, TII);
2285 SG->addRule(std::make_shared<IsSuccOfPrevGroup>(TII, SG->getSGID()));
2286 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2287
2288 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2289 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2290 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2291
2292 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2293 SchedGroupMask::VMEM_READ, 4, PipelineSyncID, DAG, TII);
2294 SG->addRule(std::make_shared<SharesPredWithPrevNthGroup>(
2295 2, TII, SG->getSGID(), true));
2296 SG->addRule(std::make_shared<VMEMSize>(TII, SG->getSGID()));
2297 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2298
2299 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2300 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2301 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2302
2303 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2304 SchedGroupMask::VMEM_READ, 4, PipelineSyncID, DAG, TII);
2305 SG->addRule(std::make_shared<SharesPredWithPrevNthGroup>(
2306 4, TII, SG->getSGID(), true));
2307 SG->addRule(std::make_shared<VMEMSize>(TII, SG->getSGID()));
2308 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2309
2310 SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
2311 SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
2312 SG->findCandidateSUnits(SyncedInstrs[SG->getSyncID()]);
2313 }
2314
2315 return true;
2316}
2317
2318static std::unique_ptr<IGLPStrategy>
2319createIGLPStrategy(IGLPStrategyID ID, ScheduleDAGInstrs *DAG,
2320 const SIInstrInfo *TII) {
2321 switch (ID) {
2322 case MFMASmallGemmOptID:
2323 return std::make_unique<MFMASmallGemmOpt>(DAG, TII);
2324 case MFMASmallGemmSingleWaveOptID:
2325 return std::make_unique<MFMASmallGemmSingleWaveOpt>(DAG, TII);
2326 case MFMAExpInterleaveID:
2327 return std::make_unique<MFMAExpInterleaveOpt>(DAG, TII);
2328 case MFMAExpSimpleInterleaveID:
2329 return std::make_unique<MFMAExpSimpleInterleaveOpt>(DAG, TII);
2330 }
2331
2332 llvm_unreachable("Unknown IGLPStrategyID");
2333}
2334
2335class IGroupLPDAGMutation : public ScheduleDAGMutation {
2336private:
2337 const SIInstrInfo *TII;
2338
2339 ScheduleDAGMI *DAG;
2340
2341 // Organize lists of SchedGroups by their SyncID. SchedGroups /
2342 // SCHED_GROUP_BARRIERs with different SyncIDs will have no edges added
2343 // between them.
2344 DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
2345
2346 // Used to track instructions that can be mapped to multiple sched groups
2347 DenseMap<int, SUnitsToCandidateSGsMap> SyncedInstrs;
2348
2349 // Add DAG edges that enforce SCHED_BARRIER ordering.
2350 void addSchedBarrierEdges(SUnit &SU);
2351
2352 // Use a SCHED_BARRIER's mask to identify instruction SchedGroups that should
2353 // not be reordered across the SCHED_BARRIER. This is used for the base
2354 // SCHED_BARRIER, and not SCHED_GROUP_BARRIER. The difference is that
2355 // SCHED_BARRIER will always block all instructions that can be classified
2356 // into a particular SchedClass, whereas SCHED_GROUP_BARRIER has a fixed size
2357 // and may only synchronize with some SchedGroups. Returns the inverse of
2358 // Mask. SCHED_BARRIER's mask describes which instruction types should be
2359 // allowed to be scheduled across it. Invert the mask to get the
2360 // SchedGroupMask of instructions that should be barred.
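// For example (illustrative): a mask of VALU | SALU lets VALU and SALU cross
// the barrier; the inverted mask then bars MFMA, TRANS, and all VMEM/DS
// groups, and the implied-bit fixups in the implementation also clear ALU so
// plain VALU/SALU instructions are not accidentally blocked.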
2361 SchedGroupMask invertSchedBarrierMask(SchedGroupMask Mask) const;
2362
2363 // Create SchedGroups for a SCHED_GROUP_BARRIER.
2364 void initSchedGroupBarrierPipelineStage(
2365 std::vector<SUnit>::reverse_iterator RIter);
2366
2367 bool initIGLPOpt(SUnit &SU);
2368
2369public:
2370 void apply(ScheduleDAGInstrs *DAGInstrs) override;
2371
2372 // The order in which the PipelineSolver should process the candidate
2373 // SchedGroup for a PipelineInstr. BOTTOM_UP will try to add SUs to the last
2374 // created SchedGroup first, and will consider that as the ultimate
2375 // predecessor group when linking. TOP_DOWN instead links and processes the
2376 // first created SchedGroup first.
2377 bool IsBottomUp = true;
2378
2379 // The scheduling phase this application of IGLP corresponds with.
2380 AMDGPU::SchedulingPhase Phase = AMDGPU::SchedulingPhase::Initial;
2381
2382 IGroupLPDAGMutation() = default;
2383 IGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase) : Phase(Phase) {}
2384};
2385
2386unsigned SchedGroup::NumSchedGroups = 0;
2387
2388bool SchedGroup::tryAddEdge(SUnit *A, SUnit *B) {
2389 return A != B && DAG->addEdge(B, SDep(A, SDep::Artificial));
2390}
2391
2392bool SchedGroup::canAddMI(const MachineInstr &MI) const {
2393 bool Result = false;
2394 if (MI.isMetaInstruction())
2395 Result = false;
2396
2397 else if (MI.isInlineAsm()) {
2398 const SIRegisterInfo &TRI = TII->getRegisterInfo();
2399 auto &MRI = MI.getParent()->getParent()->getRegInfo();
2400 bool SGPR_used = false, SGPR_big_def = false, VGPR_used = false,
2401 VMFMA_used = false, VReg32_used = false, MayLoad = MI.mayLoad(),
2402 MayStore = MI.mayStore();
2403 for (const MachineOperand &Operand : MI.operands())
2404 if (Operand.isReg()) {
2405 const TargetRegisterClass &RegClass =
2406 *TRI.getRegClassForOperandReg(MRI, Operand);
2407 if (TRI.hasVGPRs(&RegClass)) {
2408 VGPR_used = true;
2409 if (Operand.isUse() && TRI.getRegSizeInBits(RegClass) == 32)
2410 VReg32_used = true;
2411 }
2412 // > 128 bit registers are usually only used by MFMA instructions, so
2413 // we're using that as a heuristic to guess the schedule group mask of
2414 // the inline asm.
2415 if (TRI.hasAGPRs(&RegClass) || TRI.getRegSizeInBits(RegClass) > 128)
2416 VMFMA_used = true;
2417 if (TRI.hasSGPRs(&RegClass))
2418 SGPR_used = true;
2419 if (TRI.getRegSizeInBits(RegClass) > 64 && Operand.isDef())
2420 SGPR_big_def = true;
2421 }
2422
2423 typedef std::underlying_type_t<SchedGroupMask> SGMask_t;
2424 SGMask_t InlineAsmMask = 0;
2425 if (VGPR_used && !VMFMA_used && !MayLoad && !MayStore)
2426 InlineAsmMask |= (SGMask_t)SchedGroupMask::VALU;
2427 if (SGPR_used && !VGPR_used && !MayLoad && !MayStore)
2428 InlineAsmMask |= (SGMask_t)SchedGroupMask::SALU;
2429 if (VMFMA_used)
2430 InlineAsmMask |= (SGMask_t)SchedGroupMask::MFMA;
2431 if (VGPR_used && MayLoad)
2432 InlineAsmMask |= (SGMask_t)(VReg32_used ? SchedGroupMask::DS_READ
2433 : SchedGroupMask::VMEM_READ);
2434 if (VGPR_used && MayStore)
2435 InlineAsmMask |= (SGMask_t)(VReg32_used ? SchedGroupMask::DS_WRITE
2436 : SchedGroupMask::VMEM_WRITE);
2437 if (SGPR_big_def)
2438 InlineAsmMask |= (SGMask_t)SchedGroupMask::DS_READ;
2439 if (InlineAsmMask & (SGMask_t)SchedGroupMask::VALU ||
2440 InlineAsmMask & (SGMask_t)SchedGroupMask::SALU)
2441 InlineAsmMask |= (SGMask_t)SchedGroupMask::ALU;
2442 if (InlineAsmMask & (SGMask_t)SchedGroupMask::DS_READ ||
2443 InlineAsmMask & (SGMask_t)SchedGroupMask::DS_WRITE)
2444 InlineAsmMask |= (SGMask_t)SchedGroupMask::DS;
2445 if (InlineAsmMask & (SGMask_t)SchedGroupMask::VMEM_READ ||
2446 InlineAsmMask & (SGMask_t)SchedGroupMask::VMEM_WRITE)
2447 InlineAsmMask |= (SGMask_t)SchedGroupMask::VMEM;
2448
2449 Result = ((SGMask_t)SGMask & InlineAsmMask) != 0;
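// (E.g., inline asm that only reads and writes 32-bit VGPRs and has no
// memory operands is classified as VALU | ALU by the heuristic above.)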
2450 }
2451
2452 else if (((SGMask & SchedGroupMask::ALU) != SchedGroupMask::NONE) &&
2453 (TII->isVALU(MI) || TII->isMFMAorWMMA(MI) || TII->isSALU(MI) ||
2454 TII->isTRANS(MI)))
2455 Result = !MI.mayLoadOrStore();
2456
2457 else if (((SGMask & SchedGroupMask::VALU) != SchedGroupMask::NONE) &&
2458 TII->isVALU(MI) && !TII->isMFMAorWMMA(MI) && !TII->isTRANS(MI)) {
2459 // Some memory instructions may be marked as VALU (e.g. BUFFER_LOAD_*_LDS).
2460 // For our purposes, these shall not be classified as VALU as this results
2461 // in unexpected behavior.
2462 Result = !MI.mayLoadOrStore();
2463 }
2464
2465 else if (((SGMask & SchedGroupMask::SALU) != SchedGroupMask::NONE) &&
2466 TII->isSALU(MI))
2467 Result = !MI.mayLoadOrStore();
2468
2469 else if (((SGMask & SchedGroupMask::MFMA) != SchedGroupMask::NONE) &&
2470 TII->isMFMAorWMMA(MI))
2471 Result = true;
2472
2473 else if (((SGMask & SchedGroupMask::VMEM) != SchedGroupMask::NONE) &&
2474 TII->isVMEM(MI))
2475 Result = true;
2476
2477 else if (((SGMask & SchedGroupMask::VMEM_READ) != SchedGroupMask::NONE) &&
2478 MI.mayLoad() && TII->isVMEM(MI))
2479 Result = true;
2480
2481 else if (((SGMask & SchedGroupMask::VMEM_WRITE) != SchedGroupMask::NONE) &&
2482 MI.mayStore() && TII->isVMEM(MI))
2483 Result = true;
2484
2485 else if (((SGMask & SchedGroupMask::DS) != SchedGroupMask::NONE) &&
2486 TII->isDS(MI))
2487 Result = true;
2488
2489 else if (((SGMask & SchedGroupMask::DS_READ) != SchedGroupMask::NONE) &&
2490 MI.mayLoad() && TII->isDS(MI))
2491 Result = true;
2492
2493 else if (((SGMask & SchedGroupMask::DS_WRITE) != SchedGroupMask::NONE) &&
2494 MI.mayStore() && TII->isDS(MI))
2495 Result = true;
2496
2497 else if (((SGMask & SchedGroupMask::TRANS) != SchedGroupMask::NONE) &&
2498 TII->isTRANS(MI))
2499 Result = true;
2500
2501 LLVM_DEBUG(
2502 dbgs() << "For SchedGroup with mask " << format_hex((int)SGMask, 10, true)
2503 << (Result ? " could classify " : " unable to classify ") << MI);
2504
2505 return Result;
2506}
2507
2508int SchedGroup::link(SUnit &SU, bool MakePred,
2509 std::list<std::pair<SUnit *, SUnit *>> &AddedEdges) {
2510 int MissedEdges = 0;
2511 for (auto *A : Collection) {
2512 SUnit *B = &SU;
2513 if (A == B || A->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
2514 continue;
2515 if (MakePred)
2516 std::swap(A, B);
2517
2518 if (DAG->IsReachable(B, A))
2519 continue;
2520
2521 // tryAddEdge returns false if there is a dependency that makes adding
2522 // the A->B edge impossible; otherwise it returns true.
2523 bool Added = tryAddEdge(A, B);
2524 if (Added)
2525 AddedEdges.emplace_back(A, B);
2526 else
2527 ++MissedEdges;
2528 }
2529
2530 return MissedEdges;
2531}
2532
2533void SchedGroup::link(SUnit &SU, bool MakePred) {
2534 for (auto *A : Collection) {
2535 SUnit *B = &SU;
2536 if (A->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
2537 continue;
2538 if (MakePred)
2539 std::swap(A, B);
2540
2541 tryAddEdge(A, B);
2542 }
2543}
2544
2545void SchedGroup::link(SUnit &SU,
2546 function_ref<bool(const SUnit *A, const SUnit *B)> P) {
2547 for (auto *A : Collection) {
2548 SUnit *B = &SU;
2549 if (P(A, B))
2550 std::swap(A, B);
2551
2552 tryAddEdge(A, B);
2553 }
2554}
2555
2556void SchedGroup::link(SchedGroup &OtherGroup) {
2557 for (auto *B : OtherGroup.Collection)
2558 link(*B);
2559}
2560
2561bool SchedGroup::canAddSU(SUnit &SU) const {
2562 MachineInstr &MI = *SU.getInstr();
2563 if (MI.getOpcode() != TargetOpcode::BUNDLE)
2564 return canAddMI(MI);
2565
2566 // Special case for bundled MIs.
2567 const MachineBasicBlock *MBB = MI.getParent();
2568 MachineBasicBlock::instr_iterator B = MI.getIterator(), E = ++B;
2569 while (E != MBB->end() && E->isBundledWithPred())
2570 ++E;
2571
2572 // Return true if all of the bundled MIs can be added to this group.
2573 return std::all_of(B, E, [this](MachineInstr &MI) { return canAddMI(MI); });
2574}
2575
2576template <class T>
2577void SchedGroup::findCandidateSUnits(T Begin, T End,
2578 SUnitsToCandidateSGsMap &SyncedInstrs) {
2579 for (SUnit &SU : make_range(Begin, End)) {
2580 if (canAddSU(SU))
2581 SyncedInstrs[&SU].push_back(SGID);
2582 }
2583}
2584
2585void SchedGroup::findCandidateSUnits(SUnitsToCandidateSGsMap &SyncedInstrs) {
2586 findCandidateSUnits(DAG->SUnits.rbegin(), DAG->SUnits.rend(), SyncedInstrs);
2587}
2588
2589void IGroupLPDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
2590 const TargetSchedModel *TSchedModel = DAGInstrs->getSchedModel();
2591 if (!TSchedModel || DAGInstrs->SUnits.empty())
2592 return;
2593
2594 LLVM_DEBUG(dbgs() << "Applying IGroupLPDAGMutation...\n");
2595 const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
2596 TII = ST.getInstrInfo();
2597 DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
2598 SyncedSchedGroups.clear();
2599 SyncedInstrs.clear();
2600 bool FoundSB = false;
2601 bool FoundIGLP = false;
2602 bool ShouldApplyIGLP = false;
2603 for (auto R = DAG->SUnits.rbegin(), E = DAG->SUnits.rend(); R != E; ++R) {
2604 unsigned Opc = R->getInstr()->getOpcode();
2605 // SCHED_[GROUP_]BARRIER and IGLP are mutually exclusive.
2606 if (Opc == AMDGPU::SCHED_BARRIER) {
2607 addSchedBarrierEdges(*R);
2608 FoundSB = true;
2609 } else if (Opc == AMDGPU::SCHED_GROUP_BARRIER) {
2610 initSchedGroupBarrierPipelineStage(R);
2611 FoundSB = true;
2612 } else if (Opc == AMDGPU::IGLP_OPT) {
2613 if (!FoundSB && !FoundIGLP) {
2614 FoundIGLP = true;
2615 ShouldApplyIGLP = initIGLPOpt(*R);
2616 }
2617 }
2618 }
2619
2620 if (FoundSB || (FoundIGLP && ShouldApplyIGLP)) {
2621 PipelineSolver PS(SyncedSchedGroups, SyncedInstrs, DAG, IsBottomUp);
2622 // PipelineSolver performs the mutation by adding the edges it
2623 // determined as the best
2624 PS.solve();
2625 return;
2626 }
2627}
2628
2629void IGroupLPDAGMutation::addSchedBarrierEdges(SUnit &SchedBarrier) {
2630 MachineInstr &MI = *SchedBarrier.getInstr();
2631 assert(MI.getOpcode() == AMDGPU::SCHED_BARRIER);
2632 LLVM_DEBUG(dbgs() << "Building SchedGroup for SchedBarrier with Mask: "
2633 << MI.getOperand(0).getImm() << "\n");
2634 auto InvertedMask =
2635 invertSchedBarrierMask((SchedGroupMask)MI.getOperand(0).getImm());
2636 SchedGroup SG(InvertedMask, std::nullopt, DAG, TII);
2637
2638 for (SUnit &SU : DAG->SUnits)
2639 if (SG.canAddSU(SU))
2640 SG.add(SU);
2641
2642 // Preserve original instruction ordering relative to the SCHED_BARRIER.
2643 SG.link(
2644 SchedBarrier,
2645 (function_ref<bool(const SUnit *A, const SUnit *B)>)[](
2646 const SUnit *A, const SUnit *B) { return A->NodeNum > B->NodeNum; });
2647}
2648
2649SchedGroupMask
2650IGroupLPDAGMutation::invertSchedBarrierMask(SchedGroupMask Mask) const {
2651 // Invert mask and erase bits for types of instructions that are implied to be
2652 // allowed past the SCHED_BARRIER.
2653 SchedGroupMask InvertedMask = ~Mask;
2654
2655 // ALU implies VALU, SALU, MFMA, TRANS.
2656 if ((InvertedMask & SchedGroupMask::ALU) == SchedGroupMask::NONE)
2657 InvertedMask &= ~SchedGroupMask::VALU & ~SchedGroupMask::SALU &
2658 ~SchedGroupMask::MFMA & ~SchedGroupMask::TRANS;
2659 // VALU, SALU, MFMA, TRANS implies ALU.
2660 else if ((InvertedMask & SchedGroupMask::VALU) == SchedGroupMask::NONE ||
2661 (InvertedMask & SchedGroupMask::SALU) == SchedGroupMask::NONE ||
2662 (InvertedMask & SchedGroupMask::MFMA) == SchedGroupMask::NONE ||
2663 (InvertedMask & SchedGroupMask::TRANS) == SchedGroupMask::NONE)
2664 InvertedMask &= ~SchedGroupMask::ALU;
2665
2666 // VMEM implies VMEM_READ, VMEM_WRITE.
2667 if ((InvertedMask & SchedGroupMask::VMEM) == SchedGroupMask::NONE)
2668 InvertedMask &= ~SchedGroupMask::VMEM_READ & ~SchedGroupMask::VMEM_WRITE;
2669 // VMEM_READ, VMEM_WRITE implies VMEM.
2670 else if ((InvertedMask & SchedGroupMask::VMEM_READ) == SchedGroupMask::NONE ||
2671 (InvertedMask & SchedGroupMask::VMEM_WRITE) == SchedGroupMask::NONE)
2672 InvertedMask &= ~SchedGroupMask::VMEM;
2673
2674 // DS implies DS_READ, DS_WRITE.
2675 if ((InvertedMask & SchedGroupMask::DS) == SchedGroupMask::NONE)
2676 InvertedMask &= ~SchedGroupMask::DS_READ & ~SchedGroupMask::DS_WRITE;
2677 // DS_READ, DS_WRITE implies DS.
2678 else if ((InvertedMask & SchedGroupMask::DS_READ) == SchedGroupMask::NONE ||
2679 (InvertedMask & SchedGroupMask::DS_WRITE) == SchedGroupMask::NONE)
2680 InvertedMask &= ~SchedGroupMask::DS;
2681
2682 LLVM_DEBUG(dbgs() << "After Inverting, SchedGroup Mask: " << (int)InvertedMask
2683 << "\n");
2684
2685 return InvertedMask;
2686}
2687
2688void IGroupLPDAGMutation::initSchedGroupBarrierPipelineStage(
2689 std::vector<SUnit>::reverse_iterator RIter) {
2690 MachineInstr &SGB = *RIter->getInstr();
2691 assert(SGB.getOpcode() == AMDGPU::SCHED_GROUP_BARRIER);
2692 int32_t SGMask = SGB.getOperand(0).getImm();
2693 int32_t Size = SGB.getOperand(1).getImm();
2694 int32_t SyncID = SGB.getOperand(2).getImm();
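// For reference, these operands originate from IR such as (an assumed
// example, not taken from this file):
// call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
// i.e. a group of one MFMA (mask 0x8) in sync group 0.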
2695
2696 Size++; // Make room for the SCHED_GROUP_BARRIER instruction
2697 auto &SG = SyncedSchedGroups[SyncID].emplace_back((SchedGroupMask)SGMask,
2698 Size, SyncID, DAG, TII);
2699 SG.add(*RIter);
2700 SG.findCandidateSUnits(RIter, SG.DAG->SUnits.rend(),
2701 SyncedInstrs[SG.getSyncID()]);
2702}
2703
2704bool IGroupLPDAGMutation::initIGLPOpt(SUnit &SU) {
2705 IGLPStrategyID StrategyID =
2706 (IGLPStrategyID)SU.getInstr()->getOperand(0).getImm();
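// Similarly, the strategy ID is the immediate of an IGLP_OPT produced from IR
// like: call void @llvm.amdgcn.iglp.opt(i32 1) -- which would select
// MFMASmallGemmSingleWaveOptID, assuming the enum ordering in the header
// starts at MFMASmallGemmOptID = 0.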
2707 auto S = createIGLPStrategy(StrategyID, DAG, TII);
2708 if (!S->shouldApplyStrategy(DAG, Phase))
2709 return false;
2710
2711 IsBottomUp = S->IsBottomUp;
2712 return S->applyIGLPStrategy(SyncedInstrs, SyncedSchedGroups, Phase);
2713}
2714
2715} // namespace
2716
2717 /// \p Phase specifies whether or not this is a reentry into the
2718/// IGroupLPDAGMutation. Since there may be multiple scheduling passes on the
2719/// same scheduling region (e.g. pre and post-RA scheduling / multiple
2720/// scheduling "phases"), we can reenter this mutation framework more than once
2721/// for a given region.
2722std::unique_ptr<ScheduleDAGMutation>
2723 llvm::createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase) {
2724 return std::make_unique<IGroupLPDAGMutation>(Phase);
2725}