AMDGPUTargetMachine.cpp
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both the AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine holds all of the hardware-specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for the new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
25#include "AMDGPUIGroupLP.h"
26#include "AMDGPUISelDAGToDAG.h"
28#include "AMDGPUMacroFusion.h"
35#include "AMDGPUSplitModule.h"
40#include "GCNDPPCombine.h"
42#include "GCNNSAReassign.h"
46#include "GCNSchedStrategy.h"
47#include "GCNVOPDUtils.h"
48#include "R600.h"
49#include "R600TargetMachine.h"
50#include "SIFixSGPRCopies.h"
51#include "SIFixVGPRCopies.h"
52#include "SIFoldOperands.h"
53#include "SIFormMemoryClauses.h"
55#include "SILowerControlFlow.h"
56#include "SILowerSGPRSpills.h"
57#include "SILowerWWMCopies.h"
59#include "SIMachineScheduler.h"
63#include "SIPeepholeSDWA.h"
64#include "SIPostRABundler.h"
67#include "SIWholeQuadMode.h"
88#include "llvm/CodeGen/Passes.h"
92#include "llvm/IR/IntrinsicsAMDGPU.h"
93#include "llvm/IR/PassManager.h"
102#include "llvm/Transforms/IPO.h"
127#include <optional>
128
129using namespace llvm;
130using namespace llvm::PatternMatch;
131
132namespace {
133//===----------------------------------------------------------------------===//
134// AMDGPU CodeGen Pass Builder interface.
135//===----------------------------------------------------------------------===//
136
137class AMDGPUCodeGenPassBuilder
138 : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
139 using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;
140
141public:
142 AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
143 const CGPassBuilderOption &Opts,
144 PassInstrumentationCallbacks *PIC);
145
146 void addIRPasses(PassManagerWrapper &PMW) const;
147 void addCodeGenPrepare(PassManagerWrapper &PMW) const;
148 void addPreISel(PassManagerWrapper &PMW) const;
149 void addILPOpts(PassManagerWrapper &PMW) const;
150 void addAsmPrinter(PassManagerWrapper &PMW, CreateMCStreamer) const;
151 Error addInstSelector(PassManagerWrapper &PMW) const;
152 void addPreRewrite(PassManagerWrapper &PMW) const;
153 void addMachineSSAOptimization(PassManagerWrapper &PMW) const;
154 void addPostRegAlloc(PassManagerWrapper &PMW) const;
155 void addPreEmitPass(PassManagerWrapper &PMW) const;
156 void addPreEmitRegAlloc(PassManagerWrapper &PMW) const;
157 Error addRegAssignmentOptimized(PassManagerWrapper &PMW) const;
158 void addPreRegAlloc(PassManagerWrapper &PMW) const;
159 void addOptimizedRegAlloc(PassManagerWrapper &PMW) const;
160 void addPreSched2(PassManagerWrapper &PMW) const;
161
162 /// Check if a pass is enabled given the \p Opt option. If the option is set
163 /// explicitly on the command line it always takes effect. Otherwise its
164 /// default is used, provided the pass is meant to run at optimization \p Level or higher.
165 bool isPassEnabled(const cl::opt<bool> &Opt,
166 CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
167 void addEarlyCSEOrGVNPass(PassManagerWrapper &PMW) const;
168 void addStraightLineScalarOptimizationPasses(PassManagerWrapper &PMW) const;
169};
170
171class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
172public:
173 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
174 : RegisterRegAllocBase(N, D, C) {}
175};
176
177class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
178public:
179 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
180 : RegisterRegAllocBase(N, D, C) {}
181};
182
183class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
184public:
185 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
186 : RegisterRegAllocBase(N, D, C) {}
187};
188
189 static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
190                               const MachineRegisterInfo &MRI,
191                               const Register Reg) {
192 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
193 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
194}
195
196 static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
197                               const MachineRegisterInfo &MRI,
198                               const Register Reg) {
199 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
200 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
201}
202
203 static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
204                                 const MachineRegisterInfo &MRI,
205                                 const Register Reg) {
206 const SIMachineFunctionInfo *MFI =
207 MRI.getMF().getInfo<SIMachineFunctionInfo>();
208 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
209   return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
210          MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
211 }
212
213/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
214static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
215
216/// A dummy default pass factory indicates whether the register allocator is
217/// overridden on the command line.
218static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
219static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
220static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
221
222static SGPRRegisterRegAlloc
223defaultSGPRRegAlloc("default",
224 "pick SGPR register allocator based on -O option",
225                     useDefaultRegisterAllocator);
226 
227static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
228                RegisterPassParser<SGPRRegisterRegAlloc>>
229 SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
230 cl::desc("Register allocator to use for SGPRs"));
231
232static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
233                RegisterPassParser<VGPRRegisterRegAlloc>>
234 VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
235 cl::desc("Register allocator to use for VGPRs"));
236
237static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
238                RegisterPassParser<WWMRegisterRegAlloc>>
239     WWMRegAlloc("wwm-regalloc", cl::Hidden,
240                 cl::init(&useDefaultRegisterAllocator),
241 cl::desc("Register allocator to use for WWM registers"));
242
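// Example (illustrative): a different allocator can be selected per register
// class on the command line, e.g.
//   llc -mtriple=amdgcn -O2 -sgpr-regalloc=greedy -vgpr-regalloc=fast \
//       -wwm-regalloc=basic input.ll
// Leaving an option at "default" picks the allocator implied by the -O level.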
243static void initializeDefaultSGPRRegisterAllocatorOnce() {
244 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
245
246 if (!Ctor) {
247 Ctor = SGPRRegAlloc;
248 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
249 }
250}
251
252static void initializeDefaultVGPRRegisterAllocatorOnce() {
253 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
254
255 if (!Ctor) {
256 Ctor = VGPRRegAlloc;
257 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
258 }
259}
260
261static void initializeDefaultWWMRegisterAllocatorOnce() {
262 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
263
264 if (!Ctor) {
265 Ctor = WWMRegAlloc;
266 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
267 }
268}
269
270static FunctionPass *createBasicSGPRRegisterAllocator() {
271 return createBasicRegisterAllocator(onlyAllocateSGPRs);
272}
273
274static FunctionPass *createGreedySGPRRegisterAllocator() {
275 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
276}
277
278static FunctionPass *createFastSGPRRegisterAllocator() {
279 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
280}
281
282static FunctionPass *createBasicVGPRRegisterAllocator() {
283 return createBasicRegisterAllocator(onlyAllocateVGPRs);
284}
285
286static FunctionPass *createGreedyVGPRRegisterAllocator() {
287 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
288}
289
290static FunctionPass *createFastVGPRRegisterAllocator() {
291 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
292}
293
294static FunctionPass *createBasicWWMRegisterAllocator() {
295 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
296}
297
298static FunctionPass *createGreedyWWMRegisterAllocator() {
299 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
300}
301
302static FunctionPass *createFastWWMRegisterAllocator() {
303 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
304}
305
306static SGPRRegisterRegAlloc basicRegAllocSGPR(
307 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
308static SGPRRegisterRegAlloc greedyRegAllocSGPR(
309 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
310
311static SGPRRegisterRegAlloc fastRegAllocSGPR(
312 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
313
314
315static VGPRRegisterRegAlloc basicRegAllocVGPR(
316 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
317static VGPRRegisterRegAlloc greedyRegAllocVGPR(
318 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
319
320static VGPRRegisterRegAlloc fastRegAllocVGPR(
321 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
322static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
323 "basic register allocator",
324 createBasicWWMRegisterAllocator);
325static WWMRegisterRegAlloc
326 greedyRegAllocWWMReg("greedy", "greedy register allocator",
327 createGreedyWWMRegisterAllocator);
328static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
329 createFastWWMRegisterAllocator);
330
334}
335} // anonymous namespace
336
337static cl::opt<bool>
338 EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
339                         cl::desc("Run early if-conversion"),
340 cl::init(false));
341
342static cl::opt<bool>
343OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
344 cl::desc("Run pre-RA exec mask optimizations"),
345 cl::init(true));
346
347static cl::opt<bool>
348 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
349 cl::desc("Lower GPU ctor / dtors to globals on the device."),
350 cl::init(true), cl::Hidden);
351
352// Option to disable vectorizer for tests.
354 "amdgpu-load-store-vectorizer",
355 cl::desc("Enable load store vectorizer"),
356 cl::init(true),
357 cl::Hidden);
358
359// Option to control global loads scalarization
361 "amdgpu-scalarize-global-loads",
362 cl::desc("Enable global load scalarization"),
363 cl::init(true),
364 cl::Hidden);
365
366// Option to run internalize pass.
368 "amdgpu-internalize-symbols",
369 cl::desc("Enable elimination of non-kernel functions and unused globals"),
370 cl::init(false),
371 cl::Hidden);
372
373// Option to inline all early.
375 "amdgpu-early-inline-all",
376 cl::desc("Inline all functions early"),
377 cl::init(false),
378 cl::Hidden);
379
381 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
382 cl::desc("Enable removal of functions when they"
383 "use features not supported by the target GPU"),
384 cl::init(true));
385
387 "amdgpu-sdwa-peephole",
388 cl::desc("Enable SDWA peepholer"),
389 cl::init(true));
390
392 "amdgpu-dpp-combine",
393 cl::desc("Enable DPP combiner"),
394 cl::init(true));
395
396// Enable address space based alias analysis
398 cl::desc("Enable AMDGPU Alias Analysis"),
399 cl::init(true));
400
401// Enable lib calls simplifications
403 "amdgpu-simplify-libcall",
404 cl::desc("Enable amdgpu library simplifications"),
405 cl::init(true),
406 cl::Hidden);
407
409 "amdgpu-ir-lower-kernel-arguments",
410 cl::desc("Lower kernel argument loads in IR pass"),
411 cl::init(true),
412 cl::Hidden);
413
415 "amdgpu-reassign-regs",
416 cl::desc("Enable register reassign optimizations on gfx10+"),
417 cl::init(true),
418 cl::Hidden);
419
421 "amdgpu-opt-vgpr-liverange",
422 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
423 cl::init(true), cl::Hidden);
424
426 "amdgpu-atomic-optimizer-strategy",
427 cl::desc("Select DPP or Iterative strategy for scan"),
430 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
432 "Use Iterative approach for scan"),
433 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
434
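// Example (illustrative): the scan strategy can be forced from the command
// line, e.g. -amdgpu-atomic-optimizer-strategy=DPP, or the optimizer can be
// disabled entirely with -amdgpu-atomic-optimizer-strategy=None.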
435// Enable Mode register optimization
437 "amdgpu-mode-register",
438 cl::desc("Enable mode register pass"),
439 cl::init(true),
440 cl::Hidden);
441
442// Enable GFX11+ s_delay_alu insertion
443static cl::opt<bool>
444 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
445 cl::desc("Enable s_delay_alu insertion"),
446 cl::init(true), cl::Hidden);
447
448// Enable GFX11+ VOPD
449static cl::opt<bool>
450 EnableVOPD("amdgpu-enable-vopd",
451 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
452 cl::init(true), cl::Hidden);
453
454// Option is used in lit tests to prevent deadcoding of patterns inspected.
455static cl::opt<bool>
456EnableDCEInRA("amdgpu-dce-in-ra",
457 cl::init(true), cl::Hidden,
458 cl::desc("Enable machine DCE inside regalloc"));
459
460static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
461 cl::desc("Adjust wave priority"),
462 cl::init(false), cl::Hidden);
463
465 "amdgpu-scalar-ir-passes",
466 cl::desc("Enable scalar IR passes"),
467 cl::init(true),
468 cl::Hidden);
469
471 "amdgpu-enable-lower-exec-sync",
472 cl::desc("Enable lowering of execution synchronization."), cl::init(true),
473 cl::Hidden);
474
475static cl::opt<bool>
476 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
477 cl::desc("Enable lowering of lds to global memory pass "
478 "and asan instrument resulting IR."),
479 cl::init(true), cl::Hidden);
480
482 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
484 cl::Hidden);
485
487 "amdgpu-enable-pre-ra-optimizations",
488 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
489 cl::Hidden);
490
492 "amdgpu-enable-promote-kernel-arguments",
493 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
494 cl::Hidden, cl::init(true));
495
497 "amdgpu-enable-image-intrinsic-optimizer",
498 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
499 cl::Hidden);
500
501static cl::opt<bool>
502 EnableLoopPrefetch("amdgpu-loop-prefetch",
503 cl::desc("Enable loop data prefetch on AMDGPU"),
504 cl::Hidden, cl::init(false));
505
507 AMDGPUSchedStrategy("amdgpu-sched-strategy",
508 cl::desc("Select custom AMDGPU scheduling strategy."),
509 cl::Hidden, cl::init(""));
510
512 "amdgpu-enable-rewrite-partial-reg-uses",
513 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
514 cl::Hidden);
515
517 "amdgpu-enable-hipstdpar",
518 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
519 cl::Hidden);
520
521static cl::opt<bool>
522 EnableAMDGPUAttributor("amdgpu-attributor-enable",
523 cl::desc("Enable AMDGPUAttributorPass"),
524 cl::init(true), cl::Hidden);
525
527 "new-reg-bank-select",
528 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
529 "regbankselect"),
530 cl::init(false), cl::Hidden);
531
533 "amdgpu-link-time-closed-world",
534 cl::desc("Whether has closed-world assumption at link time"),
535 cl::init(false), cl::Hidden);
536
538 "amdgpu-enable-uniform-intrinsic-combine",
539 cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
540 cl::init(true), cl::Hidden);
541
543 // Register the target
546
631}
632
633static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
634 return std::make_unique<AMDGPUTargetObjectFile>();
635}
636
640
641static ScheduleDAGInstrs *
642 createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
643   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
644 ScheduleDAGMILive *DAG =
645 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
646 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
647 if (ST.shouldClusterStores())
648 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
649   DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
650   DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
651 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
652 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
653 return DAG;
654}
655
656static ScheduleDAGInstrs *
657 createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
658   ScheduleDAGMILive *DAG =
659 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
660   DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
661   return DAG;
662}
663
664static ScheduleDAGInstrs *
665 createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
666   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
667   ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
668 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
669 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
670 if (ST.shouldClusterStores())
671 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
672 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
673 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
674 return DAG;
675}
676
677 static ScheduleDAGInstrs *
678 createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
679   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
680   auto *DAG = new GCNIterativeScheduler(
681       C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
682 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
683 if (ST.shouldClusterStores())
684 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
686 return DAG;
687}
688
695
696 static ScheduleDAGInstrs *
697 createIterativeILPMachineScheduler(MachineSchedContext *C) {
698   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
699   auto *DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
700 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
701 if (ST.shouldClusterStores())
702 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
703 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
705 return DAG;
706}
707
708 static MachineSchedRegistry
709 SISchedRegistry("si", "Run SI's custom scheduler",
710                 createSIMachineScheduler);
711 
712 static MachineSchedRegistry
713 GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
714                              "Run GCN scheduler to maximize occupancy",
715                              createGCNMaxOccupancyMachineScheduler);
716 
717 static MachineSchedRegistry
718     GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
719                            createGCNMaxILPMachineScheduler);
720 
721 static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry(
722     "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
723     createGCNMaxMemoryClauseMachineScheduler);
724 
725 static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
726     "gcn-iterative-max-occupancy-experimental",
727     "Run GCN scheduler to maximize occupancy (experimental)",
728     createIterativeGCNMaxOccupancyMachineScheduler);
729 
730 static MachineSchedRegistry GCNMinRegSchedRegistry(
731     "gcn-iterative-minreg",
732     "Run GCN iterative scheduler for minimal register usage (experimental)",
733     createMinRegScheduler);
734 
735 static MachineSchedRegistry GCNILPSchedRegistry(
736     "gcn-iterative-ilp",
737     "Run GCN iterative scheduler for ILP scheduling (experimental)",
738     createIterativeILPMachineScheduler);
739 
741 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
742   if (!GPU.empty())
743 return GPU;
744
745 // Need to default to a target with flat support for HSA.
746 if (TT.isAMDGCN())
747 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
748
749 return "r600";
750}
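// Example (illustrative): with no explicit -mcpu, an amdgcn--amdhsa triple
// defaults to "generic-hsa", any other amdgcn triple to "generic", and the
// r600 triple to "r600".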
751
752 static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
753   // The AMDGPU toolchain only supports generating shared objects, so we
754 // must always use PIC.
755 return Reloc::PIC_;
756}
757
759 StringRef CPU, StringRef FS,
760 const TargetOptions &Options,
761 std::optional<Reloc::Model> RM,
762 std::optional<CodeModel::Model> CM,
765 T, TT.computeDataLayout(), TT, getGPUOrDefault(TT, CPU), FS, Options,
767 OptLevel),
769 initAsmInfo();
770 if (TT.isAMDGCN()) {
771 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
773 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
775 }
776}
777
780
782
783 StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
784   Attribute GPUAttr = F.getFnAttribute("target-cpu");
785 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
786}
787
788 StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
789   Attribute FSAttr = F.getFnAttribute("target-features");
790 
791   return FSAttr.isValid() ? FSAttr.getValueAsString()
792                           : getTargetFeatureString();
793 }
794
797 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
799 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
800 if (ST.shouldClusterStores())
801 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
802 return DAG;
803}
804
805/// Predicate for Internalize pass.
806static bool mustPreserveGV(const GlobalValue &GV) {
807 if (const Function *F = dyn_cast<Function>(&GV))
808 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
809 F->getName().starts_with("__sanitizer_") ||
810 AMDGPU::isEntryFunctionCC(F->getCallingConv());
811
812   GV.removeDeadConstantUsers();
813   return !GV.use_empty();
814}
815
819
820 static Expected<ScanOptions>
821 parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
822   if (Params.empty())
823     return ScanOptions(AMDGPUAtomicOptimizerStrategy);
824   Params.consume_front("strategy=");
825 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
826 .Case("dpp", ScanOptions::DPP)
827 .Cases({"iterative", ""}, ScanOptions::Iterative)
828 .Case("none", ScanOptions::None)
829 .Default(std::nullopt);
830 if (Result)
831 return *Result;
832 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
833}
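// Example (illustrative): the parser above handles the new-pass-manager
// parameter syntax, e.g.
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' ...
// An empty parameter list falls back to -amdgpu-atomic-optimizer-strategy.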
834
835 static Expected<AMDGPUAttributorOptions>
836 parseAMDGPUAttributorPassOptions(StringRef Params) {
837   AMDGPUAttributorOptions Result;
838   while (!Params.empty()) {
839 StringRef ParamName;
840 std::tie(ParamName, Params) = Params.split(';');
841 if (ParamName == "closed-world") {
842 Result.IsClosedWorld = true;
843 } else {
844       return make_error<StringError>(
845           formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
846               .str(),
847           inconvertibleErrorCode());
848 }
849 }
850 return Result;
851}
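// Example (illustrative): "closed-world" is currently the only recognized
// parameter, e.g.
//   opt -passes='amdgpu-attributor<closed-world>' ...
// Any other parameter name produces the "invalid AMDGPUAttributor pass
// parameter" error constructed above.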
852
853 void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
854 
855 #define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
856 #include "llvm/Passes/TargetPassRegistry.inc"
857 
858 PB.registerScalarOptimizerLateEPCallback(
859 [](FunctionPassManager &FPM, OptimizationLevel Level) {
860 if (Level == OptimizationLevel::O0)
861 return;
862
863         FPM.addPass(InferAddressSpacesPass());
864       });
865
866 PB.registerVectorizerEndEPCallback(
867 [](FunctionPassManager &FPM, OptimizationLevel Level) {
868 if (Level == OptimizationLevel::O0)
869 return;
870
872 });
873
874 PB.registerPipelineEarlySimplificationEPCallback(
877 if (!isLTOPreLink(Phase)) {
878 // When we are not using -fgpu-rdc, we can run accelerator code
879 // selection relatively early, but still after linking to prevent
880 // eager removal of potentially reachable symbols.
881 if (EnableHipStdPar) {
884 }
886 }
887
888 if (Level == OptimizationLevel::O0)
889 return;
890
891 // We don't want to run internalization at per-module stage.
895 }
896
899 });
900
901 PB.registerPeepholeEPCallback(
902 [](FunctionPassManager &FPM, OptimizationLevel Level) {
903 if (Level == OptimizationLevel::O0)
904 return;
905
909
912 });
913
914 PB.registerCGSCCOptimizerLateEPCallback(
915 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
916 if (Level == OptimizationLevel::O0)
917 return;
918
919         FunctionPassManager FPM;
920 
921 // Add promote kernel arguments pass to the opt pipeline right before
922 // infer address spaces which is needed to do actual address space
923 // rewriting.
924 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
925             EnablePromoteKernelArguments)
926           FPM.addPass(AMDGPUPromoteKernelArgumentsPass());
927 
928 // Add infer address spaces pass to the opt pipeline after inlining
929 // but before SROA to increase SROA opportunities.
930         FPM.addPass(InferAddressSpacesPass());
931 
932 // This should run after inlining to have any chance of doing
933 // anything, and before other cleanup optimizations.
934         FPM.addPass(AMDGPULowerKernelAttributesPass());
935 
936 if (Level != OptimizationLevel::O0) {
937 // Promote alloca to vector before SROA and loop unroll. If we
938 // manage to eliminate allocas before unroll we may choose to unroll
939 // less.
940           FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
941         }
942
943 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
944 });
945
946 // FIXME: Why is AMDGPUAttributor not in CGSCC?
947 PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
948 OptimizationLevel Level,
949                                              ThinOrFullLTOPhase Phase) {
950     if (Level != OptimizationLevel::O0) {
951 if (!isLTOPreLink(Phase)) {
952 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
953           AMDGPUAttributorOptions Opts;
954           MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
955 }
956 }
957 }
958 });
959
960 PB.registerFullLinkTimeOptimizationLastEPCallback(
961 [this](ModulePassManager &PM, OptimizationLevel Level) {
962 // When we are using -fgpu-rdc, we can only run accelerator code
963 // selection after linking to prevent, otherwise we end up removing
964 // potentially reachable symbols that were exported as external in other
965 // modules.
966 if (EnableHipStdPar) {
969 }
970 // We want to support the -lto-partitions=N option as "best effort".
971 // For that, we need to lower LDS earlier in the pipeline before the
972 // module is partitioned for codegen.
975         if (EnableSwLowerLDS)
976           PM.addPass(AMDGPUSwLowerLDSPass(*this));
977         if (EnableLowerModuleLDS)
978           PM.addPass(AMDGPULowerModuleLDSPass(*this));
979 if (Level != OptimizationLevel::O0) {
980 // We only want to run this with O2 or higher since inliner and SROA
981 // don't run in O1.
982 if (Level != OptimizationLevel::O1) {
983 PM.addPass(
985 }
986 // Do we really need internalization in LTO?
987 if (InternalizeSymbols) {
988           PM.addPass(InternalizePass(mustPreserveGV));
989           PM.addPass(GlobalDCEPass());
990         }
991 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
992           AMDGPUAttributorOptions Opt;
993           if (HasClosedWorldAssumption)
994             Opt.IsClosedWorld = true;
995           PM.addPass(AMDGPUAttributorPass(
996               *this, Opt, ThinOrFullLTOPhase::FullLTOPostLink));
997         }
998 }
999 if (!NoKernelInfoEndLTO) {
1000         FunctionPassManager FPM;
1001         FPM.addPass(KernelInfoPrinter(this));
1002 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
1003 }
1004 });
1005
1006 PB.registerRegClassFilterParsingCallback(
1007 [](StringRef FilterName) -> RegAllocFilterFunc {
1008 if (FilterName == "sgpr")
1009 return onlyAllocateSGPRs;
1010 if (FilterName == "vgpr")
1011 return onlyAllocateVGPRs;
1012 if (FilterName == "wwm")
1013 return onlyAllocateWWMRegs;
1014 return nullptr;
1015 });
1016}
1017
1018int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
1019 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1020 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1021 AddrSpace == AMDGPUAS::REGION_ADDRESS)
1022 ? -1
1023 : 0;
1024}
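// Example (illustrative): null pointers in LDS (addrspace 3), scratch
// (addrspace 5) and region (addrspace 2) memory are therefore encoded as
// all-ones, e.g. "addrspacecast ptr null to ptr addrspace(3)" lowers to
// 0xFFFFFFFF, while flat and global null remain 0.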
1025
1026 bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
1027                                               unsigned DestAS) const {
1028   return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
1029          AMDGPU::isFlatGlobalAddrSpace(DestAS);
1030 }
1031
1032 unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
1033   if (auto *Arg = dyn_cast<Argument>(V);
1034 Arg &&
1035 AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
1036 !Arg->hasByRefAttr())
1037     return AMDGPUAS::GLOBAL_ADDRESS;
1038 
1039 const auto *LD = dyn_cast<LoadInst>(V);
1040 if (!LD) // TODO: Handle invariant load like constant.
1041     return AMDGPUAS::UNDEFINED_ADDRESS_SPACE;
1042 
1043 // It must be a generic pointer loaded.
1044 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
1045
1046 const auto *Ptr = LD->getPointerOperand();
1047 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
1048     return AMDGPUAS::UNDEFINED_ADDRESS_SPACE;
1049   // For a generic pointer loaded from the constant memory, it could be assumed
1050 // as a global pointer since the constant memory is only populated on the
1051 // host side. As implied by the offload programming model, only global
1052 // pointers could be referenced on the host side.
1053   return AMDGPUAS::GLOBAL_ADDRESS;
1054 }
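// Example (illustrative): for IR such as
//   %p = load ptr, ptr addrspace(4) %kernarg
// the loaded flat pointer is assumed to address global memory (addrspace 1):
// constant memory is only populated by the host, and the offload model only
// lets the host pass global pointers.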
1055
1056std::pair<const Value *, unsigned>
1057 AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
1058   if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1059 switch (II->getIntrinsicID()) {
1060 case Intrinsic::amdgcn_is_shared:
1061 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
1062 case Intrinsic::amdgcn_is_private:
1063 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
1064 default:
1065 break;
1066 }
1067 return std::pair(nullptr, -1);
1068 }
1069 // Check the global pointer predication based on
1070 // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
1071 // the order of 'is_shared' and 'is_private' is not significant.
1072 Value *Ptr;
1073 if (match(
1074 const_cast<Value *>(V),
1075           m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
1076                   m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
1077                       m_Deferred(Ptr))))))
1078 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
1079
1080 return std::pair(nullptr, -1);
1081}
1082
1083unsigned
1098
1099 bool AMDGPUTargetMachine::splitModule(
1100     Module &M, unsigned NumParts,
1101 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
1102 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
1103 // but all current users of this API don't have one ready and would need to
1104 // create one anyway. Let's hide the boilerplate for now to keep it simple.
1105
1106   LoopAnalysisManager LAM;
1107   FunctionAnalysisManager FAM;
1108   CGSCCAnalysisManager CGAM;
1109   ModuleAnalysisManager MAM;
1110 
1111 PassBuilder PB(this);
1112 PB.registerModuleAnalyses(MAM);
1113 PB.registerFunctionAnalyses(FAM);
1114 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
1115
1116   ModulePassManager MPM;
1117   MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
1118 MPM.run(M, MAM);
1119 return true;
1120}
1121
1122//===----------------------------------------------------------------------===//
1123// GCN Target Machine (SI+)
1124//===----------------------------------------------------------------------===//
1125
1126 GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
1127                                    StringRef CPU, StringRef FS,
1128 const TargetOptions &Options,
1129 std::optional<Reloc::Model> RM,
1130 std::optional<CodeModel::Model> CM,
1131 CodeGenOptLevel OL, bool JIT)
1132 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
1133
1134const TargetSubtargetInfo *
1135 GCNTargetMachine::getSubtargetImpl(const Function &F) const {
1136   StringRef GPU = getGPUName(F);
1137   StringRef FS = getFeatureString(F);
1138
1139 SmallString<128> SubtargetKey(GPU);
1140 SubtargetKey.append(FS);
1141
1142 auto &I = SubtargetMap[SubtargetKey];
1143 if (!I) {
1144 // This needs to be done before we create a new subtarget since any
1145 // creation will depend on the TM and the code generation flags on the
1146 // function that reside in TargetOptions.
1147     resetTargetOptions(F);
1148     I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1149 }
1150
1151 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1152
1153 return I.get();
1154}
1155
1156 TargetTransformInfo
1157 GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
1158   return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
1159}
1160
1161 Error GCNTargetMachine::buildCodeGenPipeline(
1162     ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
1163     CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1164     PassInstrumentationCallbacks *PIC) {
1165   AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1166 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1167}
1168
1169 ScheduleDAGInstrs *
1170 GCNTargetMachine::createMachineScheduler(MachineSchedContext *C) const {
1171   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1172   if (ST.enableSIScheduler())
1173     return createSIMachineScheduler(C);
1174 
1175   Attribute SchedStrategyAttr =
1176       C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1177   StringRef SchedStrategy = SchedStrategyAttr.isValid()
1178                                 ? SchedStrategyAttr.getValueAsString()
1179                                 : AMDGPUSchedStrategy;
1180 
1181   if (SchedStrategy == "max-ilp")
1182     return createGCNMaxILPMachineScheduler(C);
1183 
1184   if (SchedStrategy == "max-memory-clause")
1185     return createGCNMaxMemoryClauseMachineScheduler(C);
1186 
1187   if (SchedStrategy == "iterative-ilp")
1188     return createIterativeILPMachineScheduler(C);
1189 
1190   if (SchedStrategy == "iterative-minreg")
1191     return createMinRegScheduler(C);
1192 
1193   if (SchedStrategy == "iterative-maxocc")
1194     return createIterativeGCNMaxOccupancyMachineScheduler(C);
1195 
1196   return createGCNMaxOccupancyMachineScheduler(C);
1197 }
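// Example (illustrative): the strategy can also be chosen per function with an
// IR attribute, which takes precedence over -amdgpu-sched-strategy, e.g.
//   define amdgpu_kernel void @k() #0 { ... }
//   attributes #0 = { "amdgpu-sched-strategy"="max-ilp" }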
1198
1199 ScheduleDAGInstrs *
1200 GCNTargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
1201   ScheduleDAGMI *DAG =
1202       new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
1203                                    /*RemoveKillFlags=*/true);
1204   const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1205   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1206   if (ST.shouldClusterStores())
1207     DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1208   DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
1209   if ((EnableVOPD.getNumOccurrences() ||
1210        getOptLevel() >= CodeGenOptLevel::Less) &&
1211       EnableVOPD)
1212     DAG->addMutation(createVOPDPairingMutation());
1215 return DAG;
1216}
1217//===----------------------------------------------------------------------===//
1218// AMDGPU Legacy Pass Setup
1219//===----------------------------------------------------------------------===//
1220
1221std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1222 return getStandardCSEConfigForOpt(TM->getOptLevel());
1223}
1224
1225namespace {
1226
1227class GCNPassConfig final : public AMDGPUPassConfig {
1228public:
1229 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1230 : AMDGPUPassConfig(TM, PM) {
1231 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1232 }
1233
1234 GCNTargetMachine &getGCNTargetMachine() const {
1235 return getTM<GCNTargetMachine>();
1236 }
1237
1238 bool addPreISel() override;
1239 void addMachineSSAOptimization() override;
1240 bool addILPOpts() override;
1241 bool addInstSelector() override;
1242 bool addIRTranslator() override;
1243 void addPreLegalizeMachineIR() override;
1244 bool addLegalizeMachineIR() override;
1245 void addPreRegBankSelect() override;
1246 bool addRegBankSelect() override;
1247 void addPreGlobalInstructionSelect() override;
1248 bool addGlobalInstructionSelect() override;
1249 void addPreRegAlloc() override;
1250 void addFastRegAlloc() override;
1251 void addOptimizedRegAlloc() override;
1252
1253 FunctionPass *createSGPRAllocPass(bool Optimized);
1254 FunctionPass *createVGPRAllocPass(bool Optimized);
1255 FunctionPass *createWWMRegAllocPass(bool Optimized);
1256 FunctionPass *createRegAllocPass(bool Optimized) override;
1257
1258 bool addRegAssignAndRewriteFast() override;
1259 bool addRegAssignAndRewriteOptimized() override;
1260
1261 bool addPreRewrite() override;
1262 void addPostRegAlloc() override;
1263 void addPreSched2() override;
1264 void addPreEmitPass() override;
1265 void addPostBBSections() override;
1266};
1267
1268} // end anonymous namespace
1269
1270 AMDGPUPassConfig::AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
1271     : TargetPassConfig(TM, PM) {
1272 // Exceptions and StackMaps are not supported, so these passes will never do
1273 // anything.
1274   disablePass(&StackMapLivenessID);
1275   disablePass(&FuncletLayoutID);
1276   // Garbage collection is not supported.
1277   disablePass(&GCLoweringID);
1278   disablePass(&ShadowStackGCLoweringID);
1279 }
1280
1287
1288 void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
1289   if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
1290     addPass(createLoopDataPrefetchPass());
1291   addPass(createSeparateConstOffsetFromGEPPass());
1292   // ReassociateGEPs exposes more opportunities for SLSR. See
1293   // the example in reassociate-geps-and-slsr.ll.
1294   addPass(createStraightLineStrengthReducePass());
1295   // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
1296   // EarlyCSE can reuse.
1297   addEarlyCSEOrGVNPass();
1298   // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1299   addPass(createNaryReassociatePass());
1300   // NaryReassociate on GEPs creates redundant common expressions, so run
1301   // EarlyCSE after it.
1302   addPass(createEarlyCSEPass());
1303 }
1304
1305 void AMDGPUPassConfig::addIRPasses() {
1306   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
1307 
1308 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
1309     addPass(createAMDGPURemoveIncompatibleFunctionsPass(&TM));
1310 
1311 // There is no reason to run these.
1315
1316   addPass(createAMDGPUPrintfRuntimeBinding());
1317   if (LowerCtorDtor)
1318     addPass(createAMDGPUCtorDtorLoweringLegacyPass());
1319 
1320 if (TM.getTargetTriple().isAMDGCN() &&
1321       isPassEnabled(EnableImageIntrinsicOptimizer))
1322     addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
1323 
1326
1327 // This can be disabled by passing ::Disable here or on the command line
1328 // with --expand-variadics-override=disable.
1329   addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
1330 
1331 // Function calls are not supported, so make sure we inline everything.
1332   addPass(createAMDGPUAlwaysInlinePass());
1333   addPass(createAlwaysInlinerLegacyPass());
1334 
1335 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1336 if (TM.getTargetTriple().getArch() == Triple::r600)
1337     addPass(createR600OpenCLImageTypeLoweringPass());
1338 
1339 // Make enqueued block runtime handles externally visible.
1341
1342 // Lower special LDS accesses.
1345
1346 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1347 if (EnableSwLowerLDS)
1349
1350 // Runs before PromoteAlloca so the latter can account for function uses
1351   if (EnableLowerModuleLDS) {
1352     addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
1353   }
1354
1355 // Run atomic optimizer before Atomic Expand
1356 if ((TM.getTargetTriple().isAMDGCN()) &&
1357 (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
1358       (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
1359     addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
1360   }
1361
1362   addPass(createAtomicExpandLegacyPass());
1363 
1364 if (TM.getOptLevel() > CodeGenOptLevel::None) {
1365     addPass(createInferAddressSpacesPass());
1366 
1369
1370     if (EnableAMDGPUAliasAnalysis) {
1371       addPass(createAMDGPUAAWrapperPass());
1372       addPass(createExternalAAWrapperPass([](Pass &P, Function &,
1373                                              AAResults &AAR) {
1374 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1375 AAR.addAAResult(WrapperPass->getResult());
1376 }));
1377 }
1378
1379 if (TM.getTargetTriple().isAMDGCN()) {
1380 // TODO: May want to move later or split into an early and late one.
1381     addPass(createAMDGPUCodeGenPreparePass());
1382   }
1383
1384 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1385 // have expanded.
1386 if (TM.getOptLevel() > CodeGenOptLevel::Less)
1387       addPass(createLICMPass());
1388   }
1389 
1390   TargetPassConfig::addIRPasses();
1391 
1392 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1393 // example, GVN can combine
1394 //
1395 // %0 = add %a, %b
1396 // %1 = add %b, %a
1397 //
1398 // and
1399 //
1400 // %0 = shl nsw %a, 2
1401 // %1 = shl %a, 2
1402 //
1403 // but EarlyCSE can do neither of them.
1404   if (isPassEnabled(EnableScalarIRPasses))
1405     addEarlyCSEOrGVNPass();
1406 }
1407
1408 void AMDGPUPassConfig::addCodeGenPrepare() {
1409   if (TM->getTargetTriple().isAMDGCN() &&
1410       TM->getOptLevel() > CodeGenOptLevel::None)
1411     addPass(createAMDGPUPreloadKernelArgumentsLegacyPass(TM));
1412 
1413   if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
1414     addPass(createAMDGPULowerKernelArgumentsPass());
1415 
1416   TargetPassConfig::addCodeGenPrepare();
1417 
1418   if (EnableLoadStoreVectorizer)
1419     addPass(createLoadStoreVectorizerPass());
1420 
1421 if (TM->getTargetTriple().isAMDGCN()) {
1422 // This lowering has been placed after codegenprepare to take advantage of
1423 // address mode matching (which is why it isn't put with the LDS lowerings).
1424 // It could be placed anywhere before uniformity annotations (an analysis
1425 // that it changes by splitting up fat pointers into their components)
1426 // but has been put before switch lowering and CFG flattening so that those
1427 // passes can run on the more optimized control flow this pass creates in
1428 // many cases.
1429     addPass(createAMDGPULowerBufferFatPointersPass());
1431   }
1432
1433 // LowerSwitch pass may introduce unreachable blocks that can
1434 // cause unexpected behavior for subsequent passes. Placing it
1435   // here seems better, as these blocks would get cleaned up by
1436   // UnreachableBlockElim inserted next in the pass flow.
1437   addPass(createLowerSwitchPass());
1438 }
1439
1440 bool AMDGPUPassConfig::addPreISel() {
1441   if (TM->getOptLevel() > CodeGenOptLevel::None)
1442     addPass(createFlattenCFGPass());
1443   return false;
1444}
1445
1450
1451 bool AMDGPUPassConfig::addGCPasses() {
1452   // Do nothing. GC is not supported.
1453 return false;
1454}
1455
1456//===----------------------------------------------------------------------===//
1457// GCN Legacy Pass Setup
1458//===----------------------------------------------------------------------===//
1459
1460bool GCNPassConfig::addPreISel() {
1461   AMDGPUPassConfig::addPreISel();
1462 
1463 if (TM->getOptLevel() > CodeGenOptLevel::None)
1464 addPass(createSinkingPass());
1465
1466 if (TM->getOptLevel() > CodeGenOptLevel::None)
1467     addPass(createAMDGPULateCodeGenPrepareLegacyPass());
1468 
1469 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1470 // regions formed by them.
1471   addPass(&AMDGPUUnifyDivergentExitNodesID);
1472   addPass(createFixIrreduciblePass());
1473 addPass(createUnifyLoopExitsPass());
1474 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1475
1478 // TODO: Move this right after structurizeCFG to avoid extra divergence
1479 // analysis. This depends on stopping SIAnnotateControlFlow from making
1480 // control flow modifications.
1482
1483 // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
1484 // with -new-reg-bank-select and without any of the fallback options.
1486 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
1487 addPass(createLCSSAPass());
1488
1489 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1491
1492 return false;
1493}
1494
1495void GCNPassConfig::addMachineSSAOptimization() {
1496   TargetPassConfig::addMachineSSAOptimization();
1497 
1498 // We want to fold operands after PeepholeOptimizer has run (or as part of
1499 // it), because it will eliminate extra copies making it easier to fold the
1500 // real source operand. We want to eliminate dead instructions after, so that
1501 // we see fewer uses of the copies. We then need to clean up the dead
1502 // instructions leftover after the operands are folded as well.
1503 //
1504 // XXX - Can we get away without running DeadMachineInstructionElim again?
1505 addPass(&SIFoldOperandsLegacyID);
1506 if (EnableDPPCombine)
1507 addPass(&GCNDPPCombineLegacyID);
1508   addPass(&SILoadStoreOptimizerLegacyID);
1509   if (isPassEnabled(EnableSDWAPeephole)) {
1510 addPass(&SIPeepholeSDWALegacyID);
1511 addPass(&EarlyMachineLICMID);
1512 addPass(&MachineCSELegacyID);
1513 addPass(&SIFoldOperandsLegacyID);
1514 }
1515   addPass(&DeadMachineInstructionElimID);
1517 }
1518
1519bool GCNPassConfig::addILPOpts() {
1520   if (EnableEarlyIfConversion)
1521     addPass(&EarlyIfConverterLegacyID);
1522
1523   TargetPassConfig::addILPOpts();
1524   return false;
1525}
1526
1527bool GCNPassConfig::addInstSelector() {
1528   AMDGPUPassConfig::addInstSelector();
1529   addPass(&SIFixSGPRCopiesLegacyID);
1530   addPass(createSILowerI1CopiesLegacyPass());
1531   return false;
1532}
1533
1534bool GCNPassConfig::addIRTranslator() {
1535 addPass(new IRTranslator(getOptLevel()));
1536 return false;
1537}
1538
1539void GCNPassConfig::addPreLegalizeMachineIR() {
1540 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1541 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1542 addPass(new Localizer());
1543}
1544
1545bool GCNPassConfig::addLegalizeMachineIR() {
1546 addPass(new Legalizer());
1547 return false;
1548}
1549
1550void GCNPassConfig::addPreRegBankSelect() {
1551 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1552 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1553   addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
1554 }
1555
1556bool GCNPassConfig::addRegBankSelect() {
1557 if (NewRegBankSelect) {
1558     addPass(createAMDGPURegBankSelectPass());
1559     addPass(createAMDGPURegBankLegalizePass());
1560   } else {
1561 addPass(new RegBankSelect());
1562 }
1563 return false;
1564}
1565
1566void GCNPassConfig::addPreGlobalInstructionSelect() {
1567 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1568 addPass(createAMDGPURegBankCombiner(IsOptNone));
1569}
1570
1571bool GCNPassConfig::addGlobalInstructionSelect() {
1572 addPass(new InstructionSelect(getOptLevel()));
1573 return false;
1574}
1575
1576void GCNPassConfig::addFastRegAlloc() {
1577 // FIXME: We have to disable the verifier here because of PHIElimination +
1578 // TwoAddressInstructions disabling it.
1579
1580 // This must be run immediately after phi elimination and before
1581 // TwoAddressInstructions, otherwise the processing of the tied operand of
1582 // SI_ELSE will introduce a copy of the tied operand source after the else.
1583   insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);
1584 
1586
1587   TargetPassConfig::addFastRegAlloc();
1588 }
1589
1590void GCNPassConfig::addPreRegAlloc() {
1591 if (getOptLevel() != CodeGenOptLevel::None)
1593}
1594
1595void GCNPassConfig::addOptimizedRegAlloc() {
1596 if (EnableDCEInRA)
1597     insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);
1598 
1599 // FIXME: when an instruction has a Killed operand, and the instruction is
1600 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1601 // the register in LiveVariables, this would trigger a failure in verifier,
1602 // we should fix it and enable the verifier.
1603 if (OptVGPRLiveRange)
1605
1606 // This must be run immediately after phi elimination and before
1607 // TwoAddressInstructions, otherwise the processing of the tied operand of
1608 // SI_ELSE will introduce a copy of the tied operand source after the else.
1610
1613
1614 if (isPassEnabled(EnablePreRAOptimizations))
1616
1617 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1618 // instructions that cause scheduling barriers.
1620
1621 if (OptExecMaskPreRA)
1623
1624 // This is not an essential optimization and it has a noticeable impact on
1625 // compilation time, so we only enable it from O2.
1626 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1628
1629   TargetPassConfig::addOptimizedRegAlloc();
1630 }
1631
1632bool GCNPassConfig::addPreRewrite() {
1633   if (EnableRegReassign)
1634     addPass(&GCNNSAReassignID);
1635
1637 return true;
1638}
1639
1640FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1641 // Initialize the global default.
1642 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1643 initializeDefaultSGPRRegisterAllocatorOnce);
1644
1645 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1646 if (Ctor != useDefaultRegisterAllocator)
1647 return Ctor();
1648
1649 if (Optimized)
1650 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1651
1652 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1653}
1654
1655FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1656 // Initialize the global default.
1657 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1658 initializeDefaultVGPRRegisterAllocatorOnce);
1659
1660 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1661 if (Ctor != useDefaultRegisterAllocator)
1662 return Ctor();
1663
1664 if (Optimized)
1665 return createGreedyVGPRRegisterAllocator();
1666
1667 return createFastVGPRRegisterAllocator();
1668}
1669
1670FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1671 // Initialize the global default.
1672 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1673 initializeDefaultWWMRegisterAllocatorOnce);
1674
1675 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1676 if (Ctor != useDefaultRegisterAllocator)
1677 return Ctor();
1678
1679 if (Optimized)
1680 return createGreedyWWMRegisterAllocator();
1681
1682 return createFastWWMRegisterAllocator();
1683}
1684
1685FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1686 llvm_unreachable("should not be used");
1687}
1688
1690 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1691 "and -vgpr-regalloc";
1692
1693bool GCNPassConfig::addRegAssignAndRewriteFast() {
1694 if (!usingDefaultRegAlloc())
1696
1697 addPass(&GCNPreRALongBranchRegID);
1698
1699 addPass(createSGPRAllocPass(false));
1700
1701 // Equivalent of PEI for SGPRs.
1702 addPass(&SILowerSGPRSpillsLegacyID);
1703
1704 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1705   addPass(&SIPreAllocateWWMRegsLegacyID);
1706 
1707 // For allocating other wwm register operands.
1708 addPass(createWWMRegAllocPass(false));
1709
1710 addPass(&SILowerWWMCopiesLegacyID);
1712
1713 // For allocating per-thread VGPRs.
1714 addPass(createVGPRAllocPass(false));
1715
1716 return true;
1717}
1718
1719bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1720 if (!usingDefaultRegAlloc())
1722
1723 addPass(&GCNPreRALongBranchRegID);
1724
1725 addPass(createSGPRAllocPass(true));
1726
1727 // Commit allocated register changes. This is mostly necessary because too
1728 // many things rely on the use lists of the physical registers, such as the
1729 // verifier. This is only necessary with allocators which use LiveIntervals,
1730 // since FastRegAlloc does the replacements itself.
1731 addPass(createVirtRegRewriter(false));
1732
1733 // At this point, the sgpr-regalloc has been done and it is good to have the
1734 // stack slot coloring to try to optimize the SGPR spill stack indices before
1735 // attempting the custom SGPR spill lowering.
1736 addPass(&StackSlotColoringID);
1737
1738 // Equivalent of PEI for SGPRs.
1739 addPass(&SILowerSGPRSpillsLegacyID);
1740
1741 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1742   addPass(&SIPreAllocateWWMRegsLegacyID);
1743 
1744 // For allocating other whole wave mode registers.
1745 addPass(createWWMRegAllocPass(true));
1746 addPass(&SILowerWWMCopiesLegacyID);
1747 addPass(createVirtRegRewriter(false));
1749
1750 // For allocating per-thread VGPRs.
1751 addPass(createVGPRAllocPass(true));
1752
1753 addPreRewrite();
1754 addPass(&VirtRegRewriterID);
1755
1757
1758 return true;
1759}
1760
1761void GCNPassConfig::addPostRegAlloc() {
1762 addPass(&SIFixVGPRCopiesID);
1763 if (getOptLevel() > CodeGenOptLevel::None)
1764     addPass(&SIOptimizeExecMaskingLegacyID);
1765   TargetPassConfig::addPostRegAlloc();
1766 }
1767
1768void GCNPassConfig::addPreSched2() {
1769 if (TM->getOptLevel() > CodeGenOptLevel::None)
1771 addPass(&SIPostRABundlerLegacyID);
1772}
1773
1774void GCNPassConfig::addPreEmitPass() {
1775 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1776 addPass(&GCNCreateVOPDID);
1777 addPass(createSIMemoryLegalizerPass());
1778 addPass(createSIInsertWaitcntsPass());
1779
1780 addPass(createSIModeRegisterPass());
1781
1782 if (getOptLevel() > CodeGenOptLevel::None)
1783 addPass(&SIInsertHardClausesID);
1784
1785   addPass(&SILateBranchLoweringPassID);
1786   if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1787     addPass(createAMDGPUSetWavePriorityPass());
1788   if (getOptLevel() > CodeGenOptLevel::None)
1789 addPass(&SIPreEmitPeepholeID);
1790 // The hazard recognizer that runs as part of the post-ra scheduler does not
1791 // guarantee to be able to handle all hazards correctly. This is because if there
1792 // are multiple scheduling regions in a basic block, the regions are scheduled
1793 // bottom up, so when we begin to schedule a region we don't know what
1794 // instructions were emitted directly before it.
1795 //
1796 // Here we add a stand-alone hazard recognizer pass which can handle all
1797 // cases.
1798 addPass(&PostRAHazardRecognizerID);
1799
1801
1803
1804 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1805 addPass(&AMDGPUInsertDelayAluID);
1806
1807 addPass(&BranchRelaxationPassID);
1808}
1809
1810void GCNPassConfig::addPostBBSections() {
1811 // We run this later to avoid passes like livedebugvalues and BBSections
1812 // having to deal with the apparent multi-entry functions we may generate.
1814}
1815
1816 TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
1817   return new GCNPassConfig(*this, PM);
1818}
1819
1825
1832
1836
1843
1844 bool GCNTargetMachine::parseMachineFunctionInfo(
1845     const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
1846     SMDiagnostic &Error, SMRange &SourceRange) const {
1847 const yaml::SIMachineFunctionInfo &YamlMFI =
1848 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1849 MachineFunction &MF = PFS.MF;
1850   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1851   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1852
1853 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1854 return true;
1855
1856 if (MFI->Occupancy == 0) {
1857 // Fixup the subtarget dependent default value.
1858 MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
1859 }
1860
1861 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1862 Register TempReg;
1863 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1864 SourceRange = RegName.SourceRange;
1865 return true;
1866 }
1867 RegVal = TempReg;
1868
1869 return false;
1870 };
1871
1872 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1873 Register &RegVal) {
1874 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1875 };
1876
1877 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1878 return true;
1879
1880 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1881 return true;
1882
1883 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1884 MFI->LongBranchReservedReg))
1885 return true;
1886
1887 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1888     // Create a diagnostic for the register string literal.
1889 const MemoryBuffer &Buffer =
1890 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1891 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1892 RegName.Value.size(), SourceMgr::DK_Error,
1893 "incorrect register class for field", RegName.Value,
1894 {}, {});
1895 SourceRange = RegName.SourceRange;
1896 return true;
1897 };
1898
1899 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1900 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1901 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1902 return true;
1903
1904 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1905 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1906 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1907 }
1908
1909 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1910 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1911 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1912 }
1913
1914 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1915 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1916 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1917 }
1918
1919 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1920 Register ParsedReg;
1921 if (parseRegister(YamlReg, ParsedReg))
1922 return true;
1923
1924 MFI->reserveWWMRegister(ParsedReg);
1925 }
1926
1927 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1928 MFI->setFlag(Info->VReg, Info->Flags);
1929 }
1930 for (const auto &[_, Info] : PFS.VRegInfos) {
1931 MFI->setFlag(Info->VReg, Info->Flags);
1932 }
1933
1934 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1935 Register ParsedReg;
1936 if (parseRegister(YamlRegStr, ParsedReg))
1937 return true;
1938 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1939 }
1940
1941 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1942 const TargetRegisterClass &RC,
1943 ArgDescriptor &Arg, unsigned UserSGPRs,
1944 unsigned SystemSGPRs) {
1945 // Skip parsing if it's not present.
1946 if (!A)
1947 return false;
1948
1949 if (A->IsRegister) {
1950 Register Reg;
1951 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1952 SourceRange = A->RegisterName.SourceRange;
1953 return true;
1954 }
1955 if (!RC.contains(Reg))
1956 return diagnoseRegisterClass(A->RegisterName);
1957       Arg = ArgDescriptor::createRegister(Reg);
1958     } else
1959 Arg = ArgDescriptor::createStack(A->StackOffset);
1960 // Check and apply the optional mask.
1961 if (A->Mask)
1962 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1963
1964 MFI->NumUserSGPRs += UserSGPRs;
1965 MFI->NumSystemSGPRs += SystemSGPRs;
1966 return false;
1967 };
1968
1969 if (YamlMFI.ArgInfo &&
1970 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1971 AMDGPU::SGPR_128RegClass,
1972 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1973 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1974 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1975 2, 0) ||
1976 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1977 MFI->ArgInfo.QueuePtr, 2, 0) ||
1978 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1979 AMDGPU::SReg_64RegClass,
1980 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1981 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1982 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1983 2, 0) ||
1984 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1985 AMDGPU::SReg_64RegClass,
1986 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1987 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1988 AMDGPU::SGPR_32RegClass,
1989 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1990 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1991 AMDGPU::SGPR_32RegClass,
1992 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1993 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1994 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1995 0, 1) ||
1996 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1997 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1998 0, 1) ||
1999 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
2000 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
2001 0, 1) ||
2002 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
2003 AMDGPU::SGPR_32RegClass,
2004 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
2005 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
2006 AMDGPU::SGPR_32RegClass,
2007 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
2008 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
2009 AMDGPU::SReg_64RegClass,
2010 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
2011 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
2012 AMDGPU::SReg_64RegClass,
2013 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
2014 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
2015 AMDGPU::VGPR_32RegClass,
2016 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
2017 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
2018 AMDGPU::VGPR_32RegClass,
2019 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
2020 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
2021 AMDGPU::VGPR_32RegClass,
2022 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
2023 return true;
2024
2025 // Parse FirstKernArgPreloadReg separately, since it's a Register,
2026 // not ArgDescriptor.
2027 if (YamlMFI.ArgInfo && YamlMFI.ArgInfo->FirstKernArgPreloadReg) {
2028 const yaml::SIArgument &A = *YamlMFI.ArgInfo->FirstKernArgPreloadReg;
2029
2030 if (!A.IsRegister) {
2031 // For stack arguments, we don't have RegisterName.SourceRange,
2032 // but we should have some location info from the YAML parser
2033 const MemoryBuffer &Buffer =
2034 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
2035 // Create a minimal valid source range
2036       SMLoc Loc = SMLoc::getFromPointer(Buffer.getBufferStart());
2037       SMRange Range(Loc, Loc);
2038
2039       Error = SMDiagnostic(
2040           *PFS.SM, Loc, Buffer.getBufferIdentifier(), 1, 0, SourceMgr::DK_Error,
2041 "firstKernArgPreloadReg must be a register, not a stack location", "",
2042 {}, {});
2043
2044 SourceRange = Range;
2045 return true;
2046 }
2047
2048 Register Reg;
2049 if (parseNamedRegisterReference(PFS, Reg, A.RegisterName.Value, Error)) {
2050 SourceRange = A.RegisterName.SourceRange;
2051 return true;
2052 }
2053
2054 if (!AMDGPU::SGPR_32RegClass.contains(Reg))
2055 return diagnoseRegisterClass(A.RegisterName);
2056
2057 MFI->ArgInfo.FirstKernArgPreloadReg = Reg;
2058 MFI->NumUserSGPRs += YamlMFI.NumKernargPreloadSGPRs;
2059 }
2060
2061 if (ST.hasIEEEMode())
2062 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
2063 if (ST.hasDX10ClampMode())
2064 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
2065
2066 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
2067   MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
2068                                       ? DenormalMode::IEEE
2069                                       : DenormalMode::PreserveSign;
2070   MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
2071                                        ? DenormalMode::IEEE
2072                                        : DenormalMode::PreserveSign;
2073 
2080
2081 if (YamlMFI.HasInitWholeWave)
2082 MFI->setInitWholeWave();
2083
2084 return false;
2085}
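// Example (illustrative): the YAML parsed above appears in MIR under the
// machineFunctionInfo key, for instance
//   machineFunctionInfo:
//     scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
//     frameOffsetReg:    '$sgpr33'
//     stackPtrOffsetReg: '$sgpr32'
//     occupancy:         8
// Field names follow the yaml::SIMachineFunctionInfo mapping; the registers
// shown here are placeholders.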
2086
2087//===----------------------------------------------------------------------===//
2088// AMDGPU CodeGen Pass Builder interface.
2089//===----------------------------------------------------------------------===//
2090
2091AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
2092 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
2093     PassInstrumentationCallbacks *PIC)
2094     : CodeGenPassBuilder(TM, Opts, PIC) {
2095 Opt.MISchedPostRA = true;
2096 Opt.RequiresCodeGenSCCOrder = true;
2097 // Exceptions and StackMaps are not supported, so these passes will never do
2098 // anything.
2099 // Garbage collection is not supported.
2100 disablePass<StackMapLivenessPass, FuncletLayoutPass,
2101               ShadowStackGCLoweringPass>();
2102 }
2103
2104void AMDGPUCodeGenPassBuilder::addIRPasses(PassManagerWrapper &PMW) const {
2105 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN()) {
2106 flushFPMsToMPM(PMW);
2107 addModulePass(AMDGPURemoveIncompatibleFunctionsPass(TM), PMW);
2108 }
2109
2110 flushFPMsToMPM(PMW);
2111 addModulePass(AMDGPUPrintfRuntimeBindingPass(), PMW);
2112 if (LowerCtorDtor)
2113 addModulePass(AMDGPUCtorDtorLoweringPass(), PMW);
2114
2115 if (isPassEnabled(EnableImageIntrinsicOptimizer))
2116 addFunctionPass(AMDGPUImageIntrinsicOptimizerPass(TM), PMW);
2117
2119 addFunctionPass(AMDGPUUniformIntrinsicCombinePass(), PMW);
2120 // This can be disabled by passing ::Disable here or on the command line
2121 // with --expand-variadics-override=disable.
2122 flushFPMsToMPM(PMW);
2123   addModulePass(ExpandVariadicsPass(ExpandVariadicsMode::Lowering), PMW);
2124 
2125 addModulePass(AMDGPUAlwaysInlinePass(), PMW);
2126 addModulePass(AlwaysInlinerPass(), PMW);
2127
2128 addModulePass(AMDGPUExportKernelRuntimeHandlesPass(), PMW);
2129
2131 addModulePass(AMDGPULowerExecSyncPass(), PMW);
2132
2133 if (EnableSwLowerLDS)
2134 addModulePass(AMDGPUSwLowerLDSPass(TM), PMW);
2135
2136 // Runs before PromoteAlloca so the latter can account for function uses
2137   if (EnableLowerModuleLDS)
2138     addModulePass(AMDGPULowerModuleLDSPass(TM), PMW);
2139
2140 // Run atomic optimizer before Atomic Expand
2141 if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
2142       (AMDGPUAtomicOptimizerStrategy != ScanOptions::None))
2143     addFunctionPass(
2144         AMDGPUAtomicOptimizerPass(TM, AMDGPUAtomicOptimizerStrategy), PMW);
2145 
2146 addFunctionPass(AtomicExpandPass(TM), PMW);
2147
2148 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2149 addFunctionPass(AMDGPUPromoteAllocaPass(TM), PMW);
2150 if (isPassEnabled(EnableScalarIRPasses))
2151 addStraightLineScalarOptimizationPasses(PMW);
2152
2153 // TODO: Handle EnableAMDGPUAliasAnalysis
2154
2155 // TODO: May want to move later or split into an early and late one.
2156 addFunctionPass(AMDGPUCodeGenPreparePass(TM), PMW);
2157
2158 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
2159 // have expanded.
2160 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2162 /*UseMemorySSA=*/true),
2163 PMW);
2164 }
2165 }
2166
2167 Base::addIRPasses(PMW);
2168
2169 // EarlyCSE is not always strong enough to clean up what LSR produces. For
2170 // example, GVN can combine
2171 //
2172 // %0 = add %a, %b
2173 // %1 = add %b, %a
2174 //
2175 // and
2176 //
2177 // %0 = shl nsw %a, 2
2178 // %1 = shl %a, 2
2179 //
2180 // but EarlyCSE can do neither of them.
2181 if (isPassEnabled(EnableScalarIRPasses))
2182 addEarlyCSEOrGVNPass(PMW);
2183}
2184
2185void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(
2186 PassManagerWrapper &PMW) const {
2187 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2188 flushFPMsToMPM(PMW);
2189 addModulePass(AMDGPUPreloadKernelArgumentsPass(TM), PMW);
2190 }
2191
2192 if (EnableLowerKernelArguments)
2193 addFunctionPass(AMDGPULowerKernelArgumentsPass(TM), PMW);
2194
2195 Base::addCodeGenPrepare(PMW);
2196
2197 if (isPassEnabled(EnableLoadStoreVectorizer))
2198 addFunctionPass(LoadStoreVectorizerPass(), PMW);
2199
2200 // This lowering has been placed after codegenprepare to take advantage of
2201 // address mode matching (which is why it isn't put with the LDS lowerings).
2202 // It could be placed anywhere before uniformity annotations (an analysis
2203 // that it changes by splitting up fat pointers into their components)
2204 // but has been put before switch lowering and CFG flattening so that those
2205 // passes can run on the more optimized control flow this pass creates in
2206 // many cases.
2207 flushFPMsToMPM(PMW);
2208 addModulePass(AMDGPULowerBufferFatPointersPass(TM), PMW);
2209 flushFPMsToMPM(PMW);
2210 requireCGSCCOrder(PMW);
2211
2212 addModulePass(AMDGPULowerIntrinsicsPass(TM), PMW);
2213
2214 // The LowerSwitch pass may introduce unreachable blocks that can cause
2215 // unexpected behavior for subsequent passes. Placing it here means those
2216 // blocks get cleaned up by UnreachableBlockElim, which is inserted next in
2217 // the pass flow.
2218 addFunctionPass(LowerSwitchPass(), PMW);
2219}
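// A rough sketch (not part of the upstream file) of what the buffer
// fat-pointer lowering above does conceptually: a 160-bit ptr addrspace(7)
// value is split into its 128-bit resource and 32-bit offset components, so
// IR along the lines of
//
//   %v = load float, ptr addrspace(7) %fat
//
// is rewritten in terms of a ptr addrspace(8) resource plus an i32 offset,
// which is why the pass has to run before uniformity annotations are recorded.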
2220
2221void AMDGPUCodeGenPassBuilder::addPreISel(PassManagerWrapper &PMW) const {
2222
2223 // Require AMDGPUArgumentUsageAnalysis so that it's available during ISel.
2224 flushFPMsToMPM(PMW);
2225 addModulePass(RequireAnalysisPass<AMDGPUArgumentUsageAnalysis, Module>(),
2226 PMW);
2227
2228 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2229 addFunctionPass(FlattenCFGPass(), PMW);
2230 addFunctionPass(SinkingPass(), PMW);
2231 addFunctionPass(AMDGPULateCodeGenPreparePass(TM), PMW);
2232 }
2233
2234 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2235 // regions formed by them.
2236
2237 addFunctionPass(AMDGPUUnifyDivergentExitNodesPass(), PMW);
2238 addFunctionPass(FixIrreduciblePass(), PMW);
2239 addFunctionPass(UnifyLoopExitsPass(), PMW);
2240 addFunctionPass(StructurizeCFGPass(/*SkipUniformRegions=*/false), PMW);
2241
2242 addFunctionPass(AMDGPUAnnotateUniformValuesPass(), PMW);
2243
2244 addFunctionPass(SIAnnotateControlFlowPass(TM), PMW);
2245
2246 // TODO: Move this right after structurizeCFG to avoid extra divergence
2247 // analysis. This depends on stopping SIAnnotateControlFlow from making
2248 // control flow modifications.
2249 addFunctionPass(AMDGPURewriteUndefForPHIPass(), PMW);
2250
2251 if (!Opt.EnableGlobalISelOption ||
2252 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2253 addFunctionPass(LCSSAPass(), PMW);
2254
2255 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2256 flushFPMsToMPM(PMW);
2257 addModulePass(AMDGPUPerfHintAnalysisPass(TM), PMW);
2258 }
2259
2260 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2261 // isn't this in addInstSelector?
2262 addFunctionPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>(), PMW,
2263 /*Force=*/true);
2264}
2265
2266void AMDGPUCodeGenPassBuilder::addILPOpts(PassManagerWrapper &PMW) const {
2267 if (EnableEarlyIfConversion)
2268 addMachineFunctionPass(EarlyIfConverterPass(), PMW);
2269
2270 Base::addILPOpts(PMW);
2271}
2272
2273void AMDGPUCodeGenPassBuilder::addAsmPrinter(PassManagerWrapper &PMW,
2274 CreateMCStreamer) const {
2275 // TODO: Add AsmPrinter.
2276}
2277
2278Error AMDGPUCodeGenPassBuilder::addInstSelector(PassManagerWrapper &PMW) const {
2279 addMachineFunctionPass(AMDGPUISelDAGToDAGPass(TM), PMW);
2280 addMachineFunctionPass(SIFixSGPRCopiesPass(), PMW);
2281 addMachineFunctionPass(SILowerI1CopiesPass(), PMW);
2282 return Error::success();
2283}
2284
2285void AMDGPUCodeGenPassBuilder::addPreRewrite(PassManagerWrapper &PMW) const {
2286 if (EnableRegReassign) {
2287 addMachineFunctionPass(GCNNSAReassignPass(), PMW);
2288 }
2289}
2290
2291void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2292 PassManagerWrapper &PMW) const {
2293 Base::addMachineSSAOptimization(PMW);
2294
2295 addMachineFunctionPass(SIFoldOperandsPass(), PMW);
2296 if (EnableDPPCombine) {
2297 addMachineFunctionPass(GCNDPPCombinePass(), PMW);
2298 }
2299 addMachineFunctionPass(SILoadStoreOptimizerPass(), PMW);
2300 if (isPassEnabled(EnableSDWAPeephole)) {
2301 addMachineFunctionPass(SIPeepholeSDWAPass(), PMW);
2302 addMachineFunctionPass(EarlyMachineLICMPass(), PMW);
2303 addMachineFunctionPass(MachineCSEPass(), PMW);
2304 addMachineFunctionPass(SIFoldOperandsPass(), PMW);
2305 }
2306 addMachineFunctionPass(DeadMachineInstructionElimPass(), PMW);
2307 addMachineFunctionPass(SIShrinkInstructionsPass(), PMW);
2308}
2309
2310void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2311 PassManagerWrapper &PMW) const {
2312 if (EnableDCEInRA)
2313 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2314
2315 // FIXME: when an instruction with a killed operand is inside a bundle, it
2316 // seems that only the BUNDLE instruction appears as the kill of the register
2317 // in LiveVariables; this triggers a verifier failure, so we should fix it and
2318 // enable the verifier.
2319 if (OptVGPRLiveRange)
2320 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2321 SIOptimizeVGPRLiveRangePass());
2322
2323 // This must be run immediately after phi elimination and before
2324 // TwoAddressInstructions, otherwise the processing of the tied operand of
2325 // SI_ELSE will introduce a copy of the tied operand source after the else.
2326 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2327
2328 if (EnableRewritePartialRegUses)
2329 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2330
2331 if (isPassEnabled(EnablePreRAOptimizations))
2332 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2333
2334 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2335 // instructions that cause scheduling barriers.
2336 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2337
2338 if (OptExecMaskPreRA)
2339 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2340
2341 // This is not an essential optimization and it has a noticeable impact on
2342 // compilation time, so we only enable it from O2.
2343 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2344 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2345
2346 Base::addOptimizedRegAlloc(PMW);
2347}
2348
2349void AMDGPUCodeGenPassBuilder::addPreRegAlloc(PassManagerWrapper &PMW) const {
2350 if (getOptLevel() != CodeGenOptLevel::None)
2351 addMachineFunctionPass(AMDGPUPrepareAGPRAllocPass(), PMW);
2352}
2353
2354Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2355 PassManagerWrapper &PMW) const {
2356 // TODO: Check --regalloc-npm option
2357
2358 addMachineFunctionPass(GCNPreRALongBranchRegPass(), PMW);
2359
2360 addMachineFunctionPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}), PMW);
2361
2362 // Commit allocated register changes. This is mostly necessary because too
2363 // many things rely on the use lists of the physical registers, such as the
2364 // verifier. This is only necessary with allocators which use LiveIntervals,
2365 // since FastRegAlloc does the replacements itself.
2366 addMachineFunctionPass(VirtRegRewriterPass(false), PMW);
2367
2368 // At this point, the sgpr-regalloc has been done and it is good to have the
2369 // stack slot coloring to try to optimize the SGPR spill stack indices before
2370 // attempting the custom SGPR spill lowering.
2371 addMachineFunctionPass(StackSlotColoringPass(), PMW);
2372
2373 // Equivalent of PEI for SGPRs.
2374 addMachineFunctionPass(SILowerSGPRSpillsPass(), PMW);
2375
2376 // To allocate wwm registers used in whole quad mode operations (for shaders).
2377 addMachineFunctionPass(SIPreAllocateWWMRegsPass(), PMW);
2378
2379 // For allocating other wwm register operands.
2380 addMachineFunctionPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}), PMW);
2381 addMachineFunctionPass(SILowerWWMCopiesPass(), PMW);
2382 addMachineFunctionPass(VirtRegRewriterPass(false), PMW);
2383 addMachineFunctionPass(AMDGPUReserveWWMRegsPass(), PMW);
2384
2385 // For allocating per-thread VGPRs.
2386 addMachineFunctionPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}), PMW);
2387
2388 addPreRewrite(PMW);
2389 addMachineFunctionPass(VirtRegRewriterPass(true), PMW);
2390
2391 addMachineFunctionPass(AMDGPUMarkLastScratchLoadPass(), PMW);
2392 return Error::success();
2393}
2394
2395void AMDGPUCodeGenPassBuilder::addPostRegAlloc(PassManagerWrapper &PMW) const {
2396 addMachineFunctionPass(SIFixVGPRCopiesPass(), PMW);
2397 if (TM.getOptLevel() > CodeGenOptLevel::None)
2398 addMachineFunctionPass(SIOptimizeExecMaskingPass(), PMW);
2399 Base::addPostRegAlloc(PMW);
2400}
2401
2402void AMDGPUCodeGenPassBuilder::addPreSched2(PassManagerWrapper &PMW) const {
2403 if (TM.getOptLevel() > CodeGenOptLevel::None)
2404 addMachineFunctionPass(SIShrinkInstructionsPass(), PMW);
2405 addMachineFunctionPass(SIPostRABundlerPass(), PMW);
2406}
2407
2408void AMDGPUCodeGenPassBuilder::addPreEmitPass(PassManagerWrapper &PMW) const {
2409 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2410 addMachineFunctionPass(GCNCreateVOPDPass(), PMW);
2411 }
2412
2413 addMachineFunctionPass(SIMemoryLegalizerPass(), PMW);
2414 addMachineFunctionPass(SIInsertWaitcntsPass(), PMW);
2415
2416 addMachineFunctionPass(SIModeRegisterPass(), PMW);
2417
2418 if (TM.getOptLevel() > CodeGenOptLevel::None)
2419 addMachineFunctionPass(SIInsertHardClausesPass(), PMW);
2420
2421 addMachineFunctionPass(SILateBranchLoweringPass(), PMW);
2422
2423 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2424 addMachineFunctionPass(AMDGPUSetWavePriorityPass(), PMW);
2425
2426 if (TM.getOptLevel() > CodeGenOptLevel::None)
2427 addMachineFunctionPass(SIPreEmitPeepholePass(), PMW);
2428
2429 // The hazard recognizer that runs as part of the post-ra scheduler does not
2430 // guarantee to be able to handle all hazards correctly. This is because if there
2431 // are multiple scheduling regions in a basic block, the regions are scheduled
2432 // bottom up, so when we begin to schedule a region we don't know what
2433 // instructions were emitted directly before it.
2434 //
2435 // Here we add a stand-alone hazard recognizer pass which can handle all
2436 // cases.
2437 addMachineFunctionPass(PostRAHazardRecognizerPass(), PMW);
2438 addMachineFunctionPass(AMDGPUWaitSGPRHazardsPass(), PMW);
2439 addMachineFunctionPass(AMDGPULowerVGPREncodingPass(), PMW);
2440
2441 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2442 addMachineFunctionPass(AMDGPUInsertDelayAluPass(), PMW);
2443 }
2444
2445 addMachineFunctionPass(BranchRelaxationPass(), PMW);
2446}
2447
2448bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2449 CodeGenOptLevel Level) const {
2450 if (Opt.getNumOccurrences())
2451 return Opt;
2452 if (TM.getOptLevel() < Level)
2453 return false;
2454 return Opt;
2455}
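// Worked example (illustrative, not from the upstream sources): with the
// definition above, isPassEnabled(EnableVOPD, CodeGenOptLevel::Less) returns
//   - the explicit value whenever -amdgpu-enable-vopd=<bool> was given,
//   - false when compiling below -O1 (TM.getOptLevel() < Less),
//   - the option's default (true) otherwise.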
2456
2457void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(
2458 PassManagerWrapper &PMW) const {
2459 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2460 addFunctionPass(GVNPass(), PMW);
2461 else
2462 addFunctionPass(EarlyCSEPass(), PMW);
2463}
2464
2465void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2466 PassManagerWrapper &PMW) const {
2467 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
2468 addFunctionPass(LoopDataPrefetchPass(), PMW);
2469
2470 addFunctionPass(SeparateConstOffsetFromGEPPass(), PMW);
2471
2472 // ReassociateGEPs exposes more opportunities for SLSR. See
2473 // the example in reassociate-geps-and-slsr.ll.
2474 addFunctionPass(StraightLineStrengthReducePass(), PMW);
2475
2476 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
2477 // EarlyCSE can reuse.
2478 addEarlyCSEOrGVNPass(PMW);
2479
2480 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2481 addFunctionPass(NaryReassociatePass(), PMW);
2482
2483 // NaryReassociate on GEPs creates redundant common expressions, so run
2484 // EarlyCSE after it.
2485 addFunctionPass(EarlyCSEPass(), PMW);
2486}
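// A small illustration (not from the upstream sources) of why the EarlyCSE/GVN
// run above pays off: once SeparateConstOffsetFromGEP splits constant offsets
// out of GEPs, IR along the lines of
//
//   %p0 = getelementptr float, ptr %base, i64 %i
//   %q  = getelementptr float, ptr %p0, i64 4
//   %p1 = getelementptr float, ptr %base, i64 %i
//
// exposes %p0 and %p1 as identical expressions that EarlyCSE (or GVN at -O3)
// can merge, which is the reuse the comments above refer to.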