1 //===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief The AMDGPU target machine contains all of the hardware specific
12 /// information needed to emit code for R600 and SI GPUs.
14 //===----------------------------------------------------------------------===//
16 #include "AMDGPUTargetMachine.h"
18 #include "AMDGPUAliasAnalysis.h"
19 #include "AMDGPUCallLowering.h"
20 #include "AMDGPUInstructionSelector.h"
21 #include "AMDGPULegalizerInfo.h"
22 #ifdef LLVM_BUILD_GLOBAL_ISEL
23 #include "AMDGPURegisterBankInfo.h"
25 #include "AMDGPUTargetObjectFile.h"
26 #include "AMDGPUTargetTransformInfo.h"
27 #include "GCNIterativeScheduler.h"
28 #include "GCNSchedStrategy.h"
29 #include "R600MachineScheduler.h"
30 #include "SIMachineScheduler.h"
31 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
32 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
33 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
34 #include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
35 #include "llvm/CodeGen/Passes.h"
36 #include "llvm/CodeGen/TargetPassConfig.h"
37 #include "llvm/Support/TargetRegistry.h"
38 #include "llvm/Transforms/IPO.h"
39 #include "llvm/Transforms/IPO/AlwaysInliner.h"
40 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include "llvm/Transforms/Scalar/GVN.h"
43 #include "llvm/Transforms/Vectorize.h"
44 #include "llvm/IR/Attributes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/LegacyPassManager.h"
47 #include "llvm/Pass.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Target/TargetLoweringObjectFile.h"
55 static cl::opt<bool> EnableR600StructurizeCFG(
56 "r600-ir-structurize",
57 cl::desc("Use StructurizeCFG IR pass"),
60 static cl::opt<bool> EnableSROA(
62 cl::desc("Run SROA after promote alloca pass"),
67 EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
68 cl::desc("Run early if-conversion"),
71 static cl::opt<bool> EnableR600IfConvert(
73 cl::desc("Use if conversion pass"),
77 // Option to disable vectorizer for tests.
78 static cl::opt<bool> EnableLoadStoreVectorizer(
79 "amdgpu-load-store-vectorizer",
80 cl::desc("Enable load store vectorizer"),
84 // Option to to control global loads scalarization
85 static cl::opt<bool> ScalarizeGlobal(
86 "amdgpu-scalarize-global-loads",
87 cl::desc("Enable global load scalarization"),
91 // Option to run internalize pass.
92 static cl::opt<bool> InternalizeSymbols(
93 "amdgpu-internalize-symbols",
94 cl::desc("Enable elimination of non-kernel functions and unused globals"),
98 static cl::opt<bool> EnableSDWAPeephole(
99 "amdgpu-sdwa-peephole",
100 cl::desc("Enable SDWA peepholer"),
103 // Enable address space based alias analysis
104 static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
105 cl::desc("Enable AMDGPU Alias Analysis"),
108 extern "C" void LLVMInitializeAMDGPUTarget() {
109 // Register the target
110 RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
111 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
113 PassRegistry *PR = PassRegistry::getPassRegistry();
114 initializeSILowerI1CopiesPass(*PR);
115 initializeSIFixSGPRCopiesPass(*PR);
116 initializeSIFixVGPRCopiesPass(*PR);
117 initializeSIFoldOperandsPass(*PR);
118 initializeSIPeepholeSDWAPass(*PR);
119 initializeSIShrinkInstructionsPass(*PR);
120 initializeSIFixControlFlowLiveIntervalsPass(*PR);
121 initializeSILoadStoreOptimizerPass(*PR);
122 initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
123 initializeAMDGPUAnnotateUniformValuesPass(*PR);
124 initializeAMDGPULowerIntrinsicsPass(*PR);
125 initializeAMDGPUPromoteAllocaPass(*PR);
126 initializeAMDGPUCodeGenPreparePass(*PR);
127 initializeAMDGPUUnifyMetadataPass(*PR);
128 initializeSIAnnotateControlFlowPass(*PR);
129 initializeSIInsertWaitsPass(*PR);
130 initializeSIWholeQuadModePass(*PR);
131 initializeSILowerControlFlowPass(*PR);
132 initializeSIInsertSkipsPass(*PR);
133 initializeSIDebuggerInsertNopsPass(*PR);
134 initializeSIOptimizeExecMaskingPass(*PR);
135 initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
136 initializeAMDGPUAAWrapperPassPass(*PR);
139 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
140 return llvm::make_unique<AMDGPUTargetObjectFile>();
143 static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
144 return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
147 static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
148 return new SIScheduleDAGMI(C);
151 static ScheduleDAGInstrs *
152 createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
153 ScheduleDAGMILive *DAG =
154 new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
155 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
156 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
160 static ScheduleDAGInstrs *
161 createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
162 auto DAG = new GCNIterativeScheduler(C,
163 GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
164 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
165 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
169 static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
170 return new GCNIterativeScheduler(C,
171 GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
// Register the scheduler factories above with -misched so they can be
// selected by name on the command line (e.g. -misched=gcn-minreg).
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                 createR600MachineScheduler);
static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);
static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);
static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
                             "Run GCN scheduler to maximize occupancy (experimental)",
                             createIterativeGCNMaxOccupancyMachineScheduler);
static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
                       "Run GCN iterative scheduler for minimal register usage (experimental)",
                       createMinRegScheduler);
197 static StringRef computeDataLayout(const Triple &TT) {
198 if (TT.getArch() == Triple::r600) {
200 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
201 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
204 // 32-bit private, local, and region pointers. 64-bit global, constant and
206 if (TT.getEnvironmentName() == "amdgiz" ||
207 TT.getEnvironmentName() == "amdgizcl")
208 return "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
209 "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
210 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
211 return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
212 "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
213 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
217 static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
221 // HSA only supports CI+, so change the default GPU to a CI for HSA.
222 if (TT.getArch() == Triple::amdgcn)
223 return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";
228 static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
229 // The AMDGPU toolchain only supports generating shared objects, so we
230 // must always use PIC.
234 AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
235 StringRef CPU, StringRef FS,
236 TargetOptions Options,
237 Optional<Reloc::Model> RM,
239 CodeGenOpt::Level OptLevel)
240 : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
241 FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
242 TLOF(createTLOF(getTargetTriple())) {
243 AS = AMDGPU::getAMDGPUAS(TT);
247 AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
249 StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
250 Attribute GPUAttr = F.getFnAttribute("target-cpu");
251 return GPUAttr.hasAttribute(Attribute::None) ?
252 getTargetCPU() : GPUAttr.getValueAsString();
255 StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
256 Attribute FSAttr = F.getFnAttribute("target-features");
258 return FSAttr.hasAttribute(Attribute::None) ?
259 getTargetFeatureString() :
260 FSAttr.getValueAsString();
263 static ImmutablePass *createAMDGPUExternalAAWrapperPass() {
264 return createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
265 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
266 AAR.addAAResult(WrapperPass->getResult());
270 void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
271 Builder.DivergentTarget = true;
273 bool Internalize = InternalizeSymbols &&
274 (getOptLevel() > CodeGenOpt::None) &&
275 (getTargetTriple().getArch() == Triple::amdgcn);
276 bool AMDGPUAA = EnableAMDGPUAliasAnalysis && getOptLevel() > CodeGenOpt::None;
278 Builder.addExtension(
279 PassManagerBuilder::EP_ModuleOptimizerEarly,
280 [Internalize, AMDGPUAA](const PassManagerBuilder &,
281 legacy::PassManagerBase &PM) {
283 PM.add(createAMDGPUAAWrapperPass());
284 PM.add(createAMDGPUExternalAAWrapperPass());
286 PM.add(createAMDGPUUnifyMetadataPass());
288 PM.add(createInternalizePass([=](const GlobalValue &GV) -> bool {
289 if (const Function *F = dyn_cast<Function>(&GV)) {
290 if (F->isDeclaration())
292 switch (F->getCallingConv()) {
295 case CallingConv::AMDGPU_VS:
296 case CallingConv::AMDGPU_GS:
297 case CallingConv::AMDGPU_PS:
298 case CallingConv::AMDGPU_CS:
299 case CallingConv::AMDGPU_KERNEL:
300 case CallingConv::SPIR_KERNEL:
304 return !GV.use_empty();
306 PM.add(createGlobalDCEPass());
307 PM.add(createAMDGPUAlwaysInlinePass());
311 Builder.addExtension(
312 PassManagerBuilder::EP_EarlyAsPossible,
313 [AMDGPUAA](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
315 PM.add(createAMDGPUAAWrapperPass());
316 PM.add(createAMDGPUExternalAAWrapperPass());
321 //===----------------------------------------------------------------------===//
322 // R600 Target Machine (R600 -> Cayman)
323 //===----------------------------------------------------------------------===//
325 R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
326 StringRef CPU, StringRef FS,
327 TargetOptions Options,
328 Optional<Reloc::Model> RM,
329 CodeModel::Model CM, CodeGenOpt::Level OL)
330 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
331 setRequiresStructuredCFG(true);
334 const R600Subtarget *R600TargetMachine::getSubtargetImpl(
335 const Function &F) const {
336 StringRef GPU = getGPUName(F);
337 StringRef FS = getFeatureString(F);
339 SmallString<128> SubtargetKey(GPU);
340 SubtargetKey.append(FS);
342 auto &I = SubtargetMap[SubtargetKey];
344 // This needs to be done before we create a new subtarget since any
345 // creation will depend on the TM and the code generation flags on the
346 // function that reside in TargetOptions.
347 resetTargetOptions(F);
348 I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
354 //===----------------------------------------------------------------------===//
355 // GCN Target Machine (SI+)
356 //===----------------------------------------------------------------------===//
#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {

// Concrete GISelAccessor that owns the four GlobalISel components for the SI
// subtarget and hands out non-owning pointers to them. Defect fixed: the
// anonymous-namespace opener, the getters' closing braces, the struct's
// closing `};`, and the matching #endif were truncated.
struct SIGISelActualAccessor : public GISelAccessor {
  std::unique_ptr<AMDGPUCallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  const AMDGPUCallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }

  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }

  const LegalizerInfo *getLegalizerInfo() const override {
    return Legalizer.get();
  }

  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};

} // end anonymous namespace
#endif
// GCN (SI+) target machine; all setup is done by the common AMDGPU base
// constructor.
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
390 const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
391 StringRef GPU = getGPUName(F);
392 StringRef FS = getFeatureString(F);
394 SmallString<128> SubtargetKey(GPU);
395 SubtargetKey.append(FS);
397 auto &I = SubtargetMap[SubtargetKey];
399 // This needs to be done before we create a new subtarget since any
400 // creation will depend on the TM and the code generation flags on the
401 // function that reside in TargetOptions.
402 resetTargetOptions(F);
403 I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
405 #ifndef LLVM_BUILD_GLOBAL_ISEL
406 GISelAccessor *GISel = new GISelAccessor();
408 SIGISelActualAccessor *GISel = new SIGISelActualAccessor();
409 GISel->CallLoweringInfo.reset(
410 new AMDGPUCallLowering(*I->getTargetLowering()));
411 GISel->Legalizer.reset(new AMDGPULegalizerInfo());
413 GISel->RegBankInfo.reset(new AMDGPURegisterBankInfo(*I->getRegisterInfo()));
414 GISel->InstSelector.reset(new AMDGPUInstructionSelector(*I,
415 *static_cast<AMDGPURegisterBankInfo*>(GISel->RegBankInfo.get())));
418 I->setGISelAccessor(*GISel);
421 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
426 //===----------------------------------------------------------------------===//
428 //===----------------------------------------------------------------------===//
432 class AMDGPUPassConfig : public TargetPassConfig {
434 AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
435 : TargetPassConfig(TM, PM) {
436 // Exceptions and StackMaps are not supported, so these passes will never do
438 disablePass(&StackMapLivenessID);
439 disablePass(&FuncletLayoutID);
442 AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
443 return getTM<AMDGPUTargetMachine>();
447 createMachineScheduler(MachineSchedContext *C) const override {
448 ScheduleDAGMILive *DAG = createGenericSchedLive(C);
449 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
450 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
454 void addEarlyCSEOrGVNPass();
455 void addStraightLineScalarOptimizationPasses();
456 void addIRPasses() override;
457 void addCodeGenPrepare() override;
458 bool addPreISel() override;
459 bool addInstSelector() override;
460 bool addGCPasses() override;
463 class R600PassConfig final : public AMDGPUPassConfig {
465 R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
466 : AMDGPUPassConfig(TM, PM) {}
468 ScheduleDAGInstrs *createMachineScheduler(
469 MachineSchedContext *C) const override {
470 return createR600MachineScheduler(C);
473 bool addPreISel() override;
474 void addPreRegAlloc() override;
475 void addPreSched2() override;
476 void addPreEmitPass() override;
479 class GCNPassConfig final : public AMDGPUPassConfig {
481 GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
482 : AMDGPUPassConfig(TM, PM) {}
484 GCNTargetMachine &getGCNTargetMachine() const {
485 return getTM<GCNTargetMachine>();
489 createMachineScheduler(MachineSchedContext *C) const override;
491 bool addPreISel() override;
492 void addMachineSSAOptimization() override;
493 bool addILPOpts() override;
494 bool addInstSelector() override;
495 #ifdef LLVM_BUILD_GLOBAL_ISEL
496 bool addIRTranslator() override;
497 bool addLegalizeMachineIR() override;
498 bool addRegBankSelect() override;
499 bool addGlobalInstructionSelect() override;
501 void addFastRegAlloc(FunctionPass *RegAllocPass) override;
502 void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
503 void addPreRegAlloc() override;
504 void addPostRegAlloc() override;
505 void addPreSched2() override;
506 void addPreEmitPass() override;
509 } // end anonymous namespace
511 TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
512 return TargetIRAnalysis([this](const Function &F) {
513 return TargetTransformInfo(AMDGPUTTIImpl(this, F));
517 void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
518 if (getOptLevel() == CodeGenOpt::Aggressive)
519 addPass(createGVNPass());
521 addPass(createEarlyCSEPass());
524 void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
525 addPass(createSeparateConstOffsetFromGEPPass());
526 addPass(createSpeculativeExecutionPass());
527 // ReassociateGEPs exposes more opportunites for SLSR. See
528 // the example in reassociate-geps-and-slsr.ll.
529 addPass(createStraightLineStrengthReducePass());
530 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
531 // EarlyCSE can reuse.
532 addEarlyCSEOrGVNPass();
533 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
534 addPass(createNaryReassociatePass());
535 // NaryReassociate on GEPs creates redundant common expressions, so run
536 // EarlyCSE after it.
537 addPass(createEarlyCSEPass());
540 void AMDGPUPassConfig::addIRPasses() {
541 // There is no reason to run these.
542 disablePass(&StackMapLivenessID);
543 disablePass(&FuncletLayoutID);
544 disablePass(&PatchableFunctionID);
546 addPass(createAMDGPULowerIntrinsicsPass());
548 // Function calls are not supported, so make sure we inline everything.
549 addPass(createAMDGPUAlwaysInlinePass());
550 addPass(createAlwaysInlinerLegacyPass());
551 // We need to add the barrier noop pass, otherwise adding the function
552 // inlining pass will cause all of the PassConfigs passes to be run
553 // one function at a time, which means if we have a nodule with two
554 // functions, then we will generate code for the first function
555 // without ever running any passes on the second.
556 addPass(createBarrierNoopPass());
558 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
560 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
561 // TODO: May want to move later or split into an early and late one.
563 addPass(createAMDGPUCodeGenPreparePass(
564 static_cast<const GCNTargetMachine *>(&TM)));
567 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
568 addPass(createAMDGPUOpenCLImageTypeLoweringPass());
570 if (TM.getOptLevel() > CodeGenOpt::None) {
571 addPass(createInferAddressSpacesPass());
572 addPass(createAMDGPUPromoteAlloca(&TM));
575 addPass(createSROAPass());
577 addStraightLineScalarOptimizationPasses();
579 if (EnableAMDGPUAliasAnalysis) {
580 addPass(createAMDGPUAAWrapperPass());
581 addPass(createExternalAAWrapperPass([](Pass &P, Function &,
583 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
584 AAR.addAAResult(WrapperPass->getResult());
589 TargetPassConfig::addIRPasses();
591 // EarlyCSE is not always strong enough to clean up what LSR produces. For
592 // example, GVN can combine
599 // %0 = shl nsw %a, 2
602 // but EarlyCSE can do neither of them.
603 if (getOptLevel() != CodeGenOpt::None)
604 addEarlyCSEOrGVNPass();
607 void AMDGPUPassConfig::addCodeGenPrepare() {
608 TargetPassConfig::addCodeGenPrepare();
610 if (EnableLoadStoreVectorizer)
611 addPass(createLoadStoreVectorizerPass());
614 bool AMDGPUPassConfig::addPreISel() {
615 addPass(createFlattenCFGPass());
619 bool AMDGPUPassConfig::addInstSelector() {
620 addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
624 bool AMDGPUPassConfig::addGCPasses() {
625 // Do nothing. GC is not supported.
629 //===----------------------------------------------------------------------===//
631 //===----------------------------------------------------------------------===//
633 bool R600PassConfig::addPreISel() {
634 AMDGPUPassConfig::addPreISel();
636 if (EnableR600StructurizeCFG)
637 addPass(createStructurizeCFGPass());
641 void R600PassConfig::addPreRegAlloc() {
642 addPass(createR600VectorRegMerger(*TM));
645 void R600PassConfig::addPreSched2() {
646 addPass(createR600EmitClauseMarkers(), false);
647 if (EnableR600IfConvert)
648 addPass(&IfConverterID, false);
649 addPass(createR600ClauseMergePass(*TM), false);
652 void R600PassConfig::addPreEmitPass() {
653 addPass(createAMDGPUCFGStructurizerPass(), false);
654 addPass(createR600ExpandSpecialInstrsPass(*TM), false);
655 addPass(&FinalizeMachineBundlesID, false);
656 addPass(createR600Packetizer(*TM), false);
657 addPass(createR600ControlFlowFinalizer(*TM), false);
660 TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
661 return new R600PassConfig(this, PM);
664 //===----------------------------------------------------------------------===//
666 //===----------------------------------------------------------------------===//
668 ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
669 MachineSchedContext *C) const {
670 const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
671 if (ST.enableSIScheduler())
672 return createSIMachineScheduler(C);
673 return createGCNMaxOccupancyMachineScheduler(C);
676 bool GCNPassConfig::addPreISel() {
677 AMDGPUPassConfig::addPreISel();
679 // FIXME: We need to run a pass to propagate the attributes when calls are
681 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
682 addPass(createAMDGPUAnnotateKernelFeaturesPass(&TM));
684 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
685 // regions formed by them.
686 addPass(&AMDGPUUnifyDivergentExitNodesID);
687 addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
688 addPass(createSinkingPass());
689 addPass(createSITypeRewriter());
690 addPass(createAMDGPUAnnotateUniformValues());
691 addPass(createSIAnnotateControlFlowPass());
696 void GCNPassConfig::addMachineSSAOptimization() {
697 TargetPassConfig::addMachineSSAOptimization();
699 // We want to fold operands after PeepholeOptimizer has run (or as part of
700 // it), because it will eliminate extra copies making it easier to fold the
701 // real source operand. We want to eliminate dead instructions after, so that
702 // we see fewer uses of the copies. We then need to clean up the dead
703 // instructions leftover after the operands are folded as well.
705 // XXX - Can we get away without running DeadMachineInstructionElim again?
706 addPass(&SIFoldOperandsID);
707 addPass(&DeadMachineInstructionElimID);
708 addPass(&SILoadStoreOptimizerID);
711 bool GCNPassConfig::addILPOpts() {
712 if (EnableEarlyIfConversion)
713 addPass(&EarlyIfConverterID);
715 TargetPassConfig::addILPOpts();
719 bool GCNPassConfig::addInstSelector() {
720 AMDGPUPassConfig::addInstSelector();
721 addPass(createSILowerI1CopiesPass());
722 addPass(&SIFixSGPRCopiesID);
#ifdef LLVM_BUILD_GLOBAL_ISEL
// GlobalISel pipeline hooks: each installs the corresponding generic pass
// and returns false (no error). Defect fixed: the `return false;` lines,
// the functions' closing braces, and the trailing #endif were truncated.
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif
749 void GCNPassConfig::addPreRegAlloc() {
750 addPass(createSIShrinkInstructionsPass());
751 if (EnableSDWAPeephole) {
752 addPass(&SIPeepholeSDWAID);
753 addPass(&DeadMachineInstructionElimID);
755 addPass(createSIWholeQuadModePass());
758 void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
759 // FIXME: We have to disable the verifier here because of PHIElimination +
760 // TwoAddressInstructions disabling it.
762 // This must be run immediately after phi elimination and before
763 // TwoAddressInstructions, otherwise the processing of the tied operand of
764 // SI_ELSE will introduce a copy of the tied operand source after the else.
765 insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
767 TargetPassConfig::addFastRegAlloc(RegAllocPass);
770 void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
771 // This needs to be run directly before register allocation because earlier
772 // passes might recompute live intervals.
773 insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
775 // This must be run immediately after phi elimination and before
776 // TwoAddressInstructions, otherwise the processing of the tied operand of
777 // SI_ELSE will introduce a copy of the tied operand source after the else.
778 insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
780 TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
783 void GCNPassConfig::addPostRegAlloc() {
784 addPass(&SIFixVGPRCopiesID);
785 addPass(&SIOptimizeExecMaskingID);
786 TargetPassConfig::addPostRegAlloc();
789 void GCNPassConfig::addPreSched2() {
792 void GCNPassConfig::addPreEmitPass() {
793 // The hazard recognizer that runs as part of the post-ra scheduler does not
794 // guarantee to be able handle all hazards correctly. This is because if there
795 // are multiple scheduling regions in a basic block, the regions are scheduled
796 // bottom up, so when we begin to schedule a region we don't know what
797 // instructions were emitted directly before it.
799 // Here we add a stand-alone hazard recognizer pass which can handle all
801 addPass(&PostRAHazardRecognizerID);
803 addPass(createSIInsertWaitsPass());
804 addPass(createSIShrinkInstructionsPass());
805 addPass(&SIInsertSkipsPassID);
806 addPass(createSIDebuggerInsertNopsPass());
807 addPass(&BranchRelaxationPassID);
810 TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
811 return new GCNPassConfig(this, PM);