//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// that define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's result is a vector instruction. If the
/// PHI's definition class is constrained to <vgpr>, the coalescer will be
/// unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
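///
/// For example (an illustrative sketch, not verbatim pass output), keeping
/// %vreg4 in <vgpr> in the first snippet above forces the incoming values
/// into VGPR copies instead, and every copy remains legal:
///
///   %vreg4 <vgpr> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vgpr>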
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <list>
#include <map>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    // Skip non-register and physical-register operands.
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC);
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
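//
// For example (an illustrative sketch, not guaranteed output): a 64-bit
// constant materialized as
//
//  SGPR0 = S_MOV_B32 0
//  SGPR1 = S_MOV_B32 1
//  SGPR0_SGPR1 = REG_SEQUENCE SGPR0, sub0, SGPR1, sub1
//  VGPR0_VGPR1 = COPY SGPR0_SGPR1
//
// becomes a VGPR REG_SEQUENCE fed by two 32-bit copies, each of which can
// later fold its S_MOV_B32 immediate directly into a V_MOV_B32.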
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    // Copy each SGPR input into a fresh VGPR and rewrite the REG_SEQUENCE
    // operand to use it.
    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  // PHI operands come in (value, predecessor MBB) pairs; check each value.
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}

static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_BREAK:
    case AMDGPU::SI_IF_BREAK:
    case AMDGPU::SI_ELSE_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}

static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}

// Checks if there is a potential path from instruction From to instruction To.
// If CutOff is specified and sits on that path, we ignore the portion of the
// path above it and report To as not reachable.
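//
// For example (illustrative): given blocks A -> B -> C, an instruction in A,
// and an instruction in C, a CutOff of B stops the backwards walk from C at
// B, so the instruction in A is reported as not reachable.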
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either the From block dominates the To block, or the instructions are
  // in the same block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
           (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
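//
// For example (an illustrative sketch): if bb.1 and bb.2 both contain
//
//  m0 = S_MOV_B32 -1
//
// and nothing can clobber m0 along the way, one init is erased and the other
// is hoisted to the first non-PHI position of their nearest common dominator.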
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  typedef std::map<unsigned, std::list<MachineInstr *>> InitListMap;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr *, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check for any possible interference.
        auto intereferes = [&](MachineBasicBlock::iterator From,
                               MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and is located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (any_of(Clobbers, interferes)) ||
                 (any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first && any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!intereferes(MI2, MI1)) {
            // MI1 already dominates MI2, so MI2 is redundant.
            DEBUG(dbgs() << "Erasing from BB#" << MI2->getParent()->getNumber()
                         << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!intereferes(MI1, MI2)) {
            // MI2 already dominates MI1, so MI1 is redundant.
            DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
                         << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          // Neither dominates the other: hoist MI2 into the nearest common
          // dominator and erase MI1.
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
                         << " " << *MI1 << "and moving from BB#"
                         << MI2->getParent()->getNumber() << " to BB#"
                         << I->getParent()->getNumber() << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        if (MI.getNumExplicitOperands() == 5) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For example:
        //
        //   sgpr0 = def;
        //   ...
        //   sgpr1 = def;
        //   ...
        //   sgpr2 = PHI sgpr0, sgpr1
        //   use sgpr2;
        //
        // Will become:
        //
        //   sgpr2 = def;
        //   ...
        //   sgpr3 = def;
        //   ...
        //   use sgpr3;
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be over-written.
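        //
        // Illustrative sketch of the exception:
        //
        //   bb.cond:
        //     %brk = SI_IF_BREAK ...
        //   bb.loop:
        //     %phi = PHI %brk, <bb.cond>, ...
        //
        // bb.loop is only ever entered via bb.cond, so %brk is always
        // written before the PHI reads it.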

        SmallSet<unsigned, 8> Visited;
        if (phiHasVGPROperands(MI, MRI, TRI, TII) ||
            !phiHasBreakDef(MI, MRI, Visited)) {
          DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}