//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0 // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
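///
/// Loop pseudos are lowered along the same lines; a rough sketch (see
/// emitLoop and the SI_BREAK handlers below, register names illustrative):
///
/// %EXEC = S_ANDN2_B64_term %EXEC, %SGPR0 // Disable lanes that have broken
///                                        // out of the loop
/// S_CBRANCH_EXECNZ label_loop_begin      // Repeat while any lane is active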
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  LiveIntervals *LIS = nullptr;
  MachineRegisterInfo *MRI = nullptr;

  void emitIf(MachineInstr &MI);
  void emitElse(MachineInstr &MI);
  void emitBreak(MachineInstr &MI);
  void emitIfBreak(MachineInstr &MI);
  void emitElseBreak(MachineInstr &MI);
  void emitLoop(MachineInstr &MI);
  void emitEndCf(MachineInstr &MI);

  void findMaskOperands(MachineInstr &MI, unsigned OpNo,
                        SmallVectorImpl<MachineOperand> &Src) const;

  void combineMasks(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Should preserve the same set that TwoAddressInstructions does.
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreservedID(LiveVariablesID);
    AU.addPreservedID(MachineLoopInfoID);
    AU.addPreservedID(MachineDominatorsID);
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
  MachineOperand &ImpDefSCC = MI.getOperand(3);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  ImpDefSCC.setIsDead(IsDead);
}

char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;
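
// A rough sketch of the sequence emitIf produces below, with illustrative
// placeholder names (%copy, %tmp, %dst, %cond, %target; the real code uses
// fresh virtual registers):
//
//   %copy = COPY %EXEC              // also implicit-defs %EXEC to keep VALU
//                                   // instructions from scheduling in between
//   %tmp  = S_AND_B64 %copy, %cond  // lanes taking the THEN branch
//   %dst  = S_XOR_B64 %tmp, %copy   // lanes to re-enable at the ELSE
//   %EXEC = S_MOV_B64_term killed %tmp
//   SI_MASK_BRANCH %target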

void SILowerControlFlow::emitIf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  MachineOperand &SaveExec = MI.getOperand(0);
  MachineOperand &Cond = MI.getOperand(1);
  assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&
         Cond.getSubReg() == AMDGPU::NoSubRegister);

  unsigned SaveExecReg = SaveExec.getReg();

  MachineOperand &ImpDefSCC = MI.getOperand(4);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  // Add an implicit def of exec to discourage scheduling VALU after this which
  // will interfere with trying to form s_and_saveexec_b64 later.
  unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MachineInstr *CopyExec =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);

  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineInstr *And =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
    .addReg(CopyReg)
    //.addReg(AMDGPU::EXEC)
    .addReg(Cond.getReg());
  setImpSCCDefDead(*And, true);

  MachineInstr *Xor =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
    .addReg(Tmp)
    .addReg(CopyReg);
  setImpSCCDefDead(*Xor, ImpDefSCC.isDead());

  // Use a copy that is a terminator to get correct spill code placement with
  // fast regalloc.
  MachineInstr *SetExec =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
    .addReg(Tmp, RegState::Kill);

  // Insert a pseudo terminator to help keep the verifier happy. This will also
  // be used later when inserting skips.
  MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
                        .add(MI.getOperand(2));

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->InsertMachineInstrInMaps(*CopyExec);

  // Replace with the And so we don't need to fix the live interval for the
  // condition register.
  LIS->ReplaceMachineInstrInMaps(MI, *And);

  LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*SetExec);
  LIS->InsertMachineInstrInMaps(*NewBr);

  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
  MI.eraseFromParent();

  // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
  // hard to add another def here but I'm not sure how to correctly update the
  // valno.
  LIS->removeInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(Tmp);
  LIS->createAndComputeVirtRegInterval(CopyReg);
}
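
// A rough sketch of the sequence emitElse produces below, with illustrative
// placeholder names. The saveexec pair must land at the block start, ahead of
// any phis or spill code:
//
//   %copy = COPY %saved                 // exec mask saved by the SI_IF
//   %dst  = S_OR_SAVEEXEC_B64 %copy     // %dst = old EXEC; EXEC |= %copy
//
// and at the position of the original SI_ELSE:
//
//   %EXEC = S_XOR_B64_term %EXEC, %dst  // enable exactly the ELSE lanes
//   SI_MASK_BRANCH %target
//
// If EXEC was modified in between, %dst is first recomputed with an S_AND_B64
// of EXEC and the saved mask.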

void SILowerControlFlow::emitElse(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned DstReg = MI.getOperand(0).getReg();
  assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister);

  bool ExecModified = MI.getOperand(3).getImm() != 0;
  MachineBasicBlock::iterator Start = MBB.begin();

  // We are running before TwoAddressInstructions, and si_else's operands are
  // tied. In order to correctly tie the registers, split this into a copy of
  // the src like it does.
  unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MachineInstr *CopyExec =
    BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
      .add(MI.getOperand(1)); // Saved EXEC

  // This must be inserted before phis and any spill code inserted before the
  // else.
  unsigned SaveReg = ExecModified ?
    MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass) : DstReg;
  MachineInstr *OrSaveExec =
    BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), SaveReg)
    .addReg(CopyReg);

  MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();

  MachineBasicBlock::iterator ElsePt(MI);

  if (ExecModified) {
    MachineInstr *And =
      BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg)
      .addReg(AMDGPU::EXEC)
      .addReg(SaveReg);

    if (LIS)
      LIS->InsertMachineInstrInMaps(*And);
  }

  MachineInstr *Xor =
    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(DstReg);

  MachineInstr *Branch =
    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addMBB(DestBB);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*CopyExec);
  LIS->InsertMachineInstrInMaps(*OrSaveExec);

  LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*Branch);

  // src reg is tied to dst reg.
  LIS->removeInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(CopyReg);
  if (ExecModified)
    LIS->createAndComputeVirtRegInterval(SaveReg);

  // Let this be recomputed.
  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
}
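
// The break pseudos below all lower to a scalar OR that accumulates, into a
// mask register, the lanes that want to leave the loop. A rough summary, with
// illustrative names:
//
//   %brk = SI_BREAK %src            ->  %brk = S_OR_B64 %EXEC, %src
//   %brk = SI_IF_BREAK %cond, %src  ->  %brk = S_OR_B64 %cond, %src
//   %brk = SI_ELSE_BREAK %a, %b     ->  %brk = S_OR_B64 %a, %b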

void SILowerControlFlow::emitBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();

  MachineInstr *Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
                     .addReg(AMDGPU::EXEC)
                     .add(MI.getOperand(1));

  if (LIS)
    LIS->ReplaceMachineInstrInMaps(MI, *Or);
  MI.eraseFromParent();
}

void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
  MI.setDesc(TII->get(AMDGPU::S_OR_B64));
}

void SILowerControlFlow::emitElseBreak(MachineInstr &MI) {
  MI.setDesc(TII->get(AMDGPU::S_OR_B64));
}

void SILowerControlFlow::emitLoop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *AndN2 =
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .add(MI.getOperand(0));

  MachineInstr *Branch =
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .add(MI.getOperand(1));

  if (LIS) {
    LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
    LIS->InsertMachineInstrInMaps(*Branch);
  }

  MI.eraseFromParent();
}
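
// A rough sketch of what emitEndCf produces, with an illustrative name. The
// OR is placed at the start of the join block so it also covers any spill
// code inserted there:
//
//   SI_END_CF %saved  ->  %EXEC = S_OR_B64 %EXEC, %saved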

void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineBasicBlock::iterator InsPt = MBB.begin();
  MachineInstr *NewMI =
    BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .add(MI.getOperand(0));

  if (LIS)
    LIS->ReplaceMachineInstrInMaps(MI, *NewMI);

  MI.eraseFromParent();

  if (LIS)
    LIS->handleMove(*NewMI);
}

// Returns the replacement operands for a logical operation: either a single
// operand for exec, or two operands if the source was another equivalent
// operation.
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
       SmallVectorImpl<MachineOperand> &Src) const {
  MachineOperand &Op = MI.getOperand(OpNo);
  if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) {
    Src.push_back(Op);
    return;
  }

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getParent() != MI.getParent() ||
      !(Def->isFullCopy() || (Def->getOpcode() == MI.getOpcode())))
    return;

  // Make sure we do not modify exec between def and use.
  // A copy with an implicitly defined exec inserted earlier is an exception:
  // it does not really modify exec.
  for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
    if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
        !(I->isCopy() && I->getOperand(0).getReg() != AMDGPU::EXEC))
      return;

  for (const auto &SrcOp : Def->explicit_operands())
    if (SrcOp.isUse() && (!SrcOp.isReg() ||
        TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) ||
        SrcOp.getReg() == AMDGPU::EXEC))
      Src.push_back(SrcOp);
}

// Search for and combine pairs of equivalent instructions, like
// S_AND_B64 x, (S_AND_B64 x, y) => S_AND_B64 x, y
// S_OR_B64  x, (S_OR_B64  x, y) => S_OR_B64  x, y
// One of the operands is the exec mask.
void SILowerControlFlow::combineMasks(MachineInstr &MI) {
  assert(MI.getNumExplicitOperands() == 3);
  SmallVector<MachineOperand, 4> Ops;
  unsigned OpToReplace = 1;
  findMaskOperands(MI, 1, Ops);
  if (Ops.size() == 1) OpToReplace = 2; // First operand can be exec or its copy
  findMaskOperands(MI, 2, Ops);
  if (Ops.size() != 3) return;

  unsigned UniqueOpndIdx;
  if (Ops[0].isIdenticalTo(Ops[1])) UniqueOpndIdx = 2;
  else if (Ops[0].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else return;

  unsigned Reg = MI.getOperand(OpToReplace).getReg();
  MI.RemoveOperand(OpToReplace);
  MI.addOperand(Ops[UniqueOpndIdx]);
  if (MRI->use_empty(Reg))
    MRI->getUniqueVRegDef(Reg)->eraseFromParent();
}
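
// For example, a sketch with illustrative virtual registers, where %0 is the
// copy of EXEC created by emitIf:
//
//   %0 = COPY %EXEC
//   %1 = S_AND_B64 %EXEC, %2
//   %3 = S_AND_B64 %0, %1
//
// combineMasks rewrites the last instruction to
//
//   %3 = S_AND_B64 %0, %2
//
// and erases the def of %1 if it has no remaining uses.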

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  // This doesn't actually need LiveIntervals, but we can preserve them.
  LIS = getAnalysisIfAvailable<LiveIntervals>();
  MRI = &MF.getRegInfo();

  MachineFunction::iterator NextBB;
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock::iterator I, Next, Last;

    for (I = MBB.begin(), Last = MBB.end(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_IF:
        emitIf(MI);
        break;

      case AMDGPU::SI_ELSE:
        emitElse(MI);
        break;

      case AMDGPU::SI_BREAK:
        emitBreak(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        emitIfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        emitElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        emitLoop(MI);
        break;

      case AMDGPU::SI_END_CF:
        emitEndCf(MI);
        break;

      case AMDGPU::S_AND_B64:
      case AMDGPU::S_OR_B64:
        // Clean up bit manipulations on the exec mask.
        combineMasks(MI);
        Last = I;
        continue;

      default:
        Last = I;
        continue;
      }

      // Replay newly inserted code to combine masks.
      Next = (Last == MBB.end()) ? MBB.begin() : Last;
    }
  }

  return true;
}