//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their address on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//
#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "r600cf"

namespace {
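// Model of the hardware control-flow stack. Loops and WQM pushes consume a
// full stack entry; non-WQM branch pushes consume cheaper sub-entries, four
// of which pack into one entry. The pass mirrors every push/pop here so it
// can report the worst-case STACK_SIZE required by the shader.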
struct CFStack {
  enum StackItem {
    ENTRY = 0,
    SUB_ENTRY = 1,
    FIRST_NON_WQM_PUSH = 2,
    FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
  };

  const R600Subtarget *ST;
  std::vector<StackItem> BranchStack;
  std::vector<StackItem> LoopStack;
  unsigned MaxStackSize;
  unsigned CurrentEntries;
  unsigned CurrentSubEntries;

  CFStack(const R600Subtarget *st, CallingConv::ID cc) : ST(st),
      // We need to reserve a stack entry for CALL_FS in vertex shaders.
      MaxStackSize(cc == CallingConv::AMDGPU_VS ? 1 : 0),
      CurrentEntries(0), CurrentSubEntries(0) {}
  unsigned getLoopDepth();
  bool branchStackContains(CFStack::StackItem);
  bool requiresWorkAroundForInst(unsigned Opcode);
  unsigned getSubEntrySize(CFStack::StackItem Item);
  void updateMaxStackSize();
  void pushBranch(unsigned Opcode, bool isWQM = false);
  void pushLoop();
  void popBranch();
  void popLoop();
};
unsigned CFStack::getLoopDepth() {
  return LoopStack.size();
}
bool CFStack::branchStackContains(CFStack::StackItem Item) {
  for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
       E = BranchStack.end(); I != E; ++I) {
    if (*I == Item)
      return true;
  }
  return false;
}
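// Returns true when the given CF instruction needs the hardware bug
// work-around on this subtarget: on Cayman, ALU_PUSH_BEFORE inside a nested
// loop always does; on parts with the CF_ALU bug, only once enough
// sub-entries are already live on the stack.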
bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
  if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST->hasCaymanISA() &&
      getLoopDepth() > 1)
    return true;

  if (!ST->hasCFAluBug())
    return false;

  switch (Opcode) {
  default: return false;
  case AMDGPU::CF_ALU_PUSH_BEFORE:
  case AMDGPU::CF_ALU_ELSE_AFTER:
  case AMDGPU::CF_ALU_BREAK:
  case AMDGPU::CF_ALU_CONTINUE:
    if (CurrentSubEntries == 0)
      return false;

    if (ST->getWavefrontSize() == 64) {
      // We are being conservative here.  We only require this work-around if
      // CurrentSubEntries > 3 &&
      // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
      //
      // We have to be conservative, because we don't know for certain that
      // our stack allocation algorithm for Evergreen/NI is correct.  Applying
      // this work-around when CurrentSubEntries > 3 allows us to over-allocate
      // stack resources without any problems.
      return CurrentSubEntries > 3;
    } else {
      assert(ST->getWavefrontSize() == 32);
      // We are being conservative here.  We only require the work-around if
      // CurrentSubEntries > 7 &&
      // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
      // See the comment on the wavefront size == 64 case for why we are
      // being conservative.
      return CurrentSubEntries > 7;
    }
  }
}
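// Number of stack sub-entries consumed by one StackItem. The first non-WQM
// push needs extra slack (more on R600/R700 than on Evergreen and later);
// an ordinary sub-entry costs exactly one.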
unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
  switch (Item) {
  default:
    return 0;
  case CFStack::FIRST_NON_WQM_PUSH:
    assert(!ST->hasCaymanISA());
    if (ST->getGeneration() <= R600Subtarget::R700) {
      // +1 For the push operation.
      // +2 Extra space required.
      return 3;
    } else {
      // Some documentation says that this is not necessary on Evergreen,
      // but experimentation has shown that we need to allocate 1 extra
      // sub-entry for the first non-WQM push.
      // +1 For the push operation.
      // +1 Extra space required.
      return 2;
    }
  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
    assert(ST->getGeneration() >= R600Subtarget::EVERGREEN);
    // +1 For the push operation.
    // +1 Extra space required.
    return 2;
  case CFStack::SUB_ENTRY:
    return 1;
  }
}
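// Fold the current depth into the running maximum. Sub-entries pack four to
// a full entry, so they are rounded up to a multiple of four before being
// converted into whole entries.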
void CFStack::updateMaxStackSize() {
  unsigned CurrentStackSize =
      CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}
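// Record a branch push on the model stack. WQM pushes always cost a full
// entry; non-WQM pushes normally cost a sub-entry, except for the first one
// (and, past Evergreen, the first one sitting on top of a full entry), which
// is padded as described in getSubEntrySize().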
void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
  CFStack::StackItem Item = CFStack::ENTRY;
  switch (Opcode) {
  case AMDGPU::CF_PUSH_EG:
  case AMDGPU::CF_ALU_PUSH_BEFORE:
    if (!isWQM) {
      if (!ST->hasCaymanISA() &&
          !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
        Item = CFStack::FIRST_NON_WQM_PUSH;  // May not be required on Evergreen/NI
                                             // See comment in
                                             // CFStack::getSubEntrySize()
      else if (CurrentEntries > 0 &&
               ST->getGeneration() > R600Subtarget::EVERGREEN &&
               !ST->hasCaymanISA() &&
               !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
        Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
      else
        Item = CFStack::SUB_ENTRY;
    } else
      Item = CFStack::ENTRY;
    break;
  }
  BranchStack.push_back(Item);
  if (Item == CFStack::ENTRY)
    CurrentEntries++;
  else
    CurrentSubEntries += getSubEntrySize(Item);
  updateMaxStackSize();
}
void CFStack::pushLoop() {
  LoopStack.push_back(CFStack::ENTRY);
  CurrentEntries++;
  updateMaxStackSize();
}
void CFStack::popBranch() {
  CFStack::StackItem Top = BranchStack.back();
  if (Top == CFStack::ENTRY)
    CurrentEntries--;
  else
    CurrentSubEntries -= getSubEntrySize(Top);
  BranchStack.pop_back();
}
void CFStack::popLoop() {
  CurrentEntries--;
  LoopStack.pop_back();
}
class R600ControlFlowFinalizer : public MachineFunctionPass {
private:
  typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;

  enum ControlFlowInstruction {
    CF_TC,
    CF_VC,
    CF_CALL_FS,
    CF_WHILE_LOOP,
    CF_END_LOOP,
    CF_LOOP_BREAK,
    CF_LOOP_CONTINUE,
    CF_JUMP,
    CF_ELSE,
    CF_POP,
    CF_END
  };

  static char ID;
  const R600InstrInfo *TII;
  const R600RegisterInfo *TRI;
  unsigned MaxFetchInst;
  const R600Subtarget *ST;
  bool IsTrivialInst(MachineInstr &MI) const {
    switch (MI.getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }
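  // Map a target-independent control-flow opcode onto the native instruction
  // for the current generation (R600 vs. Evergreen encodings, plus the
  // Cayman variant of CF_END).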
  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
    unsigned Opcode = 0;
    bool isEg = (ST->getGeneration() >= R600Subtarget::EVERGREEN);
    switch (CFI) {
    case CF_TC:
      Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
      break;
    case CF_VC:
      Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
      break;
    case CF_CALL_FS:
      Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
      break;
    case CF_WHILE_LOOP:
      Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
      break;
    case CF_END_LOOP:
      Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
      break;
    case CF_LOOP_BREAK:
      Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
      break;
    case CF_LOOP_CONTINUE:
      Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
      break;
    case CF_JUMP:
      Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
      break;
    case CF_ELSE:
      Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
      break;
    case CF_POP:
      Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
      break;
    case CF_END:
      if (ST->hasCaymanISA()) {
        Opcode = AMDGPU::CF_END_CM;
        break;
      }
      Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
      break;
    }
    assert(Opcode && "No opcode selected");
    return TII->get(Opcode);
  }
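  // A fetch instruction may join the clause under construction only if none
  // of its sources were written by an earlier instruction of the same
  // clause. Registers are tracked at 128-bit superregister granularity.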
  bool isCompatibleWithClause(const MachineInstr &MI,
                              std::set<unsigned> &DstRegs) const {
    unsigned DstMI, SrcMI;
    for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                          E = MI.operands_end();
         I != E; ++I) {
      const MachineOperand &MO = *I;
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          DstMI = Reg;
        else
          DstMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
      if (MO.isUse()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          SrcMI = Reg;
        else
          SrcMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
    }
    if ((DstRegs.find(SrcMI) == DstRegs.end())) {
      DstRegs.insert(DstMI);
      return true;
    } else
      return false;
  }
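  // Greedily grow a texture or vertex fetch clause: starting at I, take up
  // to MaxFetchInst consecutive fetches of the same kind that have no
  // intra-clause dependency, then prepend the matching CF_TC/CF_VC header.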
  ClauseFile
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    unsigned AluInstCount = 0;
    bool IsTex = TII->usesTextureCache(*ClauseHead);
    std::set<unsigned> DstRegs;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(*I))
        continue;
      if (AluInstCount >= MaxFetchInst)
        break;
      if ((IsTex && !TII->usesTextureCache(*I)) ||
          (!IsTex && !TII->usesVertexCache(*I)))
        break;
      if (!isCompatibleWithClause(*I, DstRegs))
        break;
      AluInstCount++;
      ClauseContent.push_back(&*I);
    }
    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        getHWInstrDesc(IsTex ? CF_TC : CF_VC))
        .addImm(0) // ADDR
        .addImm(AluInstCount - 1); // COUNT
    return ClauseFile(MIb, std::move(ClauseContent));
  }
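  // Rewrite every ALU_LITERAL_X source of MI to one of the four per-group
  // literal registers (X/Y/Z/W), reusing a slot when the same immediate is
  // already present; an instruction group can hold at most four literals.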
  void getLiteral(MachineInstr &MI, std::vector<MachineOperand *> &Lits) const {
    static const unsigned LiteralRegs[] = {
      AMDGPU::ALU_LITERAL_X,
      AMDGPU::ALU_LITERAL_Y,
      AMDGPU::ALU_LITERAL_Z,
      AMDGPU::ALU_LITERAL_W
    };
    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
        TII->getSrcs(MI);
    for (const auto &Src : Srcs) {
      if (Src.first->getReg() != AMDGPU::ALU_LITERAL_X)
        continue;
      int64_t Imm = Src.second;
      std::vector<MachineOperand *>::iterator It =
          find_if(Lits, [&](MachineOperand *val) {
            return val->isImm() && (val->getImm() == Imm);
          });

      // Get corresponding Operand
      MachineOperand &Operand = MI.getOperand(
          TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));

      if (It != Lits.end()) {
        // Reuse existing literal reg
        unsigned Index = It - Lits.begin();
        Src.first->setReg(LiteralRegs[Index]);
      } else {
        // Allocate new literal reg
        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
        Src.first->setReg(LiteralRegs[Lits.size()]);
        Lits.push_back(&Operand);
      }
    }
  }
  MachineBasicBlock::iterator insertLiterals(
      MachineBasicBlock::iterator InsertPos,
      const std::vector<unsigned> &Literals) const {
    MachineBasicBlock *MBB = InsertPos->getParent();
    for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
      unsigned LiteralPair0 = Literals[i];
      unsigned LiteralPair1 = (i + 1 < e) ? Literals[i + 1] : 0;
      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
          TII->get(AMDGPU::LITERALS))
          .addImm(LiteralPair0)
          .addImm(LiteralPair1);
    }
    return InsertPos;
  }
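  // Collect the body of an ALU clause headed by *I: unbundle instruction
  // groups, assign literal registers, and materialize the literals as
  // LITERALS instructions (two 32-bit values each). The clause header's
  // COUNT operand (operand 7) is set to the final size minus one.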
  ClauseFile
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineInstr &ClauseHead = *I;
    std::vector<MachineInstr *> ClauseContent;
    I++;
    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
      if (IsTrivialInst(*I)) {
        ++I;
        continue;
      }
      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
        break;
      std::vector<MachineOperand *> Literals;
      if (I->isBundle()) {
        MachineInstr &DeleteMI = *I;
        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
        while (++BI != E && BI->isBundledWithPred()) {
          BI->unbundleFromPred();
          for (MachineOperand &MO : BI->operands()) {
            if (MO.isReg() && MO.isInternalRead())
              MO.setIsInternalRead(false);
          }
          getLiteral(*BI, Literals);
          ClauseContent.push_back(&*BI);
        }
        I = BI;
        DeleteMI.eraseFromParent();
      } else {
        getLiteral(*I, Literals);
        ClauseContent.push_back(&*I);
        I++;
      }
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        MachineInstrBuilder MILit = BuildMI(MBB, I, I->getDebugLoc(),
            TII->get(AMDGPU::LITERALS));
        if (Literals[i]->isImm()) {
          MILit.addImm(Literals[i]->getImm());
        } else {
          MILit.addGlobalAddress(Literals[i]->getGlobal(),
                                 Literals[i]->getOffset());
        }
        if (i + 1 < e) {
          if (Literals[i + 1]->isImm()) {
            MILit.addImm(Literals[i + 1]->getImm());
          } else {
            MILit.addGlobalAddress(Literals[i + 1]->getGlobal(),
                                   Literals[i + 1]->getOffset());
          }
        } else
          MILit.addImm(0);
        ClauseContent.push_back(MILit);
      }
    }
    assert(ClauseContent.size() < 128 && "ALU clause is too big");
    ClauseHead.getOperand(7).setImm(ClauseContent.size() - 1);
    return ClauseFile(&ClauseHead, std::move(ClauseContent));
  }
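  // Splice a previously built clause to its final position and patch the
  // clause header with the now-known address. Fetch instructions occupy two
  // CF-instruction slots each, hence CfCount advances by twice the clause
  // size below; ALU instructions advance it by one slot each.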
  void EmitFetchClause(MachineBasicBlock::iterator InsertPos,
                       const DebugLoc &DL, ClauseFile &Clause,
                       unsigned &CfCount) {
    CounterPropagateAddr(*Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, DL, TII->get(AMDGPU::FETCH_CLAUSE)).addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += 2 * Clause.second.size();
  }
  void EmitALUClause(MachineBasicBlock::iterator InsertPos, const DebugLoc &DL,
                     ClauseFile &Clause, unsigned &CfCount) {
    Clause.first->getOperand(0).setImm(0);
    CounterPropagateAddr(*Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, DL, TII->get(AMDGPU::ALU_CLAUSE)).addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += Clause.second.size();
  }
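  // CF instructions are created before their target address is known,
  // carrying an offset relative to the enclosing construct; rebase that
  // offset once the final address (Addr) is available.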
  void CounterPropagateAddr(MachineInstr &MI, unsigned Addr) const {
    MI.getOperand(0).setImm(Addr + MI.getOperand(0).getImm());
  }

  void CounterPropagateAddr(const std::set<MachineInstr *> &MIs,
                            unsigned Addr) const {
    for (MachineInstr *MI : MIs) {
      CounterPropagateAddr(*MI, Addr);
    }
  }

public:
  R600ControlFlowFinalizer(TargetMachine &tm)
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), ST(nullptr) {}
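  // Main driver: walk each basic block, turning CF pseudo instructions into
  // native ones while maintaining CfCount, the address (in CF instruction
  // slots) at which the next instruction lands, and mirroring pushes/pops on
  // the CFStack model to compute the final stack size.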
  bool runOnMachineFunction(MachineFunction &MF) override {
    ST = &MF.getSubtarget<R600Subtarget>();
    MaxFetchInst = ST->getTexVTXClauseSize();
    TII = ST->getInstrInfo();
    TRI = ST->getRegisterInfo();

    R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

    CFStack CFStack(ST, MF.getFunction()->getCallingConv());
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
         ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<MachineInstr *> IfThenElseStack;
      if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) {
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            getHWInstrDesc(CF_CALL_FS));
        CfCount++;
      }
      std::vector<ClauseFile> FetchClauses, AluClauses;
      std::vector<MachineInstr *> LastAlu(1);
      std::vector<MachineInstr *> ToPopAfter;
      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
           I != E;) {
        if (TII->usesTextureCache(*I) || TII->usesVertexCache(*I)) {
          DEBUG(dbgs() << CfCount << ":"; I->dump(););
          FetchClauses.push_back(MakeFetchClause(MBB, I));
          CfCount++;
          LastAlu.back() = nullptr;
          continue;
        }
        MachineBasicBlock::iterator MI = I;
        if (MI->getOpcode() != AMDGPU::ENDIF)
          LastAlu.back() = nullptr;
        if (MI->getOpcode() == AMDGPU::CF_ALU)
          LastAlu.back() = &*MI;
        I++;
        bool RequiresWorkAround =
            CFStack.requiresWorkAroundForInst(MI->getOpcode());
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          if (RequiresWorkAround) {
            DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
            BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_EG))
                .addImm(CfCount + 1)
                .addImm(1);
            MI->setDesc(TII->get(AMDGPU::CF_ALU));
            CfCount++;
            CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
          } else
            CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
          // fall through
        case AMDGPU::CF_ALU:
          I = MI;
          AluClauses.push_back(MakeALUClause(MBB, I));
          DEBUG(dbgs() << CfCount << ":"; MI->dump(););
          CfCount++;
          break;
        case AMDGPU::WHILELOOP: {
          CFStack.pushLoop();
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_WHILE_LOOP))
              .addImm(1);
          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(std::move(Pair));
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDLOOP: {
          CFStack.popLoop();
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              std::move(LoopStack.back());
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::IF_PREDICATE_SET: {
          LastAlu.push_back(nullptr);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_JUMP))
              .addImm(0)
              .addImm(0);
          IfThenElseStack.push_back(MIb);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ELSE: {
          MachineInstr *JumpInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(*JumpInst, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_ELSE))
              .addImm(0)
              .addImm(0);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          IfThenElseStack.push_back(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDIF: {
          CFStack.popBranch();
          if (LastAlu.back()) {
            ToPopAfter.push_back(LastAlu.back());
          } else {
            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                getHWInstrDesc(CF_POP))
                .addImm(CfCount + 1)
                .addImm(1);
            (void)MIb;
            DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
            CfCount++;
          }

          MachineInstr *IfOrElseInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(*IfOrElseInst, CfCount);
          IfOrElseInst->getOperand(1).setImm(1);
          LastAlu.pop_back();
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::BREAK: {
          CfCount++;
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_BREAK))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_CONTINUE))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::RETURN: {
          DebugLoc DL = MBB.findDebugLoc(MI);
          BuildMI(MBB, MI, DL, getHWInstrDesc(CF_END));
          CfCount++;
          if (CfCount % 2) {
            BuildMI(MBB, I, DL, TII->get(AMDGPU::PAD));
            CfCount++;
          }
          MI->eraseFromParent();
          for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
            EmitFetchClause(I, DL, FetchClauses[i], CfCount);
          for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
            EmitALUClause(I, DL, AluClauses[i], CfCount);
          break;
        }
        default:
          if (TII->isExport(MI->getOpcode())) {
            DEBUG(dbgs() << CfCount << ":"; MI->dump(););
            CfCount++;
          }
          break;
        }
      }
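      // For each ENDIF whose region ended in a plain CF_ALU clause, fold the
      // POP into that clause by rewriting it as CF_ALU_POP_AFTER, copying
      // all nine immediate operands of the original clause header.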
      for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
        MachineInstr *Alu = ToPopAfter[i];
        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
            TII->get(AMDGPU::CF_ALU_POP_AFTER))
            .addImm(Alu->getOperand(0).getImm())
            .addImm(Alu->getOperand(1).getImm())
            .addImm(Alu->getOperand(2).getImm())
            .addImm(Alu->getOperand(3).getImm())
            .addImm(Alu->getOperand(4).getImm())
            .addImm(Alu->getOperand(5).getImm())
            .addImm(Alu->getOperand(6).getImm())
            .addImm(Alu->getOperand(7).getImm())
            .addImm(Alu->getOperand(8).getImm());
        Alu->eraseFromParent();
      }
      MFI->CFStackSize = CFStack.MaxStackSize;
    }

    return false;
  }
  const char *getPassName() const override {
    return "R600 Control Flow Finalizer Pass";
  }
};
char R600ControlFlowFinalizer::ID = 0;

} // end anonymous namespace
llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}