//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::init(false));

static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}
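
// Pick the pressure sets that will stand for SGPR and VGPR pressure: of all
// sets containing SGPR0 (resp. VGPR0), the one covering the most register
// units becomes SGPRSetID (resp. VGPRSetID).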
SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo(),
                                   SGPRPressureSets(getNumRegPressureSets()),
                                   VGPRPressureSets(getNumRegPressureSets()) {
  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets);
}

void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}
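
// The scratch resource descriptor occupies four consecutive SGPRs, so take
// the highest 4-aligned group below the SGPR limit. For example, assuming
// getMaxNumSGPRs(MF) returns 102, alignDown(102, 4) - 4 = 96 selects the
// tuple starting at SGPR96.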
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  unsigned BaseIdx = alignDown(getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}

unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  unsigned RegCount = getMaxNumSGPRs(MF);
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);

  unsigned MaxNumSGPRs = getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  return Reserved;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo().hasStackObjects();
}

bool
SIRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}

bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  return MI->mayLoadOrStore();
}
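
// Materialize a VGPR holding the address of a frame index: a plain V_MOV of
// the frame index when no offset is needed, otherwise S_MOV the offset into
// an SGPR and V_ADD it to the frame index (the carry output is dead).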
void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_ADD_I32_e64), BaseReg)
    .addReg(UnusedCarry, RegState::Define | RegState::Dead)
    .addReg(OffsetReg, RegState::Kill)
    .addFrameIndex(FrameIdx);
}

void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");

  assert(TII->isMUBUF(MI));

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}

bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  return SIInstrInfo::isMUBUF(*MI) && isUInt<12>(Offset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}
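
// Map a spill pseudo to the number of 32-bit sub-registers it covers, i.e.
// the register width in bits divided by 32 (S512/V512 -> 16, ...,
// S32/V32 -> 1).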
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
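
// The _OFFEN buffer forms take a VGPR address; once a frame index has been
// folded to an immediate, the access can use the _OFFSET form instead and
// drop the VGPR. These two helpers return the _OFFSET opcode for an _OFFEN
// one, or -1 if there is no equivalent.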
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg();

  BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
    .addReg(Reg, getDefRegState(!IsStore))
    .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
    .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
    .addImm(Offset)
    .addImm(0) // glc
    .addImm(0) // slc
    .addImm(0) // tfe
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  return true;
}
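
// Expand a VGPR spill pseudo into a series of per-dword buffer accesses.
// MUBUF immediate offsets are unsigned 12-bit, so if the base offset plus the
// spill size overflows that range, the offset is folded into an SGPR:
// scavenged if one is free, otherwise temporarily added into ScratchOffset
// and subtracted back out after the spill.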
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         const MachineOperand *SrcDst,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffset,
                                         int64_t Offset,
                                         RegScavenger *RS) const {
  unsigned Value = SrcDst->getReg();
  bool IsKill = SrcDst->isKill();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  DebugLoc DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  bool RanOutOfSGPRs = false;
  bool Scavenged = false;
  unsigned SOffset = ScratchOffset;
  unsigned OriginalImmOffset = Offset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    SOffset = AMDGPU::NoRegister;

    // We don't have access to the register scavenger if this function is
    // called during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true
      // on SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      RanOutOfSGPRs = true;
      SOffset = ScratchOffset;
    } else {
      Scavenged = true;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffset)
      .addImm(Offset);
    Offset = 0;
  }

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs == 1 ?
      Value : getSubReg(Value, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(!IsStore))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | SrcDstRegState)
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }

  if (RanOutOfSGPRs) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffset)
      .addReg(ScratchOffset)
      .addImm(OriginalImmOffset);
  }
}
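
// Spill an SGPR (tuple) one 32-bit component at a time. Each component takes
// one of three paths: an SMEM scalar store relative to the scratch wave
// offset (when scalar stores are available and enabled), a V_WRITELANE into a
// lane of a VGPR reserved for SGPR spilling, or, when no lane was allocated,
// a V_MOV into a temporary VGPR that is then stored to the frame index. m0
// cannot be used with writelane, so it is first copied into a regular SGPR.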
void SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = ST.hasScalarStores() && EnableSpillSGPRToSMEM;

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, getSubRegFromChannel(i));

    if (SpillToSMEM) {
      if (SuperReg == AMDGPU::M0) {
        assert(NumSubRegs == 1);
        unsigned CopyM0
          = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), CopyM0)
          .addReg(AMDGPU::M0, getKillRegState(IsKill));

        // The real spill now kills the temp copy.
        SubReg = SuperReg = CopyM0;
        IsKill = true;
      }

      int64_t FrOffset = FrameInfo.getObjectOffset(Index);
      unsigned Size = FrameInfo.getObjectSize(Index);
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   Size, Align);

      unsigned OffsetReg = AMDGPU::M0;
      // Add i * 4 wave offset.
      //
      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = ST.getWavefrontSize() * (FrOffset + 4 * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg());
      }

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BUFFER_STORE_DWORD_SGPR))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg()) // sbase
        .addReg(OffsetReg) // soff
        .addImm(0) // glc
        .addMemOperand(MMO);

      continue;
    }

    struct SIMachineFunctionInfo::SpilledReg Spill =
      MFI->getSpilledReg(MF, Index, i);
    if (Spill.hasReg()) {
      if (SuperReg == AMDGPU::M0) {
        assert(NumSubRegs == 1);

        unsigned CopyM0
          = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), CopyM0)
          .addReg(SuperReg, getKillRegState(IsKill));

        // The real spill now kills the temp copy.
        SubReg = SuperReg = CopyM0;
        IsKill = true;
      }

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Size = FrameInfo.getObjectSize(Index);
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   Size, Align);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpReg, RegState::Kill) // src
        .addFrameIndex(Index) // vaddr
        .addReg(MFI->getScratchRSrcReg()) // srsrc
        .addReg(MFI->getScratchWaveOffsetReg()) // soffset
        .addImm(i * 4) // offset
        .addMemOperand(MMO);
    }
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
}
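
// Reload an SGPR (tuple) spilled by spillSGPR, component by component: an
// SMEM scalar load when spilling to SMEM, a V_READLANE from the assigned
// spill-VGPR lane when one exists, or a scratch load followed by
// V_READFIRSTLANE otherwise. A restore into m0 goes through a temporary SGPR
// and is copied to m0 at the end.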
void SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = ST.hasScalarStores() && EnableSpillSGPRToSMEM;

  // m0 is not allowed with readlane/writelane, so a temporary SGPR and an
  // extra copy are needed.
  bool IsM0 = (SuperReg == AMDGPU::M0);
  if (IsM0) {
    assert(NumSubRegs == 1);
    SuperReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  }

  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, getSubRegFromChannel(i));

    if (SpillToSMEM) {
      unsigned Size = FrameInfo.getObjectSize(Index);
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   Size, Align);

      unsigned OffsetReg = AMDGPU::M0;

      // Add i * 4 offset.
      int64_t Offset = ST.getWavefrontSize() * (FrOffset + 4 * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg());
      }

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BUFFER_LOAD_DWORD_SGPR), SubReg)
        .addReg(MFI->getScratchRSrcReg()) // sbase
        .addReg(OffsetReg) // soff
        .addImm(0) // glc
        .addMemOperand(MMO)
        .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);

      continue;
    }

    SIMachineFunctionInfo::SpilledReg Spill
      = MFI->getSpilledReg(MF, Index, i);

    if (Spill.hasReg()) {
      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
              SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane)
        .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    } else {
      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      unsigned Size = FrameInfo.getObjectSize(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index);

      MachineMemOperand *MMO = MF->getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad, Size, Align);

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
        .addFrameIndex(Index) // vaddr
        .addReg(MFI->getScratchRSrcReg()) // srsrc
        .addReg(MFI->getScratchWaveOffsetReg()) // soffset
        .addImm(i * 4) // offset
        .addMemOperand(MMO);
      BuildMI(*MBB, MI, DL,
              TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpReg, RegState::Kill)
        .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (IsM0 && SuperReg != AMDGPU::M0) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(SuperReg);
  }

  MI->eraseFromParent();
}
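
// Resolve a frame index operand. SGPR and VGPR spill pseudos are expanded via
// spillSGPR/restoreSGPR and buildSpillLoadStore. For any other MUBUF access,
// try to fold the frame offset into the 12-bit immediate (switching _OFFEN
// forms to _OFFSET); failing that, materialize the offset, through a VGPR if
// the immediate is not legal for the operand.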
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      spillSGPR(MI, Index, RS);
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      restoreSGPR(MI, Index, RS);
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::vdata),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
            FrameInfo.getObjectOffset(Index) +
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
      MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
      MI->eraseFromParent();
      break;

    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::vdata),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
            FrameInfo.getObjectOffset(Index) +
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      if (TII->isMUBUF(*MI)) {
        // Disable offen so we don't need a 0 vgpr base.
        assert(static_cast<int>(FIOperandNum) ==
               AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::vaddr));

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        int64_t OldImm
          = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
        int64_t NewOffset = OldImm + Offset;

        if (isUInt<12>(NewOffset) &&
            buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
          MI->eraseFromParent();
          break;
        }
      }

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::SCC_CLASSRegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 0: return false;
  case 1: return false;
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                          const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                          const TargetRegisterClass *VRC) const {
  switch (VRC->getSize()) {
  case 4:
    return &AMDGPU::SGPR_32RegClass;
  case 8:
    return &AMDGPU::SReg_64RegClass;
  case 16:
    return &AMDGPU::SReg_128RegClass;
  case 32:
    return &AMDGPU::SReg_256RegClass;
  case 64:
    return &AMDGPU::SReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
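
// Map a sub-register index to the class of the piece it extracts, assuming
// one lane-mask bit per 32-bit lane: e.g. querying SReg_128 with sub0_sub1
// (two lane bits) yields SReg_64, and any single subN index yields SGPR_32 or
// VGPR_32.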
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
  const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Count = countPopulation(getSubRegIndexLaneMask(SubIdx));
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so
  // we only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}
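
// Operand-encoding helpers: OPERAND_REG_IMM32_* operands can encode an
// arbitrary 32-bit literal, while OPERAND_REG_INLINE_C_* operands only accept
// the hardware's inline constants, so anything that takes a literal can also
// take an inline constant.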
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32_INT ||
         OpType == AMDGPU::OPERAND_REG_IMM32_FP;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C_INT ||
         OpType == AMDGPU::OPERAND_REG_INLINE_C_FP;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should be
// easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    assert(ST.isAmdCodeObjectV2() &&
           "Non-CodeObjectV2 ABI currently uses relocations");
    assert(MFI->hasPrivateSegmentBuffer());
    return MFI->PrivateSegmentBufferUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_ID:
    assert(MFI->hasDispatchID());
    return MFI->DispatchIDUserSGPR;
  case SIRegisterInfo::FLAT_SCRATCH_INIT:
    assert(MFI->hasFlatScratchInit());
    return MFI->FlatScratchInitUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    assert(MFI->hasQueuePtr());
    return MFI->QueuePtrUserSGPR;
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {

  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC;
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    RC = MRI.getRegClass(Reg);
  else
    RC = getPhysRegClass(Reg);

  return hasVGPRs(RC);
}

unsigned SIRegisterInfo::getTotalNumSGPRs(const SISubtarget &ST) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 800;
  return 512;
}

unsigned SIRegisterInfo::getNumAddressableSGPRs(const SISubtarget &ST) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 102;
  return 104;
}

unsigned SIRegisterInfo::getNumReservedSGPRs(const SISubtarget &ST) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 6; // VCC, FLAT_SCRATCH, XNACK.
  return 2; // VCC.
}

unsigned SIRegisterInfo::getMinNumSGPRs(const SISubtarget &ST,
                                        unsigned WavesPerEU) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    switch (WavesPerEU) {
      case 0:  return 0;
      case 10: return 0;
      case 9:  return 0;
      case 8:  return 81;
      default: return 97;
    }
  } else {
    switch (WavesPerEU) {
      case 0:  return 0;
      case 10: return 0;
      case 9:  return 49;
      case 8:  return 57;
      case 7:  return 65;
      case 6:  return 73;
      case 5:  return 81;
      default: return 97;
    }
  }
}

unsigned SIRegisterInfo::getMaxNumSGPRs(const SISubtarget &ST,
                                        unsigned WavesPerEU) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    switch (WavesPerEU) {
      case 0:  return 80;
      case 10: return 80;
      case 9:  return 80;
      case 8:  return 96;
      default: return getNumAddressableSGPRs(ST);
    }
  } else {
    switch (WavesPerEU) {
      case 0:  return 48;
      case 10: return 48;
      case 9:  return 56;
      case 8:  return 64;
      case 7:  return 72;
      case 6:  return 80;
      case 5:  return 96;
      default: return getNumAddressableSGPRs(ST);
    }
  }
}

unsigned SIRegisterInfo::getMaxNumSGPRs(const MachineFunction &MF) const {
  const Function &F = *MF.getFunction();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute maximum number of SGPRs function can use using default/requested
  // minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumSGPRs = getMaxNumSGPRs(ST, WavesPerEU.first);

  // Check if maximum number of SGPRs was explicitly requested using
  // "amdgpu-num-sgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-sgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-sgpr", MaxNumSGPRs);

    // Make sure requested value does not violate subtarget's specifications.
    if (Requested && (Requested <= getNumReservedSGPRs(ST)))
      Requested = 0;

    // If more SGPRs are required to support the input user/system SGPRs,
    // increase to accommodate them.
    //
    // FIXME: This really ends up using the requested number of SGPRs + number
    // of reserved special registers in total. Theoretically you could re-use
    // the last input registers for these special registers, but this would
    // require a lot of complexity to deal with the weird aliasing.
    unsigned NumInputSGPRs = MFI.getNumPreloadedSGPRs();
    if (Requested && Requested < NumInputSGPRs)
      Requested = NumInputSGPRs;

    // Make sure requested value is compatible with values implied by
    // default/requested minimum/maximum number of waves per execution unit.
    if (Requested && Requested > getMaxNumSGPRs(ST, WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumSGPRs(ST, WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumSGPRs = Requested;
  }

  if (ST.hasSGPRInitBug())
    MaxNumSGPRs = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;

  return MaxNumSGPRs - getNumReservedSGPRs(ST);
}

unsigned SIRegisterInfo::getNumDebuggerReservedVGPRs(
  const SISubtarget &ST) const {
  if (ST.debuggerReserveRegs())
    return 4;
  return 0;
}
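
// The wave-count tables below follow from the register file sizes and
// allocation granularity: e.g. 256 VGPRs at granularity 4 give a maximum of
// 24 VGPRs for 10 waves (floor(256 / 10) rounded down to a multiple of 4),
// and the minimum for N waves is one more than the maximum for N + 1 waves.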
unsigned SIRegisterInfo::getMinNumVGPRs(unsigned WavesPerEU) const {
  switch (WavesPerEU) {
    case 0:  return 0;
    case 10: return 0;
    case 9:  return 25;
    case 8:  return 29;
    case 7:  return 33;
    case 6:  return 37;
    case 5:  return 41;
    case 4:  return 49;
    case 3:  return 65;
    case 2:  return 85;
    default: return 129;
  }
}

unsigned SIRegisterInfo::getMaxNumVGPRs(unsigned WavesPerEU) const {
  switch (WavesPerEU) {
    case 0:  return 24;
    case 10: return 24;
    case 9:  return 28;
    case 8:  return 32;
    case 7:  return 36;
    case 6:  return 40;
    case 5:  return 48;
    case 4:  return 64;
    case 3:  return 84;
    case 2:  return 128;
    default: return getTotalNumVGPRs();
  }
}

unsigned SIRegisterInfo::getMaxNumVGPRs(const MachineFunction &MF) const {
  const Function &F = *MF.getFunction();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute maximum number of VGPRs function can use using default/requested
  // minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumVGPRs = getMaxNumVGPRs(WavesPerEU.first);

  // Check if maximum number of VGPRs was explicitly requested using
  // "amdgpu-num-vgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-vgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-vgpr", MaxNumVGPRs);

    // Make sure requested value does not violate subtarget's specifications.
    if (Requested && Requested <= getNumDebuggerReservedVGPRs(ST))
      Requested = 0;

    // Make sure requested value is compatible with values implied by
    // default/requested minimum/maximum number of waves per execution unit.
    if (Requested && Requested > getMaxNumVGPRs(WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumVGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumVGPRs = Requested;
  }

  return MaxNumVGPRs - getNumDebuggerReservedVGPRs(ST);
}
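
// Split a register class into EltSize-byte tiles for spilling: EltSize of 4,
// 8 or 16 selects the 32-bit subN, 64-bit subN_subM or 128-bit
// subN_..._subM index tables below; an empty list means the class is already
// a single element of that size.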
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 16 && "unhandled register spill split size");

  static const int16_t Sub0_15_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11,
    AMDGPU::sub12_sub13_sub14_sub15
  };

  static const int16_t Sub0_7_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 128:
    return {};
  case 256:
    return makeArrayRef(Sub0_7_128);
  case 512:
    return makeArrayRef(Sub0_15_128);
  default:
    llvm_unreachable("unhandled register size");
  }
}