//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"

using namespace llvm;
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a reference to the AMDGPU Subtarget around so that we
  // can make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();
private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *,
                       std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int CbId) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace
/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}
AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}
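
// Split a parameter-space address into a (base, offset) pair: frame indices
// and plain pointers get a zero offset, while an ADD is split into its two
// operands.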
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}
bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}
bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}
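
// Select() handles the AMDGPU/R600-specific opcodes directly; everything
// else falls through to the TableGen-generated selector via SelectCode().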
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  const R600InstrInfo *TII =
      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
  unsigned Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL; // Already selected.
  }
  switch (Opc) {
  default: break;
  case AMDGPUISD::CONST_ADDRESS: {
    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
         I != SDNode::use_end(); I = Next) {
      Next = llvm::next(I);
      if (!I->isMachineOpcode()) {
        continue;
      }
      unsigned Opcode = I->getMachineOpcode();
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SrcIdx = I.getOperandNo();
      int SelIdx;
      // Unlike MachineInstrs, SDNodes do not have results in their operand
      // list, so we need to increment the SrcIdx, since
      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
      if (HasDst) {
        SrcIdx++;
      }

      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
      if (SelIdx < 0) {
        continue;
      }

      SDValue CstOffset;
      if (N->getValueType(0).isVector() ||
          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
        continue;

      // Gather the constant values already read by this instruction.
      int SrcIndices[] = {
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
      };
      std::vector<unsigned> Consts;
      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
        int OtherSrcIdx = SrcIndices[i];
        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
          continue;
        }
        if (HasDst) {
          OtherSrcIdx--;
          OtherSelIdx--;
        }
        if (RegisterSDNode *Reg =
                dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst =
                cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
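      // An R600 ALU instruction group can only read a limited number of
      // distinct constants; give up on folding this one if the combined
      // set would exceed that limit.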
      if (!TII->fitsConstReadLimitations(Consts))
        continue;

      // Convert back to SDNode indices (undo the DST adjustment above).
      if (HasDst) {
        SrcIdx--;
        SelIdx--;
      }
      std::vector<SDValue> Ops;
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (i == SrcIdx) {
          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
        } else if (i == SelIdx) {
          Ops.push_back(CstOffset);
        } else {
          Ops.push_back(I->getOperand(i));
        }
      }
      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREGs,
    // which adds a 128-bit register copy when going through the
    // TwoAddressInstructions pass. We want to avoid 128-bit copies as much as
    // possible because they can't be bundled by our scheduler.
    SDValue RegSeqArgs[9] = {
      CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
    };
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[2 * i + 1] = N->getOperand(i);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }
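
    // Rewrite each use of this constant. The inline constant registers
    // (ZERO, HALF, ONE, ONE_INT) replace the operand directly; any other
    // value must go through the instruction's ALU_LITERAL_X slot.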
    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
         Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // Only the inline constant registers (AMDGPU::ZERO, AMDGPU::ONE,
          // etc.) can replace an operand of a generic node; an ALU_LITERAL_X
          // literal can only be encoded in a machine opcode.
          continue;
        }
      } else {
        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
            (TII->get(Use->getMachineOpcode()).TSFlags &
             R600_InstFlag::VECTOR)) {
          continue;
        }

        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                        AMDGPU::OpName::literal);
        if (ImmIdx == -1) {
          continue;
        }

        if (TII->getOperandIdx(Use->getMachineOpcode(),
                               AMDGPU::OpName::dst) != -1) {
          // Subtract one from ImmIdx, because the DST operand is usually index
          // 0 for MachineInstrs, but we have no DST in the Ops vector.
          ImmIdx--;
        }

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          ConstantSDNode *C = cast<ConstantSDNode>(Use->getOperand(ImmIdx));
          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value.
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register.
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }

  SDNode *Result = SelectCode(N);

  // Fold the operands of the selected node.
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        Result->getMachineOpcode() == AMDGPU::DOT_4) {
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);
    }
    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
      // Fold FNEG/FABS.
      // TODO: Isel can generate multiple MachineInsts; we need to recursively
      // parse Result.
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold it into this
      // node by setting the clamp modifier.
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
              TII->getOperandIdx(Result->getMachineOpcode(),
                                 AMDGPU::OpName::clamp);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
                                        Result->getMachineOpcode(),
                                        PotentialClamp->getVTList(),
                                        Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}
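
/// Fold a source modifier into an ALU operand: FNEG sets the neg bit, FABS
/// sets the abs bit (when the operand has one), and BITCAST is simply looked
/// through. Returns true if \p Src was rewritten.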
bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
                                     SDValue &Abs, const R600InstrInfo *TII) {
  switch (Src.getOpcode()) {
  case ISD::FNEG:
    Src = Src.getOperand(0);
    Neg = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::FABS:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::BITCAST:
    Src = Src.getOperand(0);
    return true;
  default:
    return false;
  }
}
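
/// Try to fold a modifier into each of the three source operands of a scalar
/// ALU instruction. Returns true as soon as one operand is folded, so the
/// caller loops until no more folding is possible.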
bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
    -1
  };

  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue FakeAbs;
    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}
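
/// FoldDotOperands does the same as FoldOperands, but over the eight
/// per-channel source operands (src0/src1 x X/Y/Z/W) of a DOT_4 instruction.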
bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
  };

  for (unsigned i = 0; i < 8; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue &Abs = Ops[AbsIdx[i] - 1];
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}
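
/// Return true if \p ptr is a pointer into the address space \p addrspace.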
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}
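
// A constant-pool load is lowered as a load from the private address space
// whose machine memory operand refers to the constant-pool pseudo source
// value.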
bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}
bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}
const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

///==== AMDGPU Functions ====///
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}
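
// Match the VTX_READ addressing mode: a base register plus a 16-bit
// immediate offset. A constant-only address is folded entirely into the
// offset field, with ZERO as the base.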
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
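
// Match an indirect address as a base register plus constant offset; a lone
// constant is addressed relative to the INDIRECT_BASE_ADDR register.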
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}
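
// On Southern Islands and later, post-process the selected DAG: fix up the
// register class of 64-bit CopyToReg destinations and let the target fold
// machine nodes further via PostISelFolding().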
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {

  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    return;
  }

  // Go over all selected nodes and try to fold them a bit more.
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    SDNode *Node = I;
    switch (Node->getOpcode()) {
    // Fix the register class of the copied value in CopyToReg nodes - ISel
    // will always use SReg classes for 64-bit copies, but this is not always
    // what we want.
    case ISD::CopyToReg: {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      SDValue Val = Node->getOperand(2);
      const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
      if (RC != &AMDGPU::SReg_64RegClass) {
        continue;
      }

      if (!Val.getNode()->isMachineOpcode() ||
          Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
        continue;
      }

      const MCInstrDesc &Desc =
          TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
      const TargetRegisterInfo *TRI = TM.getRegisterInfo();
      RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
      continue;
    }
    }

    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
    if (!MachineNode)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
    if (ResNode != Node) {
      ReplaceUses(Node, ResNode);
    }
  }
}