//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"
#include <list>
#include <queue>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    return NULL;
  }
  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return NULL;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1) {
      return NULL;
    }
    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    // REG_SEQUENCE operands alternate between values and subregister
    // indices, so the class of operand OpNo is the super-class constrained
    // by the subregister index that follows it.
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
                      cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    unsigned SubRegIdx =
            cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

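// Split a parameter address into a base (R1) and an offset (R2): a frame
// index becomes (TargetFrameIndex, 0), an ADD is split into its two
// operands, and anything else is returned unchanged as (Addr, 0).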
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

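// Select() applies a few target-specific rewrites before handing the node to
// the TableGen-generated matcher: CONST_ADDRESS nodes are folded into the
// source operands of their users, BUILD_VECTOR and BUILD_PAIR become
// REG_SEQUENCE nodes, and on R600-family targets small constants are folded
// into the instructions that use them.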
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  const R600InstrInfo *TII =
                      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL; // Already selected.
  }
  switch (Opc) {
  default: break;
  case AMDGPUISD::CONST_ADDRESS: {
    // Try to fold this constant address into the source operands of each of
    // its machine-instruction users, as long as the target's constant-read
    // limits still hold.
    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
                              I != SDNode::use_end(); I = Next) {
      Next = llvm::next(I);
      if (!I->isMachineOpcode()) {
        continue;
      }
      unsigned Opcode = I->getMachineOpcode();
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SrcIdx = I.getOperandNo();
      int SelIdx;
      // Unlike MachineInstrs, SDNodes do not have results in their operand
      // list, so we need to increment the SrcIdx, since
      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
      if (HasDst) {
        SrcIdx++;
      }

      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
      if (SelIdx < 0) {
        continue;
      }

      SDValue CstOffset;
      if (N->getValueType(0).isVector() ||
          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
        continue;

      // Gather constant values
      int SrcIndices[] = {
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
      };
      std::vector<unsigned> Consts;
      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
        int OtherSrcIdx = SrcIndices[i];
        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
          continue;
        }
        if (HasDst) {
          OtherSrcIdx--;
          OtherSelIdx--;
        }
        if (RegisterSDNode *Reg =
                         dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst =
                cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
      if (!TII->fitsConstReadLimitations(Consts))
        continue;

      // Convert back to SDNode indices
      if (HasDst) {
        SrcIdx--;
        SelIdx--;
      }
      std::vector<SDValue> Ops;
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (i == SrcIdx) {
          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
        } else if (i == SelIdx) {
          Ops.push_back(CstOffset);
        } else {
          Ops.push_back(I->getOperand(i));
        }
      }
      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    break;
  }
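  // BUILD_VECTOR is selected directly to a REG_SEQUENCE so the vector lives
  // in one wide register class instead of a chain of INSERT_SUBREGs.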
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    const AMDGPURegisterInfo *TRI =
                   static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
                   static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // On SI, use a VGPR class unless some machine-instruction user needs
      // the vector in SGPRs.
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG,
      // which adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
                                  VT.getVectorElementType(),
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SDValue RegSeqArgs[16 * 2 + 1];

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
              CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
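  // BUILD_PAIR of two 32-bit halves is likewise selected as a REG_SEQUENCE
  // over sub0/sub1 (or sub0_sub1/sub2_sub3 for i128) on SI-class targets.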
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

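  // On R600-family targets, fold selected constants directly into their
  // users: 0, 1, 0.5 and 1.0 map to the inline registers ZERO, ONE_INT, HALF
  // and ONE; everything else goes through the ALU_LITERAL_X slot.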
  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }

    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
                              Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // We can only use literal constants (e.g. AMDGPU::ZERO,
          // AMDGPU::ONE, etc) in machine opcodes.
          continue;
        }
      } else {
        switch(Use->getMachineOpcode()) {
        case AMDGPU::REG_SEQUENCE: break;
        default:
          if (!TII->isALUInstr(Use->getMachineOpcode()) ||
              (TII->get(Use->getMachineOpcode()).TSFlags &
              R600_InstFlag::VECTOR)) {
            continue;
          }
        }

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                          AMDGPU::OpName::literal);
          if (ImmIdx == -1) {
            continue;
          }

          if (TII->getOperandIdx(Use->getMachineOpcode(),
                                 AMDGPU::OpName::dst) != -1) {
            // subtract one from ImmIdx, because the DST operand is usually
            // index 0 for MachineInstrs, but we have no DST in the Ops vector.
            ImmIdx--;
          }
          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
          assert(C);

          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }
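  // All remaining opcodes go through the TableGen-generated matcher.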
  SDNode *Result = SelectCode(N);

  // Fold operands of selected node

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        Result->getMachineOpcode() == AMDGPU::DOT_4) {
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);
    }
    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
      // Fold FNEG/FABS
      // TODO: Isel can generate multiple MachineInsts, we need to recursively
      // parse Result
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold it
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
            TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
              Result->getMachineOpcode(), PotentialClamp->getVTList(),
              Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}

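// Rewrite a single source operand in place: FNEG and FABS wrappers are
// stripped into the neg/abs modifier operands, and BITCAST wrappers are
// looked through. Returns true if anything changed.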
bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
                                     SDValue &Abs, const R600InstrInfo *TII) {
  switch (Src.getOpcode()) {
  case ISD::FNEG:
    Src = Src.getOperand(0);
    Neg = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::FABS:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::BITCAST:
    Src = Src.getOperand(0);
    return true;
  default:
    return false;
  }
}

bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
    -1 // src2 has no abs modifier
  };

  // The indices above are MachineInstr operand indices; subtract one because
  // the Ops vector carries no dst operand.
  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue FakeAbs;
    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}

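// Same folding as FoldOperands, but over the eight per-channel sources of
// DOT_4 (src0_X..src0_W, src1_X..src1_W).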
bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
  };

  for (unsigned i = 0; i < 8; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue &Abs = Ops[AbsIdx[i] - 1];
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}

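// The predicates below classify loads and stores by the address space of
// their pointer operand; the generated selection patterns use them to choose
// the right memory instruction.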
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // Treat constant-address loads as global on pre-SI targets, and on SI
    // when the memory type is smaller than 32 bits.
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

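// Each routine below implements one of the ComplexPatterns declared in the
// target's .td files: it decomposes an address SDValue into the operand set
// the matched instruction expects.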
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
    SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    // Constant offsets are expressed in 32-bit words, hence the divide by 4.
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
    SDValue &BaseReg, SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

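// Ask the target lowering to simplify Op given that only its low 24 bits are
// demanded (the 24-bit ALU ops ignore the rest), repeating until nothing
// more folds.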
SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();
  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  } else {
    return Op;
  }
}

bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {

  assert(Op.getValueType() == MVT::i32);

  // Nine sign bits means the upper 9 bits of the i32 are all copies of the
  // sign, i.e. Op is a sign-extended 24-bit value.
  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32. These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

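// On SI, give the target lowering a final chance to fold each selected
// machine node (PostISelFolding); earlier generations skip this.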
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {

  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    return;
  }

  // Go over all selected nodes and try to fold them a bit more
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    SDNode *Node = I;

    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
    if (!MachineNode)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
    if (ResNode != Node) {
      ReplaceUses(Node, ResNode);
    }
  }
}