//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPC.h"
#include "PPCInstrInfo.h"
#include "PPCRegisterInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  namespace PPCISD {
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// Newer FCFID[US] integer-to-floating-point conversion instructions for
      /// unsigned integers and single-precision outputs.
      FCFIDU, FCFIDS, FCFIDUS,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
      FCTIDZ, FCTIWZ,

      /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions
      /// for unsigned integers.
      FCTIDUZ, FCTIWUZ,

      /// Reciprocal estimate instructions (unary FP ops).
      FRE, FRSQRTE,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM Instruction.
      ///
      VPERM,

      /// XXSPLT - The PPC VSX splat instructions.
      ///
      XXSPLT,

      /// The CMPB instruction (takes two operands of i32 or i64).
      CMPB,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively. These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be a
      /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
      Hi, Lo,
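
      // Editor's illustration (not in the original header): selected naively,
      //   Hi(TargetGlobalAddress(G), Constant(C))  ->  lis rD, (G+C)@ha
      //   Lo(TargetGlobalAddress(G), Constant(C))  ->  li  rD, (G+C)@l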

      /// The following two target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an allocation on the stack.
      DYNALLOC,

      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an offset from native SP to the address of the most recent
      /// dynamic alloca.
      DYNAREAOFFSET,

      /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts. These nodes are generated by the multi-precision shift
      /// code.
      SRL, SRA, SHL,

      /// The combination of sra[wd]i and addze used to implement signed
      /// integer division by a power of 2. The first operand is the dividend,
      /// and the second is the constant shift amount (representing the
      /// power of 2).
      SRA_ADDZE,
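
      // Editor's sketch (not in the original header): for an i32 "x sdiv 4"
      // this selects to roughly
      //   srawi r4, r3, 2   ; shift right algebraic; sets CA when x is
      //                     ; negative with a nonzero remainder
      //   addze r3, r4      ; add the carry to round toward zero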

      /// CALL - A direct function call.
      /// CALL_NOP is a call with the special NOP which follows 64-bit
      /// SVR4 calls.
      CALL, CALL_NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL,

      /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
      /// instruction and the TOC reload required on SVR4 PPC64.
      BCTRL_LOAD_TOC,

      /// Return with a flag operand, matched by 'blr'
      RET_FLAG,

      /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
      /// This copies the bits corresponding to the specified CRREG into the
      /// resultant GPR. Bits corresponding to other CR regs are undefined.
      MFOCRF,

      /// Direct move from a VSX register to a GPR
      MFVSR,

      /// Direct move from a GPR to a VSX register (algebraic)
      MTVSRA,

      /// Direct move from a GPR to a VSX register (zero)
      MTVSRZ,

      /// Extract a subvector from signed integer vector and convert to FP.
      /// It is primarily used to convert a (widened) illegal integer vector
      /// type to a legal floating-point vector type.
      /// For example v2i32 -> widened to v4i32 -> v2f64
      SINT_VEC_TO_FP,

      /// Extract a subvector from unsigned integer vector and convert to FP.
      /// As with SINT_VEC_TO_FP, used for converting illegal types.
      UINT_VEC_TO_FP,

      // FIXME: Remove these once the ANDI glue bug is fixed:
      /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
      /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
      /// implement truncation of i32 or i64 to i1.
      ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,
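
      // Editor's sketch (not in the original header): "trunc i32 %x to i1"
      // becomes
      //   andi. r0, r3, 1     ; record form compares the result against zero
      //   <GT bit of CR0>     ; set iff the masked result was 1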

      // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
      // target (returns (Lo, Hi)). It takes a chain operand.
      READ_TIME_BASE,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions. For lack of better number, we use the opcode number
      /// encoding for the OPC field to identify the compare. For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions. For lack of better number, we use the
      /// opcode number encoding for the OPC field to identify the compare. For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use (e.g.
      /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
      /// an optional input flag argument.
      COND_BRANCH,

      /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
      /// loops.
      BDNZ, BDZ,

      /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
      /// towards zero. Used only as part of the long double-to-int
      /// conversion sequence.
      FADDRTZ,

      /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
      MFFS,

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
      CR6SET,
      CR6UNSET,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
      /// on PPC32.
      PPC32_GOT,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
      /// local dynamic TLS on PPC32.
      PPC32_PICGOT,

      /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
      /// TLS model, produces an ADDIS8 instruction that adds the GOT
      /// base to sym\@got\@tprel\@ha.
      ADDIS_GOT_TPREL_HA,

      /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
      /// TLS model, produces a LD instruction with base register G8RReg
      /// and offset sym\@got\@tprel\@l. This completes the addition that
      /// finds the offset of "sym" relative to the thread pointer.
      LD_GOT_TPREL_L,

      /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
      /// model, produces an ADD instruction that adds the contents of
      /// G8RReg to the thread pointer. Symbol contains a relocation
      /// sym\@tls which is to be replaced by the thread pointer and
      /// identifies to the linker that the instruction is part of a
      /// TLS sequence.
      ADD_TLS,
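
      // Editor's illustration (not in the original header): together, the
      // three initial-exec nodes above expand to a sequence like
      //   addis r4, r2, sym@got@tprel@ha
      //   ld    r4, sym@got@tprel@l(r4)
      //   add   r3, r4, sym@tls    ; @tls marks the add against the thread
      //                            ; pointer for the linker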

      /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsgd\@ha.
      ADDIS_TLSGD_HA,

      /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
      /// ADDI_TLSGD_L_ADDR until after register assignment.
      ADDI_TLSGD_L,

      /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
      /// ADDI_TLSGD_L_ADDR until after register assignment.
      GET_TLS_ADDR,

      /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
      /// register assignment.
      ADDI_TLSGD_L_ADDR,
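
      // Editor's illustration (not in the original header): after expansion,
      // the general-dynamic sequence is
      //   addis r3, r2, sym@got@tlsgd@ha
      //   addi  r3, r3, sym@got@tlsgd@l
      //   bl    __tls_get_addr(sym@tlsgd)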

      /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsld\@ha.
      ADDIS_TLSLD_HA,

      /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
      /// ADDI_TLSLD_L_ADDR until after register assignment.
      ADDI_TLSLD_L,

      /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
      /// ADDI_TLSLD_L_ADDR until after register assignment.
      GET_TLSLD_ADDR,

      /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
      /// following register assignment.
      ADDI_TLSLD_L_ADDR,

      /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds X3 to
      /// sym\@dtprel\@ha.
      ADDIS_DTPREL_HA,

      /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@dtprel\@l.
      ADDI_DTPREL_L,

      /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
      /// during instruction selection to optimize a BUILD_VECTOR into
      /// operations on splats. This is necessary to avoid losing these
      /// optimizations due to constant folding.
      VADD_SPLAT,

      /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
      /// operand identifies the operating system entry point.
      SC,

      /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
      CLRBHRB,

      /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
      /// history rolling buffer entry.
      MFBHRBE,

      /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
      RFEBB,

      /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
      /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
      /// or stxvd2x instruction. The chain is necessary because the
      /// sequence replaces a load and needs to provide the same number
      /// of output values.
      XXSWAPD,
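
      // Editor's sketch (not in the original header): a v2f64 load on a
      // little-endian target becomes
      //   lxvd2x  vs0, 0, r3   ; doublewords arrive in big-endian order
      //   xxswapd vs0, vs0     ; swap them into little-endian element order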

      /// QVFPERM = This corresponds to the QPX qvfperm instruction.
      QVFPERM,

      /// QVGPCI = This corresponds to the QPX qvgpci instruction.
      QVGPCI,

      /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
      QVALIGNI,

      /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
      QVESPLATI,

      /// QBFLT = Access the underlying QPX floating-point boolean
      /// representation.
      QBFLT,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr. Type can be either i16 or
      /// i32.
      STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC. Type can be either i16
      /// or i32.
      LBRX,
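
      // Editor's example (not in the original header): an i32 LBRX selects to
      //   lwbrx rD, 0, rP      ; load word byte-reversed
      // and an i16 LBRX to lhbrx.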

      /// STFIWX - The STFIWX instruction. The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
      /// load which sign-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWAX,

      /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
      /// load which zero-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWZX,

      /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
      /// Maps directly to an lxvd2x instruction that will be followed by
      /// an xxswapd.
      LXVD2X,

      /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
      /// Maps directly to an stxvd2x instruction that will be preceded by
      /// an xxswapd.
      STXVD2X,

      /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
      /// The 4xf32 load used for v4i1 constants.
      QVLFSb,

      /// GPRC = TOC_ENTRY GA, TOC
      /// Loads the entry for GA from the TOC, where the TOC base is given by
      /// the last operand.
      TOC_ENTRY
    };
  }

  /// Define some predicates that are used for node matching.
  namespace PPC {
    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUDUM instruction.
    bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGEW or VMRGOW instruction.
    bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                             unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                            SelectionDAG &DAG);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
    /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted. The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
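
    // Editor's example (not in the original header): for a v16i8 build_vector
    // of sixteen 5's and ByteSize == 1, this returns the splatted constant 5,
    // which selects to "vspltisb vD, 5".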

    /// If this is a qvaligni shuffle mask, return the shift
    /// amount, otherwise return -1.
    int isQVALIGNIShuffleMask(SDNode *N);
  }

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &Subtarget;

  public:
    explicit PPCTargetLowering(const PPCTargetMachine &TM,
                               const PPCSubtarget &STI);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// getPreferredVectorAction - The code we generate when vector types are
    /// legalized by promoting the integer element type is often much worse
    /// than code we generate if we widen the type for applicable vector types.
    /// The issue with promoting is that the vector is scalarized, individual
    /// elements promoted and then the vector is rebuilt. So say we load a pair
    /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
    /// loads, moves back into VSRs (or memory ops if we don't have moves) and
    /// then the VPERM for the shuffle. All in all a very slow sequence.
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
      const override {
      if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
        return TypeWidenVector;
      return TargetLoweringBase::getPreferredVectorAction(VT);
    }
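
    // Editor's note (not in the original header): e.g. v4i8 (8-bit elements)
    // is widened to v16i8 rather than promoted to v4i32, avoiding the
    // scalarize/promote/rebuild sequence described above.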

    bool useSoftFloat() const override;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
      return MVT::i32;
    }

    bool isCheapToSpeculateCttz() const override {
      return true;
    }

    bool isCheapToSpeculateCtlz() const override {
      return true;
    }

    bool supportSplitCSR(MachineFunction *MF) const override {
      return
        MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
        MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
    }

    void initializeSplitCSR(MachineBasicBlock *Entry) const override;

    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    /// Return true if the target always benefits from combining into FMA for a
    /// given value type. This must typically return false on targets where FMA
    /// takes more cycles to execute than FADD.
    bool enableAggressiveFMAFusion(EVT VT) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                   SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation. Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg. If Aligned is true, only accept
    /// displacements suitable for STD and friends, i.e. multiples of 4.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG, bool Aligned) const;
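
    // Editor's example (not in the original header): (add %X, 8) can use the
    // d-form "lwz rD, 8(rX)", whereas (add %X, %Y) is better served by
    // SelectAddressRegReg and the x-form "lwzx rD, rX, rY".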

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          std::vector<SDNode *> *Created) const override;

    unsigned getRegisterByName(const char *RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    unsigned getPrefLoopAlignment(MachineLoop *ML) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true;
    }

    Instruction *emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                  bool IsStore, bool IsLoad) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                   bool IsStore, bool IsLoad) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                        MachineBasicBlock *MBB,
                                        unsigned AtomicSize,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. This is the actual
    /// alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "es")
        return InlineAsm::Constraint_es;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "Z")
        return InlineAsm::Constraint_Z;
      else if (ConstraintCode == "Zy")
        return InlineAsm::Constraint_Zy;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is a legal
    /// icmp immediate, that is, the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a legal
    /// add immediate, that is, the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. on PPC it's free to truncate an i64 value in
    /// register X1 to i32 by referencing its sub-register R1.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isFPExtFree(EVT VT) const override;

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, that means the destination alignment can
    /// satisfy any constraint. Similarly, if SrcAlign is zero it means there
    /// isn't a need to check it against the alignment requirement, probably
    /// because the source does not need to be loaded. If 'IsMemset' is true,
    /// that means it's expanding a memset. If 'ZeroMemset' is true, that means
    /// it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
    /// source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const override;

    /// Is unaligned memory access allowed for the given type, and is it fast
    /// relative to software emulation?
    bool allowsMisalignedMemoryAccesses(EVT VT,
                                        unsigned AddrSpace,
                                        unsigned Align = 1,
                                        bool *Fast = nullptr) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true; otherwise fmuladd is
    /// expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

    // Should we expand the build vector with shuffles?
    bool
    shouldExpandBuildVectorWithShuffles(EVT VT,
                                        unsigned DefinedValues) const override;

    /// createFastISel - This method returns a target-specific FastISel object,
    /// or null if the target does not support "fast" instruction selection.
    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
      // We support any array type as a "consecutive" block in the parameter
      // save area. The element type defines the alignment requirement and
      // whether the argument should go in GPRs, FPRs, or VRs if available.
      //
      // Note that clang uses this capability both to implement the ELFv2
      // homogeneous float/vector aggregate ABI, and to avoid having to use
      // "byval" when passing aggregates that might fully fit in registers.
      return Ty->isArrayTy();
    }
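
    // Editor's example (not in the original header): under ELFv2, clang passes
    //   struct V { double x, y; };
    // as an IR argument of type [2 x double], which this hook marks as
    // requiring consecutive FPRs.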

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    /// Override to support customized stack guard loading.
    bool useLoadStackGuardNode() const override;
    void insertSSPDeclarations(Module &M) const override;

  private:
    struct ReuseLoadInfo {
      SDValue Ptr;
      SDValue Chain;
      SDValue ResChain;
      MachinePointerInfo MPI;
      bool IsInvariant;
      unsigned Alignment;
      AAMDNodes AAInfo;
      const MDNode *Ranges;

      ReuseLoadInfo() : IsInvariant(false), Alignment(0), Ranges(nullptr) {}
    };

    bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                             SelectionDAG &DAG,
                             ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
    void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
                         SelectionDAG &DAG) const;

    void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;
    SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite *CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG &DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                         SDValue Chain, SDValue &LROpOut,
                                         SDValue &FPOpOut, bool isDarwinABI,
                                         const SDLoc &dl) const;

    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                           const SDLoc &dl) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
                       bool isTailCall, bool isVarArg, bool IsPatchPoint,
                       bool hasNest, SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                       SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
                       SDValue &Callee, int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals,
                       ImmutableCallSite *CS) const;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    SDValue
    LowerCall(TargetLowering::CallLoweringInfo &CLI,
              SmallVectorImpl<SDValue> &InVals) const override;

    bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                              SelectionDAG &DAG, SDValue ArgVal,
                              const SDLoc &dl) const;

    SDValue LowerFormalArguments_Darwin(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_64SVR4(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_32SVR4(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

    SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                       SDValue CallSeqStart,
                                       ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                                       const SDLoc &dl) const;

    SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool IsPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite *CS) const;
    SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool IsPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite *CS) const;
    SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool IsPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite *CS) const;

    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;

    SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                             unsigned &RefinementSteps,
                             bool &UseOneConstNR) const override;
    SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                             unsigned &RefinementSteps) const override;
    unsigned combineRepeatedFPDivisors() const override;

    CCAssignFn *useFastISelCCs(unsigned Flag) const;
  };

  namespace PPC {
    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo);
  }

  bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                  CCValAssign::LocInfo &LocInfo,
                                  ISD::ArgFlagsTy &ArgFlags,
                                  CCState &State);

  bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State);

  bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                           MVT &LocVT,
                                           CCValAssign::LocInfo &LocInfo,
                                           ISD::ArgFlagsTy &ArgFlags,
                                           CCState &State);
}

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H