//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MachineValueType.h"

namespace llvm {

  namespace PPCISD {

    // When adding a NEW PPCISD node please add it to the correct position in
    // the enum. The order of elements in this enum matters!
    // Values that are added after this entry:
    //     STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
    // are considered memory opcodes and are treated differently than entries
    // that come before it. For example, ADD or MUL should be placed before
    // the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come
    // after it.
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// Newer FCFID[US] integer-to-floating-point conversion instructions for
      /// unsigned integers and single-precision outputs.
      FCFIDU, FCFIDS, FCFIDUS,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
      FCTIDZ, FCTIWZ,

      /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions
      /// for unsigned integers with round toward zero.
      FCTIDUZ, FCTIWUZ,

      /// Floating-point-to-integer conversion instructions.
      FP_TO_UINT_IN_VSR, FP_TO_SINT_IN_VSR,

      /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
      /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
      VEXTS,

      /// SExtVElems, takes an input vector of a smaller type and sign
      /// extends to an output vector of a larger type.
      SExtVElems,

      /// Reciprocal estimate instructions (unary FP ops).
      FRE, FRSQRTE,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM instruction.
      VPERM,

      /// XXSPLT - The PPC VSX splat instructions.
      XXSPLT,

      /// VECINSERT - The PPC vector insert instruction.
      VECINSERT,

      /// XXREVERSE - The PPC VSX reverse instruction.
      XXREVERSE,

      /// VECSHL - The PPC vector shift left instruction.
      VECSHL,

      /// XXPERMDI - The PPC XXPERMDI instruction.
      XXPERMDI,

      /// The CMPB instruction (takes two operands of i32 or i64).
      CMPB,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively. These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be a
      /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
      Hi, Lo,
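
      // For example, materializing a 32-bit absolute address is typically
      // matched as (add (PPCISD::Hi G, C), (PPCISD::Lo G, C)), which selects
      // to "lis r, G@ha" followed by "addi r, r, G@l" (a sketch; the exact
      // fold depends on the relocation model).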

      /// The following two target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an allocation on the stack.
      DYNALLOC,

      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an offset from native SP to the address of the most recent
      /// dynamic alloca.
      DYNAREAOFFSET,

      /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent PPC shifts.
      ///
      /// For scalar types, only the last `n + 1` bits of the shift amounts
      /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
      /// for exact behaviors.
      ///
      /// For vector types, only the last n bits are used. See vsld.
      SHL, SRA, SRL,
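
      // For example, an i32 shl selects to slw, which reads the low 6 bits
      // of its amount operand; an amount of 40 therefore produces 0 rather
      // than a shift by 40 % 32 (illustrative of the slw/sld semantics).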

      /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
      /// word and shift left immediate.
      EXTSWSLI,

      /// The combination of sra[wd]i and addze used to implement signed
      /// integer division by a power of 2. The first operand is the dividend,
      /// and the second is the constant shift amount (representing the
      /// exponent).
      SRA_ADDZE,
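
      // For example, an i32 sdiv by 4 can be emitted as (a sketch):
      //   srawi r3, r3, 2   // sets CA when a negative dividend loses bits
      //   addze r3, r3      // adds CA back, rounding the quotient toward zero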

      /// CALL - A direct function call.
      /// CALL_NOP is a call with the special NOP which follows 64-bit
      /// SVR4 calls.
      CALL, CALL_NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL,

      /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
      /// instruction and the TOC reload required on SVR4 PPC64.
      BCTRL_LOAD_TOC,

      /// Return with a flag operand, matched by 'blr'.
      RET_FLAG,

      /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
      /// This copies the bits corresponding to the specified CRREG into the
      /// resultant GPR. Bits corresponding to other CR regs are undefined.
      MFOCRF,

      /// Direct move from a VSX register to a GPR.
      MFVSR,

      /// Direct move from a GPR to a VSX register (algebraic).
      MTVSRA,

      /// Direct move from a GPR to a VSX register (zero).
      MTVSRZ,

      /// Direct move of 2 consecutive GPRs to a VSX register.
      BUILD_FP128,

      /// Extract a subvector from a signed integer vector and convert to FP.
      /// It is primarily used to convert a (widened) illegal integer vector
      /// type to a legal floating point vector type.
      /// For example: v2i32 -> widened to v4i32 -> v2f64.
      SINT_VEC_TO_FP,

      /// Extract a subvector from an unsigned integer vector and convert to FP.
      /// As with SINT_VEC_TO_FP, used for converting illegal types.
      UINT_VEC_TO_FP,

      // FIXME: Remove these once the ANDI glue bug is fixed:
      /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
      /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
      /// implement truncation of i32 or i64 to i1.
      ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,

      // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
      // target (returns (Lo, Hi)). It takes a chain operand.
      READ_TIME_BASE,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions. For lack of a better number, we use the opcode number
      /// encoding for the OPC field to identify the compare. For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions. For lack of a better number, we use the
      /// opcode number encoding for the OPC field to identify the compare. For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use (e.g.
      /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
      /// an optional input flag argument.
      COND_BRANCH,

      /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
      /// loops.
      BDNZ, BDZ,

      /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
      /// towards zero. Used only as part of the long double-to-int
      /// conversion sequence.
      FADDRTZ,

      /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
      MFFS,

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls.
      CR6SET, CR6UNSET,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
      /// on PPC32.
      PPC32_GOT,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
      /// local dynamic TLS on PPC32.
      PPC32_PICGOT,

      /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
      /// TLS model, produces an ADDIS8 instruction that adds the GOT
      /// base to sym\@got\@tprel\@ha.
      ADDIS_GOT_TPREL_HA,

      /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
      /// TLS model, produces a LD instruction with base register G8RReg
      /// and offset sym\@got\@tprel\@l. This completes the addition that
      /// finds the offset of "sym" relative to the thread pointer.
      LD_GOT_TPREL_L,

      /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
      /// model, produces an ADD instruction that adds the contents of
      /// G8RReg to the thread pointer. Symbol contains a relocation
      /// sym\@tls which is to be replaced by the thread pointer and
      /// identifies to the linker that the instruction is part of a
      /// TLS sequence.
      ADD_TLS,
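
      // Together, the three nodes above implement the usual initial-exec
      // access sequence, e.g. (a sketch):
      //   addis r4, r2, sym@got@tprel@ha   (ADDIS_GOT_TPREL_HA)
      //   ld    r4, sym@got@tprel@l(r4)    (LD_GOT_TPREL_L)
      //   add   r4, r4, sym@tls            (ADD_TLS; sym@tls stands for r13,
      //                                     the thread pointer)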

      /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsgd\@ha.
      ADDIS_TLSGD_HA,

      /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
      /// ADDI_TLSGD_L_ADDR until after register assignment.
      ADDI_TLSGD_L,

      /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
      /// ADDI_TLSGD_L_ADDR until after register assignment.
      GET_TLS_ADDR,

      /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
      /// register assignment.
      ADDI_TLSGD_L_ADDR,
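
      // After expansion, the general-dynamic sequence looks like (a sketch):
      //   addis r3, r2, sym@got@tlsgd@ha   (ADDIS_TLSGD_HA)
      //   addi  r3, r3, sym@got@tlsgd@l    (ADDI_TLSGD_L)
      //   bl    __tls_get_addr(sym@tlsgd)  (GET_TLS_ADDR)
      //   nop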

      /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsld\@ha.
      ADDIS_TLSLD_HA,

      /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
      /// ADDI_TLSLD_L_ADDR until after register assignment.
      ADDI_TLSLD_L,

      /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
      /// ADDI_TLSLD_L_ADDR until after register assignment.
      GET_TLSLD_ADDR,

      /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
      /// following register assignment.
      ADDI_TLSLD_L_ADDR,

      /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds X3 to
      /// sym\@dtprel\@ha.
      ADDIS_DTPREL_HA,

      /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@dtprel\@l.
      ADDI_DTPREL_L,

      /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
      /// during instruction selection to optimize a BUILD_VECTOR into
      /// operations on splats. This is necessary to avoid losing these
      /// optimizations due to constant folding.
      VADD_SPLAT,

      /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
      /// operand identifies the operating system entry point.
      SC,

      /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
      CLRBHRB,

      /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
      /// history rolling buffer entry.
      MFBHRBE,

      /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
      RFEBB,

      /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
      /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
      /// or stxvd2x instruction. The chain is necessary because the
      /// sequence replaces a load and needs to provide the same number
      /// of outputs.
      XXSWAPD,

      /// An SDNode for swaps that are not associated with any loads/stores
      /// and thereby have no chain.
      SWAP_NO_CHAIN,

      /// An SDNode for Power9 vector absolute value difference.
      /// operand #0 vector
      /// operand #1 vector
      /// operand #2 constant i32 0 or 1, indicating whether the most
      /// significant bit needs to be patched for signed i32
      ///
      /// Power9 VABSD* instructions are designed to support unsigned integer
      /// vectors (byte/halfword/word). If we want to use them for signed
      /// integer vectors, we have to flip their sign bits first. Flipping the
      /// sign bits of a byte/halfword integer vector would be inefficient, but
      /// for a word integer vector we can leverage XVNEGSP to do it
      /// efficiently, e.g.:
      /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
      ///               => VABSDUW((XVNEGSP a), (XVNEGSP b))
      VABSD,

      /// QVFPERM = This corresponds to the QPX qvfperm instruction.
      QVFPERM,

      /// QVGPCI = This corresponds to the QPX qvgpci instruction.
      QVGPCI,

      /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
      QVALIGNI,

      /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
      QVESPLATI,

      /// QBFLT = Access the underlying QPX floating-point boolean
      /// representation.
      QBFLT,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr. Type can be either i16 or
      /// i32.
      STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
      /// or i32.
      LBRX,
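
      // These typically select to the byte-reversed memory instructions,
      // e.g. lhbrx/lwbrx for LBRX and sthbrx/stwbrx for STBRX (a sketch;
      // the Type operand picks the variant).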

      /// STFIWX - The STFIWX instruction. The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
      /// load which sign-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWAX,

      /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
      /// load which zero-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWZX,

      /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
      /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
      /// This can be used for converting loaded integers to floating point.
      LXSIZX,

      /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
      /// chain, then an f64 value to store, then an address to store it to,
      /// followed by a byte-width for the store.
      STXSIX,

      /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
      /// Maps directly to an lxvd2x instruction that will be followed by
      /// an xxswapd.
      LXVD2X,

      /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
      /// Maps directly to an stxvd2x instruction that will be preceded by
      /// an xxswapd.
      STXVD2X,

      /// Store scalar integers from VSR.
      ST_VSR_SCAL_INT,

      /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
      /// The 4xf32 load used for v4i1 constants.
      QVLFSb,

      /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
      /// except they ensure that the compare input is zero-extended for
      /// sub-word versions because the atomic loads zero-extend.
      ATOMIC_CMP_SWAP_8, ATOMIC_CMP_SWAP_16,

      /// GPRC = TOC_ENTRY GA, TOC
      /// Loads the entry for GA from the TOC, where the TOC base is given by
      /// the last operand.
      TOC_ENTRY
    };

  } // end namespace PPCISD

  /// Define some predicates that are used for node matching.
  namespace PPC {

    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUDUM instruction.
    bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGEW or VMRGOW instruction.
    bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                             unsigned ShuffleKind, SelectionDAG &DAG);

    /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXSLDWI instruction.
    bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                              bool &Swap, bool IsLE);

    /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRH instruction.
    bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);

    /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRW instruction.
    bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);

    /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRD instruction.
    bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);

    /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXBRQ instruction.
    bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);

    /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
    /// for a XXPERMDI instruction.
    bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                            SelectionDAG &DAG);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
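
    // For example, a v16i8 shuffle whose mask is all 5s is a splat of byte
    // element 5 and, with EltSize == 1, maps to vspltb 5 (illustrative).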

    /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
    /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
    /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
    /// vector into the other. This function will also set a couple of
    /// output parameters for how much the source vector needs to be shifted and
    /// what byte number needs to be specified for the instruction to put the
    /// element in the desired location of the target vector.
    bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                         unsigned &InsertAtByte, bool &Swap, bool IsLE);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
    /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted. The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
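
    // For example, a v16i8 build_vector of sixteen i8 -1 constants can be
    // formed with vspltisb -1 (ByteSize == 1); the returned SDValue is the
    // splatted constant (illustrative).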

    /// If this is a qvaligni shuffle mask, return the shift
    /// amount, otherwise return -1.
    int isQVALIGNIShuffleMask(SDNode *N);

  } // end namespace PPC

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &Subtarget;

  public:
    explicit PPCTargetLowering(const PPCTargetMachine &TM,
                               const PPCSubtarget &STI);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// getPreferredVectorAction - The code we generate when vector types are
    /// legalized by promoting the integer element type is often much worse
    /// than code we generate if we widen the type for applicable vector types.
    /// The issue with promoting is that the vector is scalarized, individual
    /// elements promoted and then the vector is rebuilt. So say we load a pair
    /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
    /// loads, moves back into VSRs (or memory ops if we don't have moves) and
    /// then the VPERM for the shuffle. All in all a very slow sequence.
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override {
      if (VT.getScalarSizeInBits() % 8 == 0)
        return TypeWidenVector;
      return TargetLoweringBase::getPreferredVectorAction(VT);
    }

    bool useSoftFloat() const override;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
      return MVT::i32;
    }

    bool isCheapToSpeculateCttz() const override {
      return true;
    }

    bool isCheapToSpeculateCtlz() const override {
      return true;
    }

    bool isCtlzFast() const override {
      return true;
    }

    bool hasAndNotCompare(SDValue) const override {
      return true;
    }

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    bool supportSplitCSR(MachineFunction *MF) const override {
      return
        MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
        MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
    }

    void initializeSplitCSR(MachineBasicBlock *Entry) const override;

    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    /// Return true if the target always benefits from combining into FMA for a
    /// given value type. This must typically return false on targets where FMA
    /// takes more cycles to execute than FADD.
    bool enableAggressiveFMAFusion(EVT VT) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                   SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation. Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg. If \p Alignment is non-zero, only
    /// accept displacements suitable for STD and friends, i.e. multiples of 4.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG, unsigned Alignment) const;

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          SmallVectorImpl<SDNode *> &Created) const override;

    unsigned getRegisterByName(const char* RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op,
                                       KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    unsigned getPrefLoopAlignment(MachineLoop *ML) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true;
    }

    Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                  AtomicOrdering Ord) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                   AtomicOrdering Ord) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;

    MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                        MachineBasicBlock *MBB,
                                        unsigned AtomicSize,
                                        unsigned BinOpcode,
                                        unsigned CmpOpcode = 0,
                                        unsigned CmpPred = 0) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode,
                                                unsigned CmpOpcode = 0,
                                                unsigned CmpPred = 0) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. This is the actual
    /// alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "es")
        return InlineAsm::Constraint_es;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "Z")
        return InlineAsm::Constraint_Z;
      else if (ConstraintCode == "Zy")
        return InlineAsm::Constraint_Zy;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is a legal
    /// icmp immediate, that is the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a legal
    /// add immediate, that is the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
    /// register X1 to i32 by referencing its sub-register R1.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;

    /// Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool convertSelectOfConstantsToMath(EVT VT) const override {
      return true;
    }

    // Returns true if the address of the global is stored in a TOC entry.
    bool isAccessedAsGotIndirect(SDValue N) const;

    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, that means the destination alignment can
    /// satisfy any constraint. Similarly, if SrcAlign is zero it means there
    /// isn't a need to check it against the alignment requirement, probably
    /// because the source does not need to be loaded. If 'IsMemset' is true,
    /// that means it's expanding a memset. If 'ZeroMemset' is true, that means
    /// it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
    /// source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const override;

    /// Is unaligned memory access allowed for the given type, and is it fast
    /// relative to software emulation.
    bool allowsMisalignedMemoryAccesses(EVT VT,
                                        unsigned AddrSpace,
                                        unsigned Align = 1,
                                        bool *Fast = nullptr) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

    // Should we expand the build vector with shuffles?
    bool
    shouldExpandBuildVectorWithShuffles(EVT VT,
                                        unsigned DefinedValues) const override;

    /// createFastISel - This method returns a target-specific FastISel object,
    /// or null if the target does not support "fast" instruction selection.
    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo) const override;

    /// Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
      // We support any array type as a "consecutive" block in the parameter
      // save area. The element type defines the alignment requirement and
      // whether the argument should go in GPRs, FPRs, or VRs if available.
      //
      // Note that clang uses this capability both to implement the ELFv2
      // homogeneous float/vector aggregate ABI, and to avoid having to use
      // "byval" when passing aggregates that might fully fit in registers.
      return Ty->isArrayTy();
    }
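
    // For example, under ELFv2 clang lowers a homogeneous aggregate such as
    // "struct { float x, y, z, w; }" to a [4 x float] argument, which then
    // lands in four consecutive FPRs (illustrative).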

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    /// Override to support customized stack guard loading.
    bool useLoadStackGuardNode() const override;
    void insertSSPDeclarations(Module &M) const override;

    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    unsigned getJumpTableEncoding() const override;
    bool isJumpTableRelative() const override;
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const override;
    const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                               unsigned JTI,
                                               MCContext &Ctx) const override;

    unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                           CallingConv::ID CC,
                                           EVT VT) const override;

    MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                      CallingConv::ID CC,
                                      EVT VT) const override;

  private:
    struct ReuseLoadInfo {
      SDValue Ptr;
      SDValue Chain;
      SDValue ResChain;
      MachinePointerInfo MPI;
      bool IsDereferenceable = false;
      bool IsInvariant = false;
      unsigned Alignment = 0;
      AAMDNodes AAInfo;
      const MDNode *Ranges = nullptr;

      ReuseLoadInfo() = default;

      MachineMemOperand::Flags MMOFlags() const {
        MachineMemOperand::Flags F = MachineMemOperand::MONone;
        if (IsDereferenceable)
          F |= MachineMemOperand::MODereferenceable;
        if (IsInvariant)
          F |= MachineMemOperand::MOInvariant;
        return F;
      }
    };

    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
      // Addrspacecasts are always noops.
      return true;
    }

    bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                             SelectionDAG &DAG,
                             ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
    void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
                         SelectionDAG &DAG) const;

    void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    bool directMoveIsProfitable(const SDValue &Op) const;
    SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                 const SDLoc &dl) const;

    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization_64SVR4(
      SDValue Callee,
      CallingConv::ID CalleeCC,
      ImmutableCallSite CS,
      bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      SelectionDAG &DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                         SDValue Chain, SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         const SDLoc &dl) const;

    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                           const SDLoc &dl) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
                       bool isTailCall, bool isVarArg, bool isPatchPoint,
                       bool hasNest, SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                       SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
                       SDValue &Callee, int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals,
                       ImmutableCallSite CS) const;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;

    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                              SelectionDAG &DAG, SDValue ArgVal,
                              const SDLoc &dl) const;

    SDValue LowerFormalArguments_Darwin(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_64SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_32SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

    SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                       SDValue CallSeqStart,
                                       ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                                       const SDLoc &dl) const;

    SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool isPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite CS) const;
    SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool isPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite CS) const;
    SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool isPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite CS) const;

    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;

    SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;

    /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
    /// SETCC with integer subtraction when (1) there is a legal way of doing it
    /// and (2) keeping the result of the comparison in a GPR has a performance
    /// benefit.
    SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                            int &RefinementSteps, bool &UseOneConstNR,
                            bool Reciprocal) const override;
    SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                             int &RefinementSteps) const override;
    unsigned combineRepeatedFPDivisors() const override;

    CCAssignFn *useFastISelCCs(unsigned Flag) const;

    SDValue
    combineElementTruncationToVectorTruncation(SDNode *N,
                                               DAGCombinerInfo &DCI) const;

    /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
    /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
    /// essentially any shuffle of v8i16 vectors that just inserts one element
    /// from one vector into the other.
    SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

    /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
    /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
    /// essentially the v16i8 vector version of VINSERTH.
    SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

    // Return whether the call instruction can potentially be optimized to a
    // tail call. This will cause the optimizers to attempt to move, or
    // duplicate return instructions to help enable tail call optimizations.
    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
    bool hasBitPreservingFPLogic(EVT VT) const override;
    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
  }; // end class PPCTargetLowering

  namespace PPC {

    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo);

  } // end namespace PPC

  bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                  CCValAssign::LocInfo &LocInfo,
                                  ISD::ArgFlagsTy &ArgFlags,
                                  CCState &State);

  bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State);

  bool
  CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                              MVT &LocVT,
                                              CCValAssign::LocInfo &LocInfo,
                                              ISD::ArgFlagsTy &ArgFlags,
                                              CCState &State);

  bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                           MVT &LocVT,
                                           CCValAssign::LocInfo &LocInfo,
                                           ISD::ArgFlagsTy &ArgFlags,
                                           CCState &State);

  bool isIntS16Immediate(SDNode *N, int16_t &Imm);
  bool isIntS16Immediate(SDValue Op, int16_t &Imm);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H