//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.
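                    // (For example, a PIC global address is typically formed
                    // by a constant-pool load of an offset followed by
                    // "LPC0: add r0, pc, r0".)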

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.
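                    // (These correspond to "vmov r0, r1, d0" and
                    // "vmov d0, r0, r1" respectively.)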

      EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
      EH_SJLJ_DISPATCHSETUP,  // SjLj exception handling dispatch setup.

      TC_RETURN,    // Tail call return pseudo.

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier (DMB)
      MEMBARRIER_MCR, // Memory barrier (MCR)

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP

      // NEON loads with post-increment base updates:

      // NEON stores with post-increment base updates:

      // 64-bit atomic ops (value split into two registers)
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
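    // (For instance, isBitFieldInvertedMask(0xffff00ff) is true: the
    // complement 0x0000ff00 is a single contiguous run of set bits, so an
    // AND with that mask can be selected to one BFC instruction.)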
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding(void) const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
    AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
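    // (On ARMv6+ cores that permit unaligned data access, plain LDR/LDRH and
    // STR/STRH handle misaligned i32/i16 accesses directly, so this can
    // return true for those types; older cores must expand such accesses
    // byte by byte.)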

    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    MachineFunction &MF) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
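    // (For example, 255 and 0xff00 are legal ARM-mode icmp immediates, since
    // CMP accepts any 8-bit value rotated right by an even amount, and an
    // immediate whose negation is encodable can be handled with CMN; 0x12345
    // is not encodable and must be materialized first.)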

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;
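    // (ADD/SUB take the same rotated 8-bit immediates as CMP, so e.g.
    // "add r0, r1, #0xff0000" is legal while "add r0, r1, #0x101" is not.)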

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
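    // (Pre-indexed form: "ldr r0, [r1, #4]!" updates r1 before the access;
    // post-indexed form: "ldr r0, [r1], #4" updates r1 afterwards. These
    // hooks let the DAG combiner fold the address update into the memory
    // operation.)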

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
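    // (VFPv3 VMOV can encode immediates of the form (-1)^s * 2^exp * (16+m)/16
    // with a 4-bit m and a small exponent range, i.e. values such as 1.0, -0.5
    // or 31.0; anything else, e.g. 0.1, has to come from the constant pool.)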

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;

    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              DebugLoc dl, SDValue &Chain, unsigned ArgOffset)
      const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned &VARegSize, unsigned &VARegSaveSize) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              ARMCC::CondCodes Cond) const;

    void EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB,
                                      MachineBasicBlock *DispatchBB) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    enum NEONModImmType {
      VMOVModImm,
      VMVNModImm,
      OtherModImm
    };
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H