//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file describes how to lower LLVM code to machine code. This has
/// three main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {

class BranchProbability;
class FunctionLoweringInfo;
class MachineBasicBlock;
class MachineFunction;
class MachineJumpTableInfo;
class MachineRegisterInfo;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetRegisterInfo;

namespace Sched {

  enum Preference {
    None,        // No preference
    Source,      // Follow source order.
    RegPressure, // Scheduling for lowest register pressure.
    Hybrid,      // Scheduling for both latency and register pressure.
    ILP,         // Scheduling for ILP in low register pressure mode.
    VLIW         // Scheduling for VLIW targets.
  };

} // end namespace Sched

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same-size integer type,
                         // if an operation is not supported in target HW.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat     // Replace this float with a larger one.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts; the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,            // Don't expand the instruction.
    LLSC,            // Expand the instruction into load-linked/store-conditional;
                     // used by ARM/AArch64.
    LLOnly,          // Expand the (load) instruction into just a load-linked,
                     // which has greater atomic guarantees than a normal load.
    CmpXChg,         // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  struct ArgListEntry {
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsInAlloca : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftError : 1;
    uint16_t Alignment = 0;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
          IsSwiftSelf(false), IsSwiftError(false) {}

    void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  /// NOTE: The TargetMachine owns TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// EVT is not used in-tree, but is used by out-of-tree targets.
  /// Documentation for this function would be nice...
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(bool IsFPSetCC) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorNumElements() == 1)
      return TypeScalarizeVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  virtual BranchProbability getPredictableBranchThreshold() const;

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations in
  /// the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
                                       EVT BitcastVT) const {
    // Don't do if we could do an indexed load on the original type, but not on
    // the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later, as
    // doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    return true;
  }

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT);
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging after legalization in addition to before legalization.
  /// This may catch stores that do not exist earlier (e.g., stores created from
  /// intrinsics).
  virtual bool mergeStoresAfterLegalization() const { return true; }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const SelectionDAG &DAG) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
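
  // An illustrative override sketch (not upstream code; "MyTargetLowering" and
  // the subtarget query are placeholders): a target with fast 256-bit vector
  // compares might report a vector type for an illegal wide integer width.
  // \code
  //   MVT MyTargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
  //     if (NumBits == 256 && Subtarget.hasWideVectorCompare())
  //       return MVT::v32i8; // compare 32 bytes at once
  //     return TargetLoweringBase::hasFastEqualityCompare(NumBits);
  //   }
  // \endcode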

  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x & (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 shifts is preferred.
  /// Return false if there is no preference.
  virtual bool preferShiftsToClearExtremeBits(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
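
  // A concrete instance of the check above (illustrative, not upstream text):
  // for %x : i32 and KeptBits == 8, C == 32 - 8 == 24, and the two forms are
  // \code
  //   (add %x, 128) ult 256         ; IR-optimal form
  //   ((%x << 24) a>> 24) == %x     ; sign-extension round-trip form
  // \endcode
  // Both are true exactly when %x fits in a signed 8-bit integer.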

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  ///
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target supports floating point exceptions.
  bool hasFloatingPointExceptions() const {
    return HasFloatingPointExceptions;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }
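
  // Illustrative sketch only (the helper below is hypothetical, not part of
  // this header): DAG code typically consults getBooleanContents() when it
  // must materialize the target's canonical "true" value for a type.
  // \code
  //   SDValue getCanonicalTrue(SelectionDAG &DAG, const TargetLoweringBase &TLI,
  //                            const SDLoc &dl, EVT VT) {
  //     if (TLI.getBooleanContents(VT) ==
  //         TargetLoweringBase::ZeroOrNegativeOneBooleanContent)
  //       return DAG.getAllOnesConstant(dl, VT); // all bits mirror bit 0
  //     return DAG.getConstant(1, dl, VT);       // only bit 0 matters
  //   }
  // \endcode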

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
  /// for different nodes. This function returns the preference (or none) for
  /// the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type: either it is already
  /// legal (return 'Legal'), we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
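
  // A sketch of how a caller consumes the breakdown, using the v8f32-on-SSE1
  // example from the comment above (illustrative only; variable names are
  // placeholders and assume v4f32 is a legal type on the target):
  // \code
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Ctx, EVT::getVectorVT(Ctx, MVT::f32, 8), IntermediateVT,
  //       NumIntermediates, RegisterVT);
  //   // NumRegs == 2, NumIntermediates == 2,
  //   // IntermediateVT == v4f32, RegisterVT == v4f32.
  // \endcode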

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned opc = 0;   // target opcode
    EVT memVT;          // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int offset = 0;     // offset off of ptrVal
    unsigned size = 0;  // the size of the memory location
                        // (taken from memVT if zero)
    unsigned align = 1; // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// require a scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
      case ISD::STRICT_FADD: EqOpc = ISD::FADD; break;
      case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
      case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
      case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
      case ISD::STRICT_FREM: EqOpc = ISD::FREM; break;
      case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
      case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
      case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
      case ISD::STRICT_FMA: EqOpc = ISD::FMA; break;
      case ISD::STRICT_FSIN: EqOpc = ISD::FSIN; break;
      case ISD::STRICT_FCOS: EqOpc = ISD::FCOS; break;
      case ISD::STRICT_FEXP: EqOpc = ISD::FEXP; break;
      case ISD::STRICT_FEXP2: EqOpc = ISD::FEXP2; break;
      case ISD::STRICT_FLOG: EqOpc = ISD::FLOG; break;
      case ISD::STRICT_FLOG10: EqOpc = ISD::FLOG10; break;
      case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break;
      case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break;
      case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break;
      case ISD::STRICT_FMAXNUM: EqOpc = ISD::FMAXNUM; break;
      case ISD::STRICT_FMINNUM: EqOpc = ISD::FMINNUM; break;
      case ISD::STRICT_FCEIL: EqOpc = ISD::FCEIL; break;
      case ISD::STRICT_FFLOOR: EqOpc = ISD::FFLOOR; break;
      case ISD::STRICT_FROUND: EqOpc = ISD::FROUND; break;
      case ISD::STRICT_FTRUNC: EqOpc = ISD::FTRUNC; break;
    }

    auto Action = getOperationAction(EqOpc, VT);

    // We don't currently handle Custom or Promote for strict FP pseudo-ops.
    // For now, we just expand for those cases.
    if (Action != Legal)
      Action = Expand;

    return Action;
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
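
  // Worked example (illustrative): with 64-bit pointers, BW == 64. A switch
  // covering case values 10..70 gives Range == 70 - 10 + 1 == 61 <= 64, so the
  // range fits in one machine word and a bit-test lowering is possible.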

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases over a range of \p Range
  /// values.
  /// FIXME: This function checks the maximum table size and density, but the
  /// minimum size is not checked. It would be nice if the minimum size were
  /// also combined within this function. Currently, the minimum size check is
  /// performed in findJumpTable() in SelectionDAGBuilder and
  /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range) const {
    const bool OptForSize = SI->getParent()->getParent()->optForSize();
    const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
    const unsigned MaxJumpTableSize =
        OptForSize || getMaximumJumpTableSize() == 0
            ? UINT_MAX
            : getMaximumJumpTableSize();
    // Check whether a range of clusters is dense enough for a jump table.
    if (Range <= MaxJumpTableSize &&
        (NumCases * 100 >= Range * MinDensity)) {
      return true;
    }
    return false;
  }
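
  // Worked density check (illustrative): with Range == 100, NumCases == 40 and
  // MinDensity == 40 (i.e. at least 40% of table slots used), the test is
  //   40 * 100 >= 100 * 40  =>  4000 >= 4000,
  // so a jump table is accepted; with NumCases == 39 it would be rejected.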

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or custom
  /// on this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
           (getTruncStoreAction(ValVT, MemVT) == Legal ||
            getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
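
  // Note on the encoding above: each IndexedModeActions[Ty][IdxMode] byte
  // packs two 4-bit LegalizeActions — the load action in the high nibble
  // (hence the `& 0xf0) >> 4`) and the store action in the low nibble
  // (hence the `& 0x0f`).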

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
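
  // Illustrative walk of the auto-promotion loop above (the opcode and types
  // here are hypothetical, not a statement about any particular backend): if
  // ISD::CTPOP on i16 is marked Promote and no explicit AddPromotedToType
  // entry exists, the loop steps i16 -> i32 -> ... until it reaches the first
  // type that is both legal and not itself marked Promote for the operation.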

  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area. This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces. For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type. For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
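
  // The i140 example from the comment above, spelled out: RegWidth == 32 and
  // BitWidth == 140, so (140 + 32 - 1) / 32 == 171 / 32 == 5 registers
  // (ceiling division via the add-then-truncate idiom in the code above).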

  /// Certain combinations of ABIs, Targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS all vector types must be passed through the integer register set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

  /// Certain targets have context sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
                                                 DataLayout DL) const {
    return DL.getABITypeAlignment(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// For memcmp expansion when the memcmp result is only compared equal or
  /// not-equal to 0, allow up to this number of load pairs per block. As an
  /// example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
  ///   a0 = load2bytes &a[0]
  ///   b0 = load2bytes &b[0]
  ///   a2 = load1byte  &a[2]
  ///   b2 = load1byte  &b[2]
  ///   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
  virtual unsigned getMemcmpEqZeroLoadsPerBlock() const {
    return 1;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// whether the unaligned memory access is "fast" in the last argument by
  /// reference. This is used, for example, in situations where an array
  /// copy/move/set is converted to a sequence of store operations. Its use
  /// helps to ensure that such replacements don't generate code that causes an
  /// alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(EVT,
                                              unsigned AddrSpace = 0,
                                              unsigned Align = 1,
                                              bool * /*Fast*/ = nullptr) const {
    return false;
  }

  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          unsigned AddrSpace = 0, unsigned Alignment = 1,
                          bool *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any constraint.
  /// Similarly, if SrcAlign is zero it means there isn't a need to check it
  /// against the alignment requirement, probably because the source does not
  /// need to be loaded. If 'IsMemset' is true, that means it's expanding a
  /// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
  /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  /// not need to be loaded. It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
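
  // An illustrative override of getOptimalMemOpType (hypothetical target, not
  // upstream code; "MyTargetLowering" and the subtarget query are
  // placeholders): a backend with fast unaligned 128-bit stores might pick a
  // vector type for large copies and defer to generic logic otherwise.
  // \code
  //   EVT MyTargetLowering::getOptimalMemOpType(
  //       uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
  //       bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const {
  //     if (Size >= 16 && Subtarget.hasFastUnalignedVector())
  //       return MVT::v4i32;
  //     return MVT::Other; // let generic lowering decide
  //   }
  // \endcode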

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return lower limit for number of blocks in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const {
    return TM.isPositionIndependent();
  }

  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets
    return 0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets
    return 0;
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }
1455 /// Returns the target's jmp_buf size in bytes (if never set, the default is
1457 unsigned getJumpBufSize() const {
1461 /// Returns the target's jmp_buf alignment in bytes (if never set, the default
1463 unsigned getJumpBufAlignment() const {
1464 return JumpBufAlignment;
1467 /// Return the minimum stack alignment of an argument.
1468 unsigned getMinStackArgumentAlignment() const {
1469 return MinStackArgumentAlignment;
1472 /// Return the minimum function alignment.
1473 unsigned getMinFunctionAlignment() const {
1474 return MinFunctionAlignment;
1477 /// Return the preferred function alignment.
1478 unsigned getPrefFunctionAlignment() const {
1479 return PrefFunctionAlignment;
1482 /// Return the preferred loop alignment.
1483 virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1484 return PrefLoopAlignment;
1487 /// Should loops be aligned even when the function is marked OptSize (but not
1488 /// MinSize).
1489 virtual bool alignLoopsWithOptSize() const {
1493 /// If the target has a standard location for the stack protector guard,
1494 /// returns the address of that location. Otherwise, returns nullptr.
1495 /// DEPRECATED: please override useLoadStackGuardNode and customize
1496 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1497 virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
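// A minimal sketch of an override, assuming the target wants the guard
// loaded from a conventional global (names here are illustrative, not a
// real target's implementation):
//
// \code
//   Value *MyTargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
//     Module *M = IRB.GetInsertBlock()->getParent()->getParent();
//     return M->getOrInsertGlobal("__stack_chk_guard", IRB.getInt8PtrTy());
//   }
// \endcode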
1499 /// Inserts necessary declarations for SSP (stack protection) purpose.
1500 /// Should be used only when getIRStackGuard returns nullptr.
1501 virtual void insertSSPDeclarations(Module &M) const;
1503 /// Return the variable that's previously inserted by insertSSPDeclarations,
1504 /// if any, otherwise return nullptr. Should be used only when
1505 /// getIRStackGuard returns nullptr.
1506 virtual Value *getSDagStackGuard(const Module &M) const;
1508 /// If this function returns true, stack protection checks should XOR the
1509 /// frame pointer (or whichever pointer is used to address locals) into the
1510 /// stack guard value before checking it. getIRStackGuard must return nullptr
1511 /// if this returns true.
1512 virtual bool useStackGuardXorFP() const { return false; }
1514 /// If the target has a standard stack protection check function that
1515 /// performs validation and error handling, returns the function. Otherwise,
1516 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1517 /// Should be used only when getIRStackGuard returns nullptr.
1518 virtual Value *getSSPStackGuardCheck(const Module &M) const;
1521 Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1522                                           bool UseTLS) const;
1525 /// Returns the target-specific address of the unsafe stack pointer.
1526 virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1528 /// Returns the name of the symbol used to emit stack probes or the empty
1529 /// string if not applicable.
1530 virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
1534 /// Returns true if a cast between SrcAS and DestAS is a noop.
1535 virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1539 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1540 /// are happy to sink it into basic blocks.
1541 virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1542 return isNoopAddrSpaceCast(SrcAS, DestAS);
1545 /// Return true if the pointer arguments to CI should be aligned by aligning
1546 /// the object whose address is being passed. If so then MinSize is set to the
1547 /// minimum size the object must be to be aligned and PrefAlign is set to the
1548 /// preferred alignment.
1549 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1550 unsigned & /*PrefAlign*/) const {
1554 //===--------------------------------------------------------------------===//
1555 /// \name Helpers for TargetTransformInfo implementations
1558 /// Get the ISD node that corresponds to the Instruction class opcode.
1559 int InstructionOpcodeToISD(unsigned Opcode) const;
1561 /// Estimate the cost of type-legalization and the legalized type.
1562 std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
1563                                             Type *Ty) const;
1567 //===--------------------------------------------------------------------===//
1568 /// \name Helpers for atomic expansion.
1571 /// Returns the maximum atomic operation size (in bits) supported by
1572 /// the backend. Atomic operations greater than this size (as well
1573 /// as ones that are not naturally aligned), will be expanded by
1574 /// AtomicExpandPass into an __atomic_* library call.
1575 unsigned getMaxAtomicSizeInBitsSupported() const {
1576 return MaxAtomicSizeInBitsSupported;
1579 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1580 /// the backend supports. Any smaller operations are widened in
1581 /// AtomicExpandPass.
1583 /// Note that *unlike* operations above the maximum size, atomic ops
1584 /// are still natively supported below the minimum; they just
1585 /// require a more complex expansion.
1586 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1588 /// Whether the target supports unaligned atomic operations.
1589 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1591 /// Whether AtomicExpandPass should automatically insert fences and reduce
1592 /// ordering for this atomic. This should be true for most architectures with
1593 /// weak memory ordering. Defaults to false.
1594 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1598 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1599 /// corresponding pointee type. This may entail some non-trivial operations to
1600 /// truncate or reconstruct types that will be illegal in the backend. See
1601 /// ARMISelLowering for an example implementation.
1602 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1603 AtomicOrdering Ord) const {
1604 llvm_unreachable("Load linked unimplemented on this target");
1607 /// Perform a store-conditional operation to Addr. Return the status of the
1608 /// store. This should be 0 if the store succeeded, non-zero otherwise.
1609 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1610 Value *Addr, AtomicOrdering Ord) const {
1611 llvm_unreachable("Store conditional unimplemented on this target");
1614 /// Perform a masked atomicrmw using a target-specific intrinsic. This
1615 /// represents the core LL/SC loop which will be lowered at a late stage by
1617 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
1618                                             AtomicRMWInst *AI,
1619                                             Value *AlignedAddr, Value *Incr,
1620 Value *Mask, Value *ShiftAmt,
1621 AtomicOrdering Ord) const {
1622 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
1625 /// Perform a masked cmpxchg using a target-specific intrinsic. This
1626 /// represents the core LL/SC loop which will be lowered at a late stage by
1628 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
1629 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
1630 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
1631 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
1634 /// Inserts in the IR a target-specific intrinsic specifying a fence.
1635 /// It is called by AtomicExpandPass before expanding an
1636 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1637 /// if shouldInsertFencesForAtomic returns true.
1639 /// Inst is the original atomic instruction, prior to other expansions that
1640 /// may be performed.
1642 /// This function should either return a nullptr, or a pointer to an IR-level
1643 /// Instruction*. Even complex fence sequences can be represented by a
1644 /// single Instruction* through an intrinsic to be lowered later.
1645 /// Backends should override this method to produce target-specific intrinsic
1646 /// for their fences.
1647 /// FIXME: Please note that the default implementation here in terms of
1648 /// IR-level fences exists for historical/compatibility reasons and is
1649 /// *unsound* ! Fences cannot, in general, be used to restore sequential
1650 /// consistency. For example, consider the following example:
1651 /// atomic<int> x = y = 0;
1652 /// int r1, r2, r3, r4;
1653 /// Thread 0:
1654 ///   x.store(1);
1655 /// Thread 1:
1656 ///   y.store(1);
1657 /// Thread 2:
1658 ///   r1 = x.load();
1659 ///   r2 = y.load();
1660 /// Thread 3:
1661 ///   r3 = y.load();
1662 ///   r4 = x.load();
1663 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1664 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1665 /// IR-level fences can prevent it.
1667 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
1668 AtomicOrdering Ord) const {
1669 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
1670   return Builder.CreateFence(Ord);
1671 else
1672   return nullptr;
1673 }
1675 virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
1676                                        Instruction *Inst,
1677                                        AtomicOrdering Ord) const {
1678   if (isAcquireOrStronger(Ord))
1679     return Builder.CreateFence(Ord);
1680   else
1681     return nullptr;
1682 }
1685 // Emits code that executes when the comparison result in the ll/sc
1686 // expansion of a cmpxchg instruction is such that the store-conditional will
1687 // not execute. This makes it possible to balance out the load-linked with
1688 // a dedicated instruction, if desired.
1689 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1690 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1691 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1693 /// Returns true if the given (atomic) store should be expanded by the
1694 /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1695 virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1699 /// Returns true if arguments should be sign-extended in lib calls.
1700 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1704 /// Returns how the given (atomic) load should be expanded by the
1705 /// IR-level AtomicExpand pass.
1706 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1707 return AtomicExpansionKind::None;
1710 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
1711 /// AtomicExpand pass.
1712 virtual AtomicExpansionKind
1713 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
1714 return AtomicExpansionKind::None;
1717 /// Returns how the IR-level AtomicExpand pass should expand the given
1718 /// AtomicRMW, if at all. Default is to never expand.
1719 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
1720 return AtomicExpansionKind::None;
1723 /// On some platforms, an AtomicRMW that never actually modifies the value
1724 /// (such as fetch_add of 0) can be turned into a fence followed by an
1725 /// atomic load. This may sound useless, but it makes it possible for the
1726 /// processor to keep the cacheline shared, dramatically improving
1727 /// performance. And such idempotent RMWs are useful for implementing some
1728 /// kinds of locks, see for example (justification + benchmarks):
1729 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1730 /// This method tries doing that transformation, returning the atomic load if
1731 /// it succeeds, and nullptr otherwise.
1732 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1733 /// another round of expansion.
1734 virtual LoadInst *
1735 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
1736   return nullptr;
1737 }
1739 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1740 /// SIGN_EXTEND, or ANY_EXTEND).
1741 virtual ISD::NodeType getExtendForAtomicOps() const {
1742 return ISD::ZERO_EXTEND;
1747 /// Returns true if we should normalize
1748 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1749 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
1750 /// that it saves us from materializing N0 and N1 in an integer register.
1751 /// Targets that are able to perform and/or on flags should return false here.
1752 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
1753                                              EVT VT) const {
1754 // If a target has multiple condition registers, then it likely has logical
1755 // operations on those registers.
1756 if (hasMultipleConditionRegisters())
1757   return false;
1758 // Only do the transform if the value won't be split into multiple
1759 // registers.
1760 LegalizeTypeAction Action = getTypeAction(Context, VT);
1761 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1762 Action != TypeSplitVector;
1765 /// Return true if a select of constants (select Cond, C1, C2) should be
1766 /// transformed into simple math ops with the condition value. For example:
1767 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
1768 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
1772 /// Return true if it is profitable to transform an integer
1773 /// multiplication-by-constant into simpler operations like shifts and adds.
1774 /// This may be true if the target does not directly support the
1775 /// multiplication operation for the specified type or the sequence of simpler
1776 /// ops is faster than the multiply.
1777 virtual bool decomposeMulByConstant(EVT VT, SDValue C) const {
1781 /// Return true if it is more correct/profitable to use strict FP_TO_INT
1782 /// conversion operations - canonicalizing the FP source value instead of
1783 /// converting all cases and then selecting based on value.
1784 /// This may be true if the target throws exceptions for out of bounds
1785 /// conversions or has fast FP CMOV.
1786 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
1787 bool IsSigned) const {
1791 //===--------------------------------------------------------------------===//
1792 // TargetLowering Configuration Methods - These methods should be invoked by
1793 // the derived class constructor to configure this object for the target.
1796 /// Specify how the target extends the result of integer and floating point
1797 /// boolean values from i1 to a wider type. See getBooleanContents.
1798 void setBooleanContents(BooleanContent Ty) {
1799 BooleanContents = Ty;
1800 BooleanFloatContents = Ty;
1803 /// Specify how the target extends the result of integer and floating point
1804 /// boolean values from i1 to a wider type. See getBooleanContents.
1805 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
1806 BooleanContents = IntTy;
1807 BooleanFloatContents = FloatTy;
1810 /// Specify how the target extends the result of a vector boolean value from a
1811 /// vector of i1 to a wider type. See getBooleanContents.
1812 void setBooleanVectorContents(BooleanContent Ty) {
1813 BooleanVectorContents = Ty;
1816 /// Specify the target scheduling preference.
1817 void setSchedulingPreference(Sched::Preference Pref) {
1818 SchedPreferenceInfo = Pref;
1821 /// Indicate whether this target prefers to use _setjmp to implement
1822 /// llvm.setjmp or the version without _. Defaults to false.
1823 void setUseUnderscoreSetJmp(bool Val) {
1824 UseUnderscoreSetJmp = Val;
1827 /// Indicate whether this target prefers to use _longjmp to implement
1828 /// llvm.longjmp or the version without _. Defaults to false.
1829 void setUseUnderscoreLongJmp(bool Val) {
1830 UseUnderscoreLongJmp = Val;
1833 /// Indicate the minimum number of blocks to generate jump tables.
1834 void setMinimumJumpTableEntries(unsigned Val);
1836 /// Indicate the maximum number of entries in jump tables.
1837 /// Set to zero to generate unlimited jump tables.
1838 void setMaximumJumpTableSize(unsigned);
1840 /// If set to a physical register, this specifies the register that
1841 /// llvm.stacksave/llvm.stackrestore should save and restore.
1842 void setStackPointerRegisterToSaveRestore(unsigned R) {
1843 StackPointerRegisterToSaveRestore = R;
1846 /// Tells the code generator that the target has multiple (allocatable)
1847 /// condition registers that can be used to store the results of comparisons
1848 /// for use by selects and conditional branches. With multiple condition
1849 /// registers, the code generator will not aggressively sink comparisons into
1850 /// the blocks of their users.
1851 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1852 HasMultipleConditionRegisters = hasManyRegs;
1855 /// Tells the code generator that the target has BitExtract instructions.
1856 /// The code generator will aggressively sink "shift"s into the blocks of
1857 /// their users if the users will generate "and" instructions which can be
1858 /// combined with "shift" to BitExtract instructions.
1859 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1860 HasExtractBitsInsn = hasExtractInsn;
1863 /// Tells the code generator not to expand logic operations on comparison
1864 /// predicates into separate sequences that increase the amount of flow
1865 /// control.
1866 void setJumpIsExpensive(bool isExpensive = true);
1868 /// Tells the code generator that this target supports floating point
1869 /// exceptions and cares about preserving floating point exception behavior.
1870 void setHasFloatingPointExceptions(bool FPExceptions = true) {
1871 HasFloatingPointExceptions = FPExceptions;
1874 /// Tells the code generator which bitwidths to bypass.
1875 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1876 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1877 }
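// For example, a target whose 32-bit divide is microcoded but whose 8-bit
// divide is fast might request, in its TargetLowering constructor:
//
// \code
//   addBypassSlowDiv(32, 8); // try 8-bit unsigned div/rem for small operands
// \endcode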
1879 /// Add the specified register class as an available regclass for the
1880 /// specified value type. This indicates the selector can handle values of
1881 /// that class natively.
1882 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
1883 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1884 RegClassForVT[VT.SimpleTy] = RC;
1887 /// Return the largest legal super-reg register class of the register class
1888 /// for the specified type and its associated "cost".
1889 virtual std::pair<const TargetRegisterClass *, uint8_t>
1890 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
1892 /// Once all of the register classes are added, this allows us to compute
1893 /// derived properties we expose.
1894 void computeRegisterProperties(const TargetRegisterInfo *TRI);
1896 /// Indicate that the specified operation does not work with the specified
1897 /// type and indicate what to do about it. Note that VT may refer to either
1898 /// the type of a result or that of an operand of Op.
1899 void setOperationAction(unsigned Op, MVT VT,
1900 LegalizeAction Action) {
1901 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1902 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
1905 /// Indicate that the specified load with extension does not work with the
1906 /// specified type and indicate what to do about it.
1907 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
1908 LegalizeAction Action) {
1909 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
1910 MemVT.isValid() && "Table isn't big enough!");
1911 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1912 unsigned Shift = 4 * ExtType;
1913 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
1914 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
1917 /// Indicate that the specified truncating store does not work with the
1918 /// specified type and indicate what to do about it.
1919 void setTruncStoreAction(MVT ValVT, MVT MemVT,
1920 LegalizeAction Action) {
1921 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
1922 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
1923 }
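// Taken together, a target constructor typically registers its register
// classes and then marks unsupported operations. A condensed sketch (the
// register class and Subtarget names are hypothetical):
//
// \code
//   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
//   computeRegisterProperties(Subtarget.getRegisterInfo());
//   setOperationAction(ISD::SDIV, MVT::i32, Expand);   // no divide unit
//   setOperationAction(ISD::SELECT, MVT::i32, Custom); // see LowerOperation
//   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
//   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// \endcode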
1925 /// Indicate that the specified indexed load does or does not work with the
1926 /// specified type and indicate what to do about it.
1928 /// NOTE: All indexed mode loads are initialized to Expand in
1929 /// TargetLowering.cpp
1930 void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1931 LegalizeAction Action) {
1932 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1933 (unsigned)Action < 0xf && "Table isn't big enough!");
1934 // Load actions are kept in the upper half.
1935 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1936 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1939 /// Indicate that the specified indexed store does or does not work with the
1940 /// specified type and indicate what to do about it.
1942 /// NOTE: All indexed mode stores are initialized to Expand in
1943 /// TargetLowering.cpp
1944 void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1945 LegalizeAction Action) {
1946 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1947 (unsigned)Action < 0xf && "Table isn't big enough!");
1948 // Store actions are kept in the lower half.
1949 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1950 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1953 /// Indicate that the specified condition code is or isn't supported on the
1954 /// target and indicate what to do about it.
1955 void setCondCodeAction(ISD::CondCode CC, MVT VT,
1956 LegalizeAction Action) {
1957 assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
1958 "Table isn't big enough!");
1959 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1960 /// The lower 3 bits of the SimpleTy select the Nth 4-bit slice within the
1961 /// 32-bit value, and the upper 29 bits index into the second dimension of the
1962 /// array to select which 32-bit value to use.
1963 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1964 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
1965 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
1966 }
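// Worked example: for a SimpleTy of 10, the action is stored in the 4-bit
// slice at Shift = 4 * (10 & 0x7) = 8 within word 10 >> 3 = 1 of the row
// CondCodeActions[CC].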
1968 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1969 /// to trying a larger integer/fp until it can find one that works. If that
1970 /// default is insufficient, this method can be used by the target to override
1971 /// the default.
1972 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1973 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1976 /// Convenience method to set an operation to Promote and specify the type
1977 /// in a single call.
1978 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1979 setOperationAction(Opc, OrigVT, Promote);
1980 AddPromotedToType(Opc, OrigVT, DestVT);
1983 /// Targets should invoke this method for each target independent node that
1984 /// they want to provide a custom DAG combiner for by implementing the
1985 /// PerformDAGCombine virtual method.
1986 void setTargetDAGCombine(ISD::NodeType NT) {
1987 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1988 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
1989 }
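// Typical usage from a target's constructor, paired with a matching
// PerformDAGCombine override (illustrative sketch):
//
// \code
//   setTargetDAGCombine(ISD::SHL); // request callbacks on SHL nodes
//   setTargetDAGCombine(ISD::SRL);
// \endcode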
1991 /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1992 void setJumpBufSize(unsigned Size) {
1993   JumpBufSize = Size;
1994 }
1996 /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1997 /// 0.
1998 void setJumpBufAlignment(unsigned Align) {
1999 JumpBufAlignment = Align;
2002 /// Set the target's minimum function alignment (in log2(bytes))
2003 void setMinFunctionAlignment(unsigned Align) {
2004 MinFunctionAlignment = Align;
2007 /// Set the target's preferred function alignment. This should be set if
2008 /// there is a performance benefit to higher-than-minimum alignment (in
2009 /// log2(bytes)).
2010 void setPrefFunctionAlignment(unsigned Align) {
2011 PrefFunctionAlignment = Align;
2014 /// Set the target's preferred loop alignment. Default alignment is zero, which
2015 /// means the target does not care about loop alignment. The alignment is
2016 /// specified in log2(bytes). The target may also override
2017 /// getPrefLoopAlignment to provide per-loop values.
2018 void setPrefLoopAlignment(unsigned Align) {
2019 PrefLoopAlignment = Align;
2022 /// Set the minimum stack alignment of an argument (in log2(bytes)).
2023 void setMinStackArgumentAlignment(unsigned Align) {
2024 MinStackArgumentAlignment = Align;
2027 /// Set the maximum atomic operation size supported by the
2028 /// backend. Atomic operations greater than this size (as well as
2029 /// ones that are not naturally aligned), will be expanded by
2030 /// AtomicExpandPass into an __atomic_* library call.
2031 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2032 MaxAtomicSizeInBitsSupported = SizeInBits;
2035 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2036 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2037 MinCmpXchgSizeInBits = SizeInBits;
2040 /// Sets whether unaligned atomic operations are supported.
2041 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2042 SupportsUnalignedAtomics = UnalignedSupported;
2043 }
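// A hypothetical 64-bit target's constructor might configure atomics as
// follows (a sketch of how these setters compose, not a real target):
//
// \code
//   setMaxAtomicSizeInBitsSupported(64); // bigger ops -> __atomic_* libcalls
//   setMinCmpXchgSizeInBits(32);         // narrower cmpxchg ops are widened
//   setSupportsUnalignedAtomics(false);
// \endcode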
2046 //===--------------------------------------------------------------------===//
2047 // Addressing mode description hooks (used by LSR etc).
2050 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2051 /// instructions reading the address. This allows as much computation as
2052 /// possible to be done in the address mode for that operand. This hook lets
2053 /// targets also pass back when this should be done on intrinsics which
2054 /// read/write memory.
2055 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2056 SmallVectorImpl<Value*> &/*Ops*/,
2057 Type *&/*AccessTy*/) const {
2061 /// This represents an addressing mode of:
2062 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2063 /// If BaseGV is null, there is no BaseGV.
2064 /// If BaseOffs is zero, there is no base offset.
2065 /// If HasBaseReg is false, there is no base register.
2066 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2067 /// no scale.
2068 struct AddrMode {
2069   GlobalValue *BaseGV = nullptr;
2070   int64_t BaseOffs = 0;
2071   bool HasBaseReg = false;
2072   int64_t Scale = 0;
2073   AddrMode() = default;
2074 };
2076 /// Return true if the addressing mode represented by AM is legal for this
2077 /// target, for a load/store of the specified type.
2079 /// The type may be VoidTy, in which case only return true if the addressing
2080 /// mode is legal for a load/store of any legal type. TODO: Handle
2081 /// pre/postinc as well.
2083 /// If the address space cannot be determined, it will be -1.
2085 /// TODO: Remove default argument
2086 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2087 Type *Ty, unsigned AddrSpace,
2088 Instruction *I = nullptr) const;
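// For instance, an x86 address such as [%rbx + 4*%rcx + 16] corresponds
// (approximately) to the following query; Int32Ty stands in for whatever
// type is being loaded or stored:
//
// \code
//   AddrMode AM;
//   AM.BaseGV = nullptr;   // no global base
//   AM.BaseOffs = 16;      // constant displacement
//   AM.HasBaseReg = true;  // %rbx
//   AM.Scale = 4;          // 4 * %rcx
//   bool Legal = isLegalAddressingMode(DL, AM, Int32Ty, /*AddrSpace=*/0);
// \endcode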
2090 /// Return the cost of the scaling factor used in the addressing mode
2091 /// represented by AM for this target, for a load/store of the specified type.
2093 /// If the AM is supported, the return value must be >= 0.
2094 /// If the AM is not supported, it returns a negative value.
2095 /// TODO: Handle pre/postinc as well.
2096 /// TODO: Remove default argument
2097 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
2098 Type *Ty, unsigned AS = 0) const {
2099 // Default: assume that any scaling factor used in a legal AM is free.
2100 if (isLegalAddressingMode(DL, AM, Ty, AS))
2101   return 0;
2102 return -1;
2103 }
2105 /// Return true if the specified immediate is a legal icmp immediate, that is
2106 /// the target has icmp instructions which can compare a register against the
2107 /// immediate without having to materialize the immediate into a register.
2108 virtual bool isLegalICmpImmediate(int64_t) const {
2112 /// Return true if the specified immediate is a legal add immediate, that is the
2113 /// target has add instructions which can add a register with the immediate
2114 /// without having to materialize the immediate into a register.
2115 virtual bool isLegalAddImmediate(int64_t) const {
2119 /// Return true if the specified immediate is legal for the value input of a
2120 /// store instruction.
2121 virtual bool isLegalStoreImmediate(int64_t Value) const {
2122 // Default implementation assumes that at least 0 works since it is likely
2123 // that a zero register exists or a zero immediate is allowed.
2124 return Value == 0;
2125 }
2127 /// Return true if it's significantly cheaper to shift a vector by a uniform
2128 /// scalar than by an amount which will vary across each lane. On x86, for
2129 /// example, there is a "psllw" instruction for the former case, but no simple
2130 /// instruction for a general "a << b" operation on vectors.
2131 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2135 /// Returns true if the opcode is a commutative binary operation.
2136 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2137 // FIXME: This should get its info from the td file.
2138 switch (Opcode) {
2139 case ISD::ADD:
2140 case ISD::SMIN:
2141 case ISD::SMAX:
2142 case ISD::UMIN:
2143 case ISD::UMAX:
2144 case ISD::MUL:
2145 case ISD::MULHU:
2146 case ISD::MULHS:
2147 case ISD::SMUL_LOHI:
2148 case ISD::UMUL_LOHI:
2149 case ISD::FADD:
2150 case ISD::FMUL:
2151 case ISD::AND:
2152 case ISD::OR:
2153 case ISD::XOR:
2154 case ISD::SADDO:
2155 case ISD::UADDO:
2156 case ISD::ADDC:
2157 case ISD::ADDE:
2158 case ISD::SADDSAT:
2159 case ISD::UADDSAT:
2160 case ISD::FMINNUM:
2161 case ISD::FMAXNUM:
2162   return true;
2163 default: return false;
2164 }
2165 }
2167 /// Return true if it's free to truncate a value of type FromTy to type
2168 /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
2169 /// by referencing its sub-register AX.
2170 /// Targets must return false when FromTy <= ToTy.
2171 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2175 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2176 /// whether a call is in tail position. Typically this means that both results
2177 /// would be assigned to the same register or stack slot, but it could mean
2178 /// the target performs adequate checks of its own before proceeding with the
2179 /// tail call. Targets must return false when FromTy <= ToTy.
2180 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2184 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
2188 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2190 /// Return true if the extension represented by \p I is free.
2191 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2192 /// this method can use the context provided by \p I to decide
2193 /// whether or not \p I is free.
2194 /// This method extends the behavior of the is[Z|FP]ExtFree family.
2195 /// In other words, if is[Z|FP]ExtFree returns true, then this method
2196 /// returns true as well. The converse is not true.
2197 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2198 /// \pre \p I must be a sign, zero, or fp extension.
2199 bool isExtFree(const Instruction *I) const {
2200 switch (I->getOpcode()) {
2201 case Instruction::FPExt:
2202   if (isFPExtFree(EVT::getEVT(I->getType()),
2203                   EVT::getEVT(I->getOperand(0)->getType())))
2204     return true;
2205   break;
2206 case Instruction::ZExt:
2207   if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2208     return true;
2209   break;
2210 case Instruction::SExt:
2211   break;
2212 default:
2213   llvm_unreachable("Instruction is not an extension");
2214 }
2215 return isExtFreeImpl(I);
2216 }
2218 /// Return true if \p Load and \p Ext can form an ExtLoad.
2219 /// For example, in AArch64
2220 /// %L = load i8, i8* %ptr
2221 /// %E = zext i8 %L to i32
2222 /// can be lowered into one load instruction
2224 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2225 const DataLayout &DL) const {
2226 EVT VT = getValueType(DL, Ext->getType());
2227 EVT LoadVT = getValueType(DL, Load->getType());
2229 // If the load has other users and the truncate is not free, the ext
2230 // probably isn't free.
2231 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2232     !isTruncateFree(Ext->getType(), Load->getType()))
2233   return false;
2235 // Check whether the target supports casts folded into loads.
2236 ISD::LoadExtType LType;
2237 if (isa<ZExtInst>(Ext))
2238   LType = ISD::ZEXTLOAD;
2239 else {
2240   assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2241   LType = ISD::SEXTLOAD;
2242 }
2244 return isLoadExtLegal(LType, VT, LoadVT);
2245 }
2247 /// Return true if any actual instruction that defines a value of type FromTy
2248 /// implicitly zero-extends the value to ToTy in the result register.
2250 /// The function should return true when it is likely that the truncate can
2251 /// be freely folded with an instruction defining a value of FromTy. If
2252 /// the defining instruction is unknown (because you're looking at a
2253 /// function argument, PHI, etc.) then the target may require an
2254 /// explicit truncate, which is not necessarily free, but this function
2255 /// does not deal with those cases.
2256 /// Targets must return false when FromTy >= ToTy.
2257 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2261 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
2265 /// Return true if sign-extension from FromTy to ToTy is cheaper than
2266 /// zero-extension.
2267 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2268   return false;
2269 }
2271 /// Return true if the target supplies and combines to a paired load
2272 /// two loaded values of type LoadedType next to each other in memory.
2273 /// RequiredAlignment gives the minimal alignment constraints that must be met
2274 /// to be able to select this paired load.
2276 /// This information is *not* used to generate actual paired loads, but it is
2277 /// used to generate a sequence of loads that is easier to combine into a
2278 /// paired load.
2279 /// For instance, something like this:
2280 /// a = load i64* addr
2281 /// b = trunc i64 a to i32
2282 /// c = lshr i64 a, 32
2283 /// d = trunc i64 c to i32
2284 /// will be optimized into:
2285 /// b = load i32* addr1
2286 /// d = load i32* addr2
2287 /// Where addr1 = addr2 +/- sizeof(i32).
2289 /// In other words, unless the target performs a post-isel load combining,
2290 /// this information should not be provided because it will generate more
2291 /// loads.
2292 virtual bool hasPairedLoad(EVT /*LoadedType*/,
2293 unsigned & /*RequiredAlignment*/) const {
2297 /// Return true if the target has a vector blend instruction.
2298 virtual bool hasVectorBlend() const { return false; }
2300 /// Get the maximum supported factor for interleaved memory accesses.
2301 /// Default to be the minimum interleave factor: 2.
2302 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2304 /// Lower an interleaved load to target specific intrinsics. Return
2305 /// true on success.
2307 /// \p LI is the vector load instruction.
2308 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2309 /// \p Indices is the corresponding indices for each shufflevector.
2310 /// \p Factor is the interleave factor.
2311 virtual bool lowerInterleavedLoad(LoadInst *LI,
2312 ArrayRef<ShuffleVectorInst *> Shuffles,
2313 ArrayRef<unsigned> Indices,
2314 unsigned Factor) const {
2318 /// Lower an interleaved store to target specific intrinsics. Return
2319 /// true on success.
2321 /// \p SI is the vector store instruction.
2322 /// \p SVI is the shufflevector to RE-interleave the stored vector.
2323 /// \p Factor is the interleave factor.
2324 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2325 unsigned Factor) const {
2329 /// Return true if zero-extending the specific node Val to type VT2 is free
2330 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2331 /// because it's folded such as X86 zero-extending loads).
2332 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2333 return isZExtFree(Val.getValueType(), VT2);
2336 /// Return true if an fpext operation is free (for instance, because
2337 /// single-precision floating-point numbers are implicitly extended to
2338 /// double-precision).
2339 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2340 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2341 "invalid fpext types");
2345 /// Return true if an fpext operation input to an \p Opcode operation is free
2346 /// (for instance, because half-precision floating-point numbers are
2347 /// implicitly extended to float-precision) for an FMA instruction.
2348 virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const {
2349 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2350 "invalid fpext types");
2351 return isFPExtFree(DestVT, SrcVT);
2354 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2355 /// extend node) is profitable.
2356 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2358 /// Return true if an fneg operation is free to the point where it is never
2359 /// worthwhile to replace it with a bitwise operation.
2360 virtual bool isFNegFree(EVT VT) const {
2361 assert(VT.isFloatingPoint());
2365 /// Return true if an fabs operation is free to the point where it is never
2366 /// worthwhile to replace it with a bitwise operation.
2367 virtual bool isFAbsFree(EVT VT) const {
2368 assert(VT.isFloatingPoint());
2372 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2373 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2374 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2376 /// NOTE: This may be called before legalization on types for which FMAs are
2377 /// not legal, but should return true if those types will eventually legalize
2378 /// to types that support FMAs. After legalization, it will only be called on
2379 /// types that support FMAs (via Legal or Custom actions)
2380 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
2384 /// Return true if it's profitable to narrow operations of type VT1 to
2385 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2386 /// i32 to i16.
2387 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2391 /// Return true if it is beneficial to convert a load of a constant to
2392 /// just the constant itself.
2393 /// On some targets it might be more efficient to use a combination of
2394 /// arithmetic instructions to materialize the constant instead of loading it
2395 /// from a constant pool.
2396 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2397                                                Type *Ty) const {
2398   return false;
2399 }
2401 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2402 /// from this source type with this index. This is needed because
2403 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2404 /// the first element, and only the target knows which lowering is cheap.
2405 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2406 unsigned Index) const {
2410 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2411 // even if the vector itself has multiple uses.
2412 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2416 // Return true if CodeGenPrepare should consider splitting large offset of a
2417 // GEP to make the GEP fit into the addressing mode and can be sunk into the
2418 // same blocks of its users.
2419 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2421 //===--------------------------------------------------------------------===//
2422 // Runtime Library hooks
2425 /// Rename the default libcall routine name for the specified libcall.
2426 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2427 LibcallRoutineNames[Call] = Name;
2430 /// Get the libcall routine name for the specified libcall.
2431 const char *getLibcallName(RTLIB::Libcall Call) const {
2432 return LibcallRoutineNames[Call];
2435 /// Override the default CondCode to be used to test the result of the
2436 /// comparison libcall against zero.
2437 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2438 CmpLibcallCCs[Call] = CC;
2441 /// Get the CondCode that's to be used to test the result of the comparison
2442 /// libcall against zero.
2443 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2444 return CmpLibcallCCs[Call];
2447 /// Set the CallingConv that should be used for the specified libcall.
2448 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2449 LibcallCallingConvs[Call] = CC;
2452 /// Get the CallingConv that should be used for the specified libcall.
2453 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2454 return LibcallCallingConvs[Call];
2455 }
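// For example, a target with a nonstandard runtime might remap a libcall in
// its constructor (the routine name here is illustrative, not a real one):
//
// \code
//   setLibcallName(RTLIB::SREM_I64, "__myrt_srem64");
//   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);
//   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ); // result == 0 means equal
// \endcode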
2457 /// Execute target specific actions to finalize target lowering.
2458 /// This is used to set extra flags in MachineFrameInformation and freezing
2459 /// the set of reserved registers.
2460 /// The default implementation just freezes the set of reserved registers.
2461 virtual void finalizeLowering(MachineFunction &MF) const;
2464 const TargetMachine &TM;
2466 /// Tells the code generator that the target has multiple (allocatable)
2467 /// condition registers that can be used to store the results of comparisons
2468 /// for use by selects and conditional branches. With multiple condition
2469 /// registers, the code generator will not aggressively sink comparisons into
2470 /// the blocks of their users.
2471 bool HasMultipleConditionRegisters;
2473 /// Tells the code generator that the target has BitExtract instructions.
2474 /// The code generator will aggressively sink "shift"s into the blocks of
2475 /// their users if the users will generate "and" instructions which can be
2476 /// combined with "shift" to BitExtract instructions.
2477 bool HasExtractBitsInsn;
2479 /// Tells the code generator to bypass slow divide or remainder
2480 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
2481 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2482 /// div/rem when the operands are positive and less than 256.
2483 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
2485 /// Tells the code generator that it shouldn't generate extra flow control
2486 /// instructions and should attempt to combine flow control instructions via
2487 /// predication.
2488 bool JumpIsExpensive;
2490 /// Whether the target supports or cares about preserving floating point
2491 /// exception behavior.
2492 bool HasFloatingPointExceptions;
2494 /// This target prefers to use _setjmp to implement llvm.setjmp.
2496 /// Defaults to false.
2497 bool UseUnderscoreSetJmp;
2499 /// This target prefers to use _longjmp to implement llvm.longjmp.
2501 /// Defaults to false.
2502 bool UseUnderscoreLongJmp;
2504 /// Information about the contents of the high-bits in boolean values held in
2505 /// a type wider than i1. See getBooleanContents.
2506 BooleanContent BooleanContents;
2508 /// Information about the contents of the high-bits in boolean values held in
2509 /// a type wider than i1. See getBooleanContents.
2510 BooleanContent BooleanFloatContents;
2512 /// Information about the contents of the high-bits in boolean vector values
2513 /// when the element type is wider than i1. See getBooleanContents.
2514 BooleanContent BooleanVectorContents;
2516 /// The target scheduling preference: shortest possible total cycles or lowest
2517 /// register usage.
2518 Sched::Preference SchedPreferenceInfo;
2520 /// The size, in bytes, of the target's jmp_buf buffers
2521 unsigned JumpBufSize;
2523 /// The alignment, in bytes, of the target's jmp_buf buffers
2524 unsigned JumpBufAlignment;
2526 /// The minimum alignment that any argument on the stack needs to have.
2527 unsigned MinStackArgumentAlignment;
2529 /// The minimum function alignment (used when optimizing for size, and to
2530 /// prevent explicitly provided alignment from leading to incorrect code).
2531 unsigned MinFunctionAlignment;
2533 /// The preferred function alignment (used when alignment unspecified and
2534 /// optimizing for speed).
2535 unsigned PrefFunctionAlignment;
2537 /// The preferred loop alignment.
2538 unsigned PrefLoopAlignment;
2540 /// Size in bits of the maximum atomics size the backend supports.
2541 /// Accesses larger than this will be expanded by AtomicExpandPass.
2542 unsigned MaxAtomicSizeInBitsSupported;
2544 /// Size in bits of the minimum cmpxchg or ll/sc operation the
2545 /// backend supports.
2546 unsigned MinCmpXchgSizeInBits;
2548 /// This indicates if the target supports unaligned atomic operations.
2549 bool SupportsUnalignedAtomics;
2551 /// If set to a physical register, this specifies the register that
2552 /// llvm.stacksave/llvm.stackrestore should save and restore.
2553 unsigned StackPointerRegisterToSaveRestore;
2555 /// This indicates the default register class to use for each ValueType the
2556 /// target supports natively.
2557 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2558 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
2559 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2561 /// This indicates the "representative" register class to use for each
2562 /// ValueType the target supports natively. This information is used by the
2563 /// scheduler to track register pressure. By default, the representative
2564 /// register class is the largest legal super-reg register class of the
2565 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2566 /// representative class would be GR32.
2567 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2569 /// This indicates the "cost" of the "representative" register class for each
2570 /// ValueType. The cost is used by the scheduler to approximate register
2571 /// pressure.
2572 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2574 /// For any value types we are promoting or expanding, this contains the value
2575 /// type that we are changing to. For Expanded types, this contains one step
2576 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2577 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2578 /// the same type (e.g. i32 -> i32).
2579 MVT TransformToType[MVT::LAST_VALUETYPE];
2581 /// For each operation and each value type, keep a LegalizeAction that
2582 /// indicates how instruction selection should deal with the operation. Most
2583 /// operations are Legal (aka, supported natively by the target), but
2584 /// operations that are not should be described. Note that operations on
2585 /// non-legal value types are not described here.
2586 LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2588 /// For each load extension type and each value type, keep a LegalizeAction
2589 /// that indicates how instruction selection should deal with a load of a
2590 /// specific value type and extension type. Uses 4-bits to store the action
2591 /// for each of the 4 load ext types.
2592 uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2594 /// For each value type pair keep a LegalizeAction that indicates whether a
2595 /// truncating store of a specific value type and truncating type is legal.
2596 LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2598 /// For each indexed mode and each value type, keep a pair of LegalizeAction
2599 /// that indicates how instruction selection should deal with the load /
2600 /// store.
2602 /// The first dimension is the value_type for the reference. The second
2603 /// dimension represents the various modes for load/store.
2604 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2606 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2607 /// indicates how instruction selection should deal with the condition code.
2609 /// Because each CC action takes up 4 bits, we need to have the array size be
2610 /// large enough to fit all of the value types. This can be done by rounding
2611 /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2612 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2615 ValueTypeActionImpl ValueTypeActions;
2618 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2620 /// Targets can specify ISD nodes that they would like PerformDAGCombine
2621 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2622 /// array.
2623 unsigned char
2624 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2626 /// For operations that must be promoted to a specific type, this holds the
2627 /// destination type. This map should be sparse, so don't hold it as an
2628 /// array.
2630 /// Targets add entries to this map with AddPromotedToType(..), clients access
2631 /// this with getTypeToPromoteTo(..).
2632 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2633   PromoteToType;
2635 /// Stores the name of each libcall.
2636 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
2638 /// The ISD::CondCode that should be used to test the result of each of the
2639 /// comparison libcall against zero.
2640 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2642 /// Stores the CallingConv that should be used for each libcall.
2643 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2645 /// Set default libcall names and calling conventions.
2646 void InitLibcalls(const Triple &TT);
2649 /// Return true if the extension represented by \p I is free.
2650 /// \pre \p I is a sign, zero, or fp extension and
2651 /// is[Z|FP]ExtFree of the related types is not true.
2652 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
2654 /// Depth that GatherAllAliases should continue looking for chain
2655 /// dependencies when trying to find a more preferable chain. As an
2656 /// approximation, this should be more than the number of consecutive stores
2657 /// expected to be merged.
2658 unsigned GatherAllAliasesMaxDepth;
2660 /// Specify maximum number of store instructions per memset call.
2662 /// When lowering \@llvm.memset this field specifies the maximum number of
2663 /// store operations that may be substituted for the call to memset. Targets
2664 /// must set this value based on the cost threshold for that target. Targets
2665 /// should assume that the memset will be done using as many of the largest
2666 /// store operations first, followed by smaller ones, if necessary, per
2667 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2668 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2669 /// store. This only applies to setting a constant array of a constant size.
2670 unsigned MaxStoresPerMemset;
2672 /// Maximum number of stores operations that may be substituted for the call
2673 /// to memset, used for functions with OptSize attribute.
2674 unsigned MaxStoresPerMemsetOptSize;
2676 /// Specify maximum number of store instructions per memcpy call.
2678 /// When lowering \@llvm.memcpy this field specifies the maximum number of
2679 /// store operations that may be substituted for a call to memcpy. Targets
2680 /// must set this value based on the cost threshold for that target. Targets
2681 /// should assume that the memcpy will be done using as many of the largest
2682 /// store operations first, followed by smaller ones, if necessary, per
2683 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2684 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
2685 /// and one 1-byte store. This only applies to copying a constant array of
2686 /// constant size.
2687 unsigned MaxStoresPerMemcpy;
2690 /// \brief Specify max number of store instructions to glue in inlined memcpy.
2692 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
2693 /// of store instructions to keep together. This helps in pairing and
2694 /// vectorization later on.
2695 unsigned MaxGluedStoresPerMemcpy = 0;
2697 /// Maximum number of store operations that may be substituted for a call to
2698 /// memcpy, used for functions with OptSize attribute.
2699 unsigned MaxStoresPerMemcpyOptSize;
2700 unsigned MaxLoadsPerMemcmp;
2701 unsigned MaxLoadsPerMemcmpOptSize;
2703 /// Specify maximum number of store instructions per memmove call.
2705 /// When lowering \@llvm.memmove this field specifies the maximum number of
2706 /// store instructions that may be substituted for a call to memmove. Targets
2707 /// must set this value based on the cost threshold for that target. Targets
2708 /// should assume that the memmove will be done using as many of the largest
2709 /// store operations first, followed by smaller ones, if necessary, per
2710 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2711 /// with 8-bit alignment would result in nine 1-byte stores. This only
2712 /// applies to copying a constant array of constant size.
2713 unsigned MaxStoresPerMemmove;
2715 /// Maximum number of store instructions that may be substituted for a call to
2716 /// memmove, used for functions with OptSize attribute.
2717 unsigned MaxStoresPerMemmoveOptSize;
2719 /// Tells the code generator that select is more expensive than a branch if
2720 /// the branch is usually predicted right.
2721 bool PredictableSelectIsExpensive;
2723 /// \see enableExtLdPromotion.
2724 bool EnableExtLdPromotion;
2726 /// Return true if the value types that can be represented by the specified
2727 /// register class are all legal.
2728 bool isLegalRC(const TargetRegisterInfo &TRI,
2729 const TargetRegisterClass &RC) const;
2731 /// Replace/modify any TargetFrameIndex operands with a target-dependent
2732 /// sequence of memory operands that is recognized by PrologEpilogInserter.
2733 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
2734 MachineBasicBlock *MBB) const;
2736 /// Replace/modify the XRay custom event operands with target-dependent
2737 /// details.
2738 MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
2739 MachineBasicBlock *MBB) const;
2741 /// Replace/modify the XRay typed event operands with target-dependent
2742 /// details.
2743 MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI,
2744 MachineBasicBlock *MBB) const;
2747 /// This class defines information used to lower LLVM code to legal SelectionDAG
2748 /// operators that the target instruction selector can accept natively.
2750 /// This class also defines callbacks that targets must implement to lower
2751 /// target-specific constructs to SelectionDAG operators.
2752 class TargetLowering : public TargetLoweringBase {
2753 public:
2754   struct DAGCombinerInfo;
2756 TargetLowering(const TargetLowering &) = delete;
2757 TargetLowering &operator=(const TargetLowering &) = delete;
2759 /// NOTE: The TargetMachine owns TLOF.
2760 explicit TargetLowering(const TargetMachine &TM);
2762 bool isPositionIndependent() const;
2764 virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
2765 FunctionLoweringInfo *FLI,
2766 LegacyDivergenceAnalysis *DA) const {
2770 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
2774 /// Returns true by value, base pointer and offset pointer and addressing mode
2775 /// by reference if the node's address can be legally represented as
2776 /// pre-indexed load / store address.
2777 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2778 SDValue &/*Offset*/,
2779 ISD::MemIndexedMode &/*AM*/,
2780 SelectionDAG &/*DAG*/) const {
2784 /// Returns true by value, base pointer and offset pointer and addressing mode
2785 /// by reference if this node can be combined with a load / store to form a
2786 /// post-indexed load / store.
2787 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2789 SDValue &/*Offset*/,
2790 ISD::MemIndexedMode &/*AM*/,
2791 SelectionDAG &/*DAG*/) const {
2795 /// Return the entry encoding for a jump table in the current function. The
2796 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2797 virtual unsigned getJumpTableEncoding() const;
2799 virtual const MCExpr *
2800 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
2801 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2802 MCContext &/*Ctx*/) const {
2803 llvm_unreachable("Need to implement this hook if target has custom JTIs");
2806 /// Returns relocation base for the given PIC jumptable.
2807 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
2808 SelectionDAG &DAG) const;
2810 /// This returns the relocation base for the given PIC jumptable, the same as
2811 /// getPICJumpTableRelocBase, but as an MCExpr.
2812 virtual const MCExpr *
2813 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2814 unsigned JTI, MCContext &Ctx) const;
2816 /// Return true if folding a constant offset with the given GlobalAddress is
2817 /// legal. It is frequently not legal in PIC relocation models.
2818 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2820 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2821 SDValue &Chain) const;
2823 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
2824 SDValue &NewRHS, ISD::CondCode &CCCode,
2825 const SDLoc &DL) const;
2827 /// Returns a pair of (return value, chain).
2828 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2829 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2830 EVT RetVT, ArrayRef<SDValue> Ops,
2831 bool isSigned, const SDLoc &dl,
2832 bool doesNotReturn = false,
2833 bool isReturnValueUsed = true) const;
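// Sketch of a typical use when lowering an unsupported FP operation to a
// runtime call (assumes LHS/RHS are the f32 operands being lowered and dl
// is the debug location in scope):
//
// \code
//   SDValue Ops[2] = { LHS, RHS };
//   std::pair<SDValue, SDValue> CallInfo =
//       makeLibCall(DAG, RTLIB::REM_F32, MVT::f32, Ops, /*isSigned=*/false, dl);
//   SDValue Result = CallInfo.first;  // the call's return value
//   SDValue Chain  = CallInfo.second; // the output chain
// \endcode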
2835 /// Check whether parameters to a call that are passed in callee saved
2836 /// registers are the same as from the calling function. This needs to be
2837 /// checked for tail call eligibility.
2838 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
2839 const uint32_t *CallerPreservedMask,
2840 const SmallVectorImpl<CCValAssign> &ArgLocs,
2841 const SmallVectorImpl<SDValue> &OutVals) const;
2843 //===--------------------------------------------------------------------===//
2844 // TargetLowering Optimization Methods
2847 /// A convenience struct that encapsulates a DAG, and two SDValues for
2848 /// returning information from TargetLowering to its clients that want to
2849 /// combine.
2850 struct TargetLoweringOpt {
2851   SelectionDAG &DAG;
2852   bool LegalTys;
2853   bool LegalOps;
2854   SDValue Old;
2855   SDValue New;
2857   explicit TargetLoweringOpt(SelectionDAG &InDAG,
2858                              bool LT, bool LO) :
2859     DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2861 bool LegalTypes() const { return LegalTys; }
2862 bool LegalOperations() const { return LegalOps; }
2864 bool CombineTo(SDValue O, SDValue N) {
2865   Old = O;
2866   New = N;
2867   return true;
2868 }
2869 };
2871 /// Check to see if the specified operand of the specified instruction is a
2872 /// constant integer. If so, check to see if there are any bits set in the
2873 /// constant that are not demanded. If so, shrink the constant and return
2874 /// true.
2875 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
2876 TargetLoweringOpt &TLO) const;
2878 // Target hook to do target-specific const optimization, which is called by
2879 // ShrinkDemandedConstant. This function should return true if the target
2880 // doesn't want ShrinkDemandedConstant to further optimize the constant.
2881 virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
2882 TargetLoweringOpt &TLO) const {
  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
  /// generalized for targets with other types of implicit widening casts.
  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                        TargetLoweringOpt &TLO) const;

  /// Helper for SimplifyDemandedBits that can simplify an operation with
  /// multiple uses. This function simplifies operand \p OpIdx of \p User and
  /// then updates \p User with the simplified version. No other uses of
  /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
  /// function behaves exactly like function SimplifyDemandedBits declared
  /// below except that it also updates the DAG by calling
  /// DCI.CommitTargetLoweringOpt.
  bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
                            DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;

  /// Look at Op. At this point, we know that only the DemandedMask bits of the
  /// result of Op are ever used downstream. If we can use this information to
  /// simplify Op, create a new simplified DAG node and return true, returning
  /// the original and new nodes in Old and New. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may only
  /// be accurate for those bits in the DemandedMask.
  /// \p AssumeSingleUse When this parameter is true, this function will
  /// attempt to simplify \p Op even if there are multiple uses.
  /// Callers are responsible for correctly updating the DAG based on the
  /// results of this function, because simply replacing TLO.Old with TLO.New
  /// will be incorrect when this parameter is true and TLO.Old has multiple
  /// uses.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            KnownBits &Known,
                            TargetLoweringOpt &TLO,
                            unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            DAGCombinerInfo &DCI) const;

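  // Typical usage from a target's PerformDAGCombine, shown as a sketch.
  // MyTargetISD::SOME_NODE is a made-up opcode that is assumed to produce an
  // i32 value but read only the low 8 bits of its first operand:
  //
  //   SDValue
  //   MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                       DAGCombinerInfo &DCI) const {
  //     if (N->getOpcode() == MyTargetISD::SOME_NODE) {
  //       SDValue Op = N->getOperand(0);
  //       APInt DemandedMask = APInt::getLowBitsSet(32, 8);
  //       if (SimplifyDemandedBits(Op, DemandedMask, DCI))
  //         return SDValue(N, 0); // Operand simplified; DAG already updated.
  //     }
  //     return SDValue();
  //   }
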
  /// Look at Vector Op. At this point, we know that only the DemandedElts
  /// elements of the result of Op are ever used downstream. If we can use
  /// this information to simplify Op, create a new simplified DAG node and
  /// return true, storing the original and new nodes in TLO.
  /// Otherwise, analyze the expression and return a mask of KnownUndef and
  /// KnownZero elements for the expression (used to simplify the caller).
  /// The KnownUndef/Zero elements may only be accurate for those bits
  /// in the DemandedMask.
  /// \p AssumeSingleUse When this parameter is true, this function will
  /// attempt to simplify \p Op even if there are multiple uses.
  /// Callers are responsible for correctly updating the DAG based on the
  /// results of this function, because simply replacing TLO.Old with TLO.New
  /// will be incorrect when this parameter is true and TLO.Old has multiple
  /// uses.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
                                  bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedVectorElts.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  DAGCombinerInfo &DCI) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements.
  virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;

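  // A minimal sketch of an override. MyTargetISD::GET_FLAG is a hypothetical
  // node that materializes a boolean as 0 or 1 in an i32 register:
  //
  //   void MyTargetLowering::computeKnownBitsForTargetNode(
  //       const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
  //       const SelectionDAG &DAG, unsigned Depth) const {
  //     Known.resetAll();
  //     if (Op.getOpcode() == MyTargetISD::GET_FLAG)
  //       Known.Zero.setBitsFrom(1); // Every bit above bit 0 is known zero.
  //   }
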
  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
  /// Default implementation computes low bits based on alignment
  /// information. This should preserve known bits passed into it.
  virtual void computeKnownBitsForFrameIndex(const SDValue FIOp,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded vector
  /// elements, returning true on success. Otherwise, analyze the expression and
  /// return a mask of KnownUndef and KnownZero elements for the expression
  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
  /// accurate for those bits in the DemandedMask.
  virtual bool SimplifyDemandedVectorEltsForTargetNode(
      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded bits,
  /// returning true on success. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may only
  /// be accurate for those bits in the DemandedMask.
  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedBits,
                                                 KnownBits &Known,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;

  /// If \p SNaN is false, \returns true if \p Op is known to never be any
  /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a
  /// signaling NaN.
  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
                                            const SelectionDAG &DAG,
                                            bool SNaN = false,
                                            unsigned Depth = 0) const;

  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;

  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
        : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeDAG() const {
      return Level == AfterLegalizeDAG;
    }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Return true if \p N is a constant or constant vector equal to the true
  /// value from getBooleanContents().
  bool isConstTrueVal(const SDNode *N) const;

  /// Return true if \p N is a constant or constant vector equal to the false
  /// value from getBooleanContents().
  bool isConstFalseVal(const SDNode *N) const;

  /// Return true if \p N is a True value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;

  // For targets which wrap addresses, unwrap for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered with invoke it
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    return true;
  }

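  // For instance, a target whose selector matches shl(and(x, mask), c) as a
  // single bitfield-extract instruction may want to keep that tree intact.
  // A sketch, with "MyTargetLowering" purely illustrative:
  //
  //   bool MyTargetLowering::isDesirableToCommuteWithShift(
  //       const SDNode *N, CombineLevel Level) const {
  //     // Commuting the shift with the AND would break the pattern that the
  //     // instruction selector recognizes as a single bitfield extract.
  //     return !(N->getOpcode() == ISD::SHL &&
  //              N->getOperand(0).getOpcode() == ISD::AND);
  //   }
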
  // Return true if it is profitable to combine a BUILD_VECTOR with a
  // stride-pattern to a shuffle and a truncate.
  // Example of such a combine:
  // v4i32 build_vector((extract_elt V, 1),
  //                    (extract_elt V, 3),
  //                    (extract_elt V, 5),
  //                    (extract_elt V, 7))
  //  -->
  // v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to v4i64)
  virtual bool isDesirableToCombineBuildVectorToShuffleTruncate(
      ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
    return false;
  }

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

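  // The x86-style policy described above could be expressed as the following
  // sketch ("MyTargetLowering" is illustrative):
  //
  //   bool MyTargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  //     if (VT == MVT::i16)
  //       return false; // Legal, but 16-bit encodings are long and slow.
  //     return isTypeLegal(VT);
  //   }
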
  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  /// Return true if the target supports the swifterror attribute. It optimizes
  /// loads and stores to reading and writing a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }

  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
    llvm_unreachable("Not Implemented");
  }

  /// Insert explicit copies in entry and exit blocks. We copy a subset of
  /// CSRs to virtual registers in the entry block, and copy them back to
  /// physical registers in the exit blocks. This function is called at the end
  /// of instruction selection.
  virtual void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
    llvm_unreachable("Not Implemented");
  }

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt           : 1;
    bool RetZExt           : 1;
    bool IsVarArg          : 1;
    bool IsInReg           : 1;
    bool DoesNotReturn     : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent      : 1;
    bool IsPatchPoint      : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // Is Call lowering done post SelectionDAG type legalization.
    bool IsPostTypeLegalization = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    SDValue Callee;
    ArgListTy Args;
    SelectionDAG &DAG;
    SDLoc DL;
    ImmutableCallSite CS;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    SmallVector<SDValue, 4> InVals;

    CallLoweringInfo(SelectionDAG &DAG)
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
          IsPatchPoint(false), DAG(DAG) {}

    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
      DL = dl;
      return *this;
    }

    CallLoweringInfo &setChain(SDValue InChain) {
      Chain = InChain;
      return *this;
    }

    // setCallee with target/module-specific attributes
    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                   SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      DAG.getTargetLoweringInfo().markLibCallAttributes(
          &(DAG.getMachineFunction()), CC, Args);
      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      return *this;
    }

    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                SDValue Target, ArgListTy &&ArgsList,
                                ImmutableCallSite Call) {
      RetTy = ResultType;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn =
          Call.doesNotReturn() ||
          (!Call.isInvoke() &&
           isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
      IsVarArg = FTy->isVarArg();
      IsReturnValueUsed = !Call.getInstruction()->use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);

      Callee = Target;

      CallConv = Call.getCallingConv();
      NumFixedArgs = FTy->getNumParams();
      Args = std::move(ArgsList);

      CS = Call;

      return *this;
    }

    CallLoweringInfo &setInRegister(bool Value = true) {
      IsInReg = Value;
      return *this;
    }

    CallLoweringInfo &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    CallLoweringInfo &setVarArg(bool Value = true) {
      IsVarArg = Value;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    CallLoweringInfo &setConvergent(bool Value = true) {
      IsConvergent = Value;
      return *this;
    }

    CallLoweringInfo &setSExtResult(bool Value = true) {
      RetSExt = Value;
      return *this;
    }

    CallLoweringInfo &setZExtResult(bool Value = true) {
      RetZExt = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    ArgListTy &getArgs() {
      return Args;
    }
  };

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands. The first element is the return value
  /// for the function (if RetTy is not VoidTy). The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

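  // Typical usage when emitting a runtime call, shown as a sketch; RetTy,
  // Callee, Args, Chain and Node are assumed to already be in scope:
  //
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(SDLoc(Node))
  //       .setChain(Chain)
  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  //   // CallResult.first is the return value; CallResult.second is the
  //   // outgoing chain.
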
  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue LowerCall(CallLoweringInfo &/*CLI*/,
                            SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if the result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }

  /// Return the builtin name for the __builtin___clear_cache intrinsic.
  /// Default is to invoke the clear cache library call.
  virtual const char * getClearCacheBuiltinName() const {
    return "__clear_cache";
  }

  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail.
  virtual unsigned getRegisterByName(const char* RegName, EVT VT,
                                     SelectionDAG &DAG) const {
    report_fatal_error("Named registers not implemented for this target");
  }

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value. FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the time,
  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
  /// conventions. The frontend should handle this and include all of the
  /// necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                  ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(Context, MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg) const {
    return false;
  }

  /// Returns a 0 terminated array of registers that can be safely used as
  /// scratch registers.
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }

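  // A sketch of an override; MyTarget::R12 stands in for whichever physical
  // register the ABI leaves free across call boundaries on the target:
  //
  //   const MCPhysReg *
  //   MyTargetLowering::getScratchRegisters(CallingConv::ID) const {
  //     static const MCPhysReg ScratchRegs[] = { MyTarget::R12, 0 };
  //     return ScratchRegs;
  //   }
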
  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU. The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

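  // As a sketch, a target could map an IR-level hint onto one of the
  // MachineMemOperand target flags (the metadata kind here is made up, and
  // MOTargetFlag1 is repurposed as a target-private "streaming" hint):
  //
  //   MachineMemOperand::Flags
  //   MyTargetLowering::getMMOFlags(const Instruction &I) const {
  //     if (I.getMetadata("mytarget.streaming"))
  //       return MachineMemOperand::MOTargetFlag1;
  //     return MachineMemOperand::MONone;
  //   }
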
  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types. It replaces the
  /// LowerOperation callback in the type Legalizer. The reason we can not do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal. If the target has no operations that require custom
  /// lowering, it need not implement this. The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type. The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this. The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

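  // A sketch of a typical implementation, for a hypothetical target where i8
  // is illegal and CTLZ was marked Custom: compute in i32 and truncate back,
  // so the result types still match the original node exactly.
  //
  //   void MyTargetLowering::ReplaceNodeResults(
  //       SDNode *N, SmallVectorImpl<SDValue> &Results,
  //       SelectionDAG &DAG) const {
  //     assert(N->getOpcode() == ISD::CTLZ && "Unexpected custom lowering");
  //     SDLoc DL(N);
  //     SDValue Wide = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32,
  //                                N->getOperand(0));
  //     SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, MVT::i32, Wide);
  //     // The zero-extension introduced 24 extra leading zeros; remove them.
  //     Ctlz = DAG.getNode(ISD::SUB, DL, MVT::i32, Ctlz,
  //                        DAG.getConstant(24, DL, MVT::i32));
  //     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Ctlz));
  //   }
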
  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to. This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m". TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst. This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;
  };

  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values. If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints. On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;

  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "i")
      return InlineAsm::Constraint_i;
    else if (ConstraintCode == "m")
      return InlineAsm::Constraint_m;
    return InlineAsm::Constraint_Unknown;
  }

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand. This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector. If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }

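  // A sketch: if fdiv costs roughly four times as much as fmul on a target,
  // rewriting x/d and y/d into r = 1.0/d; x*r; y*r already pays off once the
  // divisor has two uses:
  //
  //   unsigned MyTargetLowering::combineRepeatedFPDivisors() const {
  //     return 2;
  //   }
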
  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the
  /// input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }

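  // A sketch for a target with a hardware reciprocal-estimate instruction.
  // MyTargetISD::FRECIPE is illustrative, and the accuracy assumption behind
  // the single refinement step is made up:
  //
  //   SDValue MyTargetLowering::getRecipEstimate(SDValue Operand,
  //                                              SelectionDAG &DAG,
  //                                              int Enabled,
  //                                              int &RefinementSteps) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT != MVT::f32) // Only f32 has an estimate instruction here.
  //       return SDValue();
  //     if (RefinementSteps == ReciprocalEstimate::Unspecified)
  //       RefinementSteps = 1; // Assume one Newton-Raphson step suffices.
  //     return DAG.getNode(MyTargetISD::FRECIPE, SDLoc(Operand), VT, Operand);
  //   }
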
  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes. One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand funnel shift.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float(f32) to SINT(i64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand UINT(i64) to double(f64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Turn load of vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns MERGE_VALUES of the scalar loads with their chains.
  SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns MERGE_VALUES of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// the stored elements. This number is equal to the number of '1' bits in
  /// \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
  /// bounds the returned pointer is unspecified, but will be within the vector
  /// bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue getExpandedSaturationAdditionSubtraction(SDNode *Node,
                                                   SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::SMULFIX. This method accepts
  /// integers as its arguments.
  SDValue getExpandedFixedPointMultiplication(SDNode *Node,
                                              SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g. to fill in optional defs for
  /// ARM 's'-setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for target independent emulated TLS model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands target specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr,
                                         SelectionDAG &DAG) const {
    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
  }

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;

private:
  SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
                               ISD::CondCode Cond, DAGCombinerInfo &DCI,
                               const SDLoc &DL) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H