//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface, which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

namespace Intrinsic {
enum ID : unsigned;
}

class AssumptionCache;
class BranchInst;
class Function;
class GlobalValue;
class IntrinsicInst;
class LoadInst;
class Loop;
class SCEV;
class ScalarEvolution;
class StoreInst;
class SwitchInst;
class TargetLibraryInfo;
class Type;
class User;
class Value;

/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};
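
/// For illustration only: a target's \c getTgtMemIntrinsic hook might
/// describe a hypothetical load-like intrinsic roughly as in the sketch
/// below (the argument layout and the \c MyTTIImpl name are assumptions of
/// this example, not part of any in-tree target):
/// \code
///   bool MyTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
///                                      MemIntrinsicInfo &Info) {
///     Info.PtrVal = Inst->getArgOperand(0); // the accessed pointer
///     Info.ReadMem = true;                  // behaves like a load
///     Info.WriteMem = false;
///     Info.IsVolatile = false;
///     Info.MatchingId = 1;                  // pairs with the matching store
///     return true;
///   }
/// \endcode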

/// Attributes of a target dependent hardware loop.
struct HardwareLoopInfo {
  HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L) : L(L) {}
  Loop *L = nullptr;
  BasicBlock *ExitBlock = nullptr;
  BranchInst *ExitBranch = nullptr;
  const SCEV *ExitCount = nullptr;
  IntegerType *CountType = nullptr;
  Value *LoopDecrement = nullptr; // Decrement the loop counter by this
                                  // value in every iteration.
  bool IsNestingLegal = false;    // Can a hardware loop be a parent to
                                  // another hardware loop?
  bool CounterInReg = false;      // Should loop counter be updated in
                                  // the loop via a phi?
  bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                  // icmp ne zero on the loop counter value and
                                  // produces an i1 to guard the loop entry.
  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop = false,
                               bool ForceHardwareLoopPHI = false);
  bool canAnalyze(LoopInfo &LI);
};

/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// The kind of cost model.
  ///
  /// There are several different cost models that can be customized by the
  /// target. The normalization of each cost model may be target specific.
  enum TargetCostKind {
    TCK_RecipThroughput, ///< Reciprocal throughput.
    TCK_Latency,         ///< The latency of the instruction.
    TCK_CodeSize         ///< Instruction code size.
  };

  /// Query the cost of a specified instruction.
  ///
  /// Clients should use this interface to query the cost of an existing
  /// instruction. The instruction must have a valid parent (basic block).
  ///
  /// Note, this method does not cache the cost calculation and it
  /// can be expensive in some cases.
  int getInstructionCost(const Instruction *I, enum TargetCostKind kind) const {
    switch (kind) {
    case TCK_RecipThroughput:
      return getInstructionThroughput(I);

    case TCK_Latency:
      return getInstructionLatency(I);

    case TCK_CodeSize:
      return getUserCost(I);
    }
    llvm_unreachable("Unknown instruction cost kind");
  }
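
  /// For illustration, a hypothetical client summing the reciprocal
  /// throughput of every instruction in a function \c F could be sketched as
  /// (\c F and \c TTI are assumed to be in scope):
  /// \code
  ///   int FnCost = 0;
  ///   for (const BasicBlock &BB : F)
  ///     for (const Instruction &I : BB)
  ///       FnCost += TTI.getInstructionCost(
  ///           &I, TargetTransformInfo::TCK_RecipThroughput);
  /// \endcode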

  /// Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of
  /// code-size cost and execution cost. A free instruction is typically one
  /// that folds into another instruction. For example, reg-to-reg moves can
  /// often be skipped by renaming the registers in the CPU, but they still
  /// are encoded and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };
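
  /// For illustration, a transformation might gate itself on these constants
  /// roughly as follows (a sketch, not code from any in-tree pass; \c U and
  /// \c TTI are assumed in scope):
  /// \code
  ///   int Cost = TTI.getUserCost(U);
  ///   if (Cost == TargetTransformInfo::TCC_Free)
  ///     return true; // folds away entirely, always profitable
  ///   return Cost <= 2 * TargetTransformInfo::TCC_Basic;
  /// \endcode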

  /// Estimate the cost of a specific operation when lowered.
  ///
  /// Note that this is designed to work on an arbitrary synthetic opcode, and
  /// thus work for hypothetical queries before an instruction has even been
  /// formed. However, this does *not* work for GEPs, and must not be called
  /// for a GEP instruction. Instead, use the dedicated getGEPCost interface as
  /// analyzing a GEP's cost requires more information.
  ///
  /// Typically only the result type is required, and the operand type can be
  /// omitted. However, if the opcode is one of the cast instructions, the
  /// operand type is required.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;

  /// Estimate the cost of a GEP operation when lowered.
  ///
  /// The contract for this function is the same as \c getOperationCost except
  /// that it supports an interface that provides extra information specific to
  /// the GEP operation.
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) const;

  /// Estimate the cost of an EXT operation when lowered.
  ///
  /// The contract for this function is the same as \c getOperationCost except
  /// that it supports an interface that provides extra information specific to
  /// the EXT operation.
  int getExtCost(const Instruction *I, const Value *Src) const;

  /// Estimate the cost of a function call when lowered.
  ///
  /// The contract for this is the same as \c getOperationCost except that it
  /// supports an interface that provides extra information specific to call
  /// sites.
  ///
  /// This is the most basic query for estimating call cost: it only knows the
  /// function type and (potentially) the number of arguments at the call site.
  /// The latter is only interesting for varargs function types.
  int getCallCost(FunctionType *FTy, int NumArgs = -1,
                  const User *U = nullptr) const;

  /// Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload adds the ability to reason about the particular function
  /// being called in the event it is a library call with special lowering.
  int getCallCost(const Function *F, int NumArgs = -1,
                  const User *U = nullptr) const;

  /// Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload allows specifying a set of candidate argument values.
  int getCallCost(const Function *F, ArrayRef<const Value *> Arguments,
                  const User *U = nullptr) const;

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  /// Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys,
                       const User *U = nullptr) const;

  /// Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments,
                       const User *U = nullptr) const;

  /// \return the expected cost of a memcpy, which could e.g. depend on the
  /// source/destination type and alignment and the number of bytes copied.
  int getMemcpyCost(const Instruction *I) const;

  /// \return The estimated number of case clusters when lowering \p 'SI'.
  /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
  /// table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) const;

  /// Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered. It has two primary advantages over the \c getOperationCost and
  /// \c getGEPCost above, and one significant disadvantage: it can only be
  /// used when the IR construct has already been formed.
  ///
  /// The advantages are that it can inspect the SSA use graph to reason more
  /// accurately about the cost. For example, all-constant-GEPs can often be
  /// folded into a load or other instruction, but if they are used in some
  /// other context they may not be folded. This routine can distinguish such
  /// cases.
  ///
  /// \p Operands is a list of operands which can be a result of
  /// transformations of the current operands. The number of operands on the
  /// list must be equal to the number of current operands the IR user has.
  /// Their order on the list must be the same as the order of the current
  /// operands the IR user has.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getUserCost(const User *U, ArrayRef<const Value *> Operands) const;

  /// This is a helper function which calls the two-argument getUserCost
  /// with \p Operands which are the current operands U has.
  int getUserCost(const User *U) const {
    SmallVector<const Value *, 4> Operands(U->value_op_begin(),
                                           U->value_op_end());
    return getUserCost(U, Operands);
  }
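
  /// For illustration, a pass that knows some operands will be simplified can
  /// pass the replacement values instead of the current ones (a sketch; the
  /// \c SimplifiedOp0 and \c SimplifiedOp1 names are invented for this
  /// example and assumed to be computed by the caller):
  /// \code
  ///   SmallVector<const Value *, 4> NewOps = {SimplifiedOp0, SimplifiedOp1};
  ///   int CostAfterFolding = TTI.getUserCost(U, NewOps);
  /// \endcode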

  /// Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to
  /// conditional branches.
  bool hasBranchDivergence() const;

  /// Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for the
  /// target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis
  /// first builds the dependency graph, and then runs the reachability
  /// algorithm starting with the sources of divergence.
  bool isSourceOfDivergence(const Value *V) const;

  /// Returns true for the target-specific set of operations that produce a
  /// uniform result even when given non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory with different address spaces. Access of a memory location
  /// through a pointer with this address space is expected to be legal but
  /// slower compared to the same memory location accessed through a pointer
  /// with a different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is
  /// converted to this address space, optimizations should attempt to replace
  /// the access with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getUserCost values returned by this API, and the expectation is that
    /// the unrolled loop's instructions when run through that interface should
    /// not exceed this cost. However, this is only an estimate. Also, specific
    /// loops may be unrolled even with a cost above this threshold if deemed
    /// profitable. Set this to UINT_MAX to disable the loop body cost
    /// limitation.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (The value should be no less than 100).
    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                    MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current
    /// cost threshold and other factors.
    unsigned Count;
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, the
    /// unrolling transformation will select a peeling factor based on profile
    /// information and other factors.
    unsigned PeelCount;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to
    /// fall back to Partial unrolling if full unrolling is above
    /// FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents number of instructions optimized when "back edge"
    // becomes "fall through" in unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when
    /// computing the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow peeling off loop iterations for loops with a low dynamic trip
    /// count.
    bool AllowPeeling;
    /// Allow unrolling of all the iterations of the runtime loop remainder.
    bool UnrollRemainder;
    /// Allow unroll and jam. Used to enable unroll and jam for the target.
    bool UnrollAndJam;
    /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
    /// value above is used during unroll and jam for the outer loop size.
    /// This value is used in the same manner to limit the size of the inner
    /// loop.
    unsigned UnrollAndJamInnerLoopThreshold;
  };

  /// Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP) const;
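
  /// For illustration, a target override of this hook typically tweaks a few
  /// of the defaulted fields, e.g. (a sketch of a hypothetical target; the
  /// \c MyTTIImpl name is invented for this example):
  /// \code
  ///   void MyTTIImpl::getUnrollingPreferences(
  ///       Loop *L, ScalarEvolution &SE,
  ///       TargetTransformInfo::UnrollingPreferences &UP) {
  ///     UP.Partial = true; // allow partial unrolling
  ///     UP.Runtime = true; // allow unrolling with runtime trip counts
  ///     UP.MaxCount = 4;   // but never unroll by more than 4x
  ///   }
  /// \endcode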

  /// Query the target whether it would be profitable to convert the given
  /// loop into a hardware loop.
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const;
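
  /// For illustration, a hardware-loop pass might combine this query with
  /// \c HardwareLoopInfo roughly as follows (a sketch; the analyses are
  /// assumed to be available in the caller):
  /// \code
  ///   HardwareLoopInfo HWLoopInfo(L);
  ///   if (!TTI.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo))
  ///     return false;
  ///   if (!HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT))
  ///     return false;
  ///   // Rewrite the loop using the fields the target filled in, e.g. emit
  ///   // intrinsics such as llvm.set.loop.iterations / llvm.loop.decrement.
  /// \endcode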

  /// @}

  /// \name Scalar Target Information
  /// @{

  /// Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. HW
  /// support is considered "Fast" if it can outperform, or is on par with,
  /// the SW implementation when the population is sparse; otherwise, it is
  /// considered "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into
  /// a register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// If the target returns true in LSRWithInstrQueries(), I may be valid.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0,
                             Instruction *I = nullptr) const;
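
  /// For illustration, asking whether "base + 4*index + 16" is legal for an
  /// i64 load could look like (a sketch; \c Int64Ty is assumed in scope):
  /// \code
  ///   bool Legal = TTI.isLegalAddressingMode(Int64Ty,
  ///                                          /*BaseGV=*/nullptr,
  ///                                          /*BaseOffset=*/16,
  ///                                          /*HasBaseReg=*/true,
  ///                                          /*Scale=*/4);
  /// \endcode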

  /// Return true if the LSR cost of C1 is lower than that of C2.
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2) const;
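
  /// The default implementation compares the two costs lexicographically,
  /// roughly as in the sketch below (the exact field order is an assumption
  /// here; targets may weigh the fields differently):
  /// \code
  ///   return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
  ///                   C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
  ///          std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
  ///                   C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  /// \endcode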

  /// Return true if the target can fuse a compare and branch.
  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
  /// calculation for the instructions in a loop.
  bool canMacroFuseCmp() const;

  /// Return true if the target can save a compare for loop count, for
  /// example, a hardware loop saves the compare.
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const;

  /// \return True if LSR should make efforts to create/preserve post-inc
  /// addressing mode expressions.
  bool shouldFavorPostInc() const;

  /// Return true if LSR should make efforts to generate indexed addressing
  /// modes that operate across loop iterations.
  bool shouldFavorBackedgeIndex(const Loop *L) const;

  /// Return true if the target supports masked store.
  bool isLegalMaskedStore(Type *DataType) const;
  /// Return true if the target supports masked load.
  bool isLegalMaskedLoad(Type *DataType) const;

  /// Return true if the target supports nontemporal store.
  bool isLegalNTStore(Type *DataType, unsigned Alignment) const;
  /// Return true if the target supports nontemporal load.
  bool isLegalNTLoad(Type *DataType, unsigned Alignment) const;

  /// Return true if the target supports masked scatter.
  bool isLegalMaskedScatter(Type *DataType) const;
  /// Return true if the target supports masked gather.
  bool isLegalMaskedGather(Type *DataType) const;

  /// Return true if the target supports masked compress store.
  bool isLegalMaskedCompressStore(Type *DataType) const;
  /// Return true if the target supports masked expand load.
  bool isLegalMaskedExpandLoad(Type *DataType) const;

  /// Return true if the target has a unified operation to calculate division
  /// and remainder. If so, the additional implicit multiplication and
  /// subtraction required to calculate a remainder from division are free.
  /// This can enable more aggressive transformations for division and
  /// remainder than would typically be allowed using throughput or size cost
  /// models.
  bool hasDivRemOp(Type *DataType, bool IsSigned) const;

  /// Return true if the given instruction (assumed to be a memory access
  /// instruction) has a volatile variant. If that's the case then we can avoid
  /// addrspacecast to generic AS for volatile loads/stores. Default
  /// implementation returns false, which prevents address space inference for
  /// volatile loads/stores.
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;

  /// Return true if the target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace = 0) const;

  /// Return true if the loop strength reduce pass should make
  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
  /// immediate offset and no index register.
  bool LSRWithInstrQueries() const;

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// Return true if it is profitable to hoist instructions in the then/else
  /// blocks to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  bool useAA() const;

  /// Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// Returns the target's jmp_buf alignment in bytes.
  unsigned getJumpBufAlignment() const;

  /// Returns the target's jmp_buf size in bytes.
  unsigned getJumpBufSize() const;

  /// Return true if switches should be turned into lookup tables for the
  /// target.
  bool shouldBuildLookupTables() const;

  /// Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// Return true if the input function, which is cold at all call sites,
  /// should use the coldcc calling convention.
  bool useColdCCForColdCall(Function &F) const;

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) const;

  /// If the target has efficient vector element load/store instructions, it
  /// can return true here so that insertion/extraction costs are not added to
  /// the scalarization cost of a load/store.
  bool supportsEfficientVectorElementLoadStore() const;

  /// Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// Returns options for expansion of memcmp. IsZeroCmp is
  /// true if this is the expansion of memcmp(p1, p2, s) == 0.
  struct MemCmpExpansionOptions {
    // Return true if memcmp expansion is enabled.
    operator bool() const { return MaxNumLoads > 0; }

    // Maximum number of load operations.
    unsigned MaxNumLoads = 0;

    // The list of available load sizes (in bytes), sorted in decreasing order.
    SmallVector<unsigned, 8> LoadSizes;

    // For memcmp expansion when the memcmp result is only compared equal or
    // not-equal to 0, allow up to this number of load pairs per block. As an
    // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
    //   a0 = load2bytes &a[0]
    //   b0 = load2bytes &b[0]
    //   a2 = load1byte  &a[2]
    //   b2 = load1byte  &b[2]
    //   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
    unsigned NumLoadsPerBlock = 1;

    // Set to true to allow overlapping loads. For example, 7-byte compares can
    // be done with two 4-byte compares instead of 4+2+1-byte compares. This
    // requires all loads in LoadSizes to be doable in an unaligned way.
    bool AllowOverlappingLoads = false;
  };
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const;
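
  /// For illustration, a target with fast unaligned 4- and 8-byte loads might
  /// return options like the following sketch of a hypothetical override:
  /// \code
  ///   TargetTransformInfo::MemCmpExpansionOptions Options;
  ///   Options.MaxNumLoads = 8;
  ///   Options.LoadSizes.append({8, 4, 2, 1}); // decreasing order
  ///   Options.AllowOverlappingLoads = true;   // 7 bytes -> two 4-byte loads
  ///   return Options;
  /// \endcode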

  /// Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
  /// vector loads/stores.
  bool enableMaskedInterleavedAccessVectorization() const;

  /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because the semantics of vector and scalar
  /// floating-point operations may differ. For example, ARM NEON v7 SIMD math
  /// does not support IEEE-754 denormal numbers, while depending on the
  /// platform, scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      unsigned Alignment = 1,
                                      bool *Fast = nullptr) const;

  /// Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// Return true if it is faster to check if a floating-point value is NaN
  /// (or not-NaN) versus a comparison against a constant FP zero value.
  /// Targets should override this if materializing a 0.0 for comparison is
  /// generally as cheap as checking for ordered/unordered.
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;

  /// Return the expected cost of supporting the floating point operation
  /// of the specified type.
  int getFPOpCost(Type *Ty) const;

  /// Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  int getIntImmCost(const APInt &Imm, Type *Ty) const;

  /// Return the expected cost of materialization for the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;

  /// Return the expected cost for the given integer when optimising
  /// for size. This is different than the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as AArch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) const;

  /// @}

  /// \name Vector Target Information
  /// @{

  /// The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,        ///< Broadcast element 0 to all other elements.
    SK_Reverse,          ///< Reverse the order of the vector.
    SK_Select,           ///< Selects elements from the corresponding lane of
                         ///< either source operand. This is equivalent to a
                         ///< vector select with a constant condition operand.
    SK_Transpose,        ///< Transpose two vectors.
    SK_InsertSubvector,  ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector, ///< ExtractSubvector Index indicates start offset.
    SK_PermuteTwoSrc,    ///< Merge elements from two source vectors into one
                         ///< with any shuffle mask.
    SK_PermuteSingleSrc  ///< Shuffle elements of single source vector with any
                         ///< shuffle mask.
  };

  /// Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is uniform constant.
    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
  };

  /// Additional properties of an operand's values.
  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };

  /// \return The number of scalar or vector registers that the target has.
  /// If 'Vector' is true, it returns the number of vector registers. If it is
  /// set to false, it returns the number of scalar registers.
  unsigned getNumberOfRegisters(bool Vector) const;

  /// \return The width of the largest scalar or vector register type.
  unsigned getRegisterBitWidth(bool Vector) const;

  /// \return The width of the smallest vector register type.
  unsigned getMinVectorRegisterBitWidth() const;

  /// \return True if the vectorization factor should be chosen to
  /// make the vector of the smallest element type match the size of a
  /// vector register. For wider element types, this could result in
  /// creating vectors that span multiple vector registers.
  /// If false, the vectorization factor will be chosen based on the
  /// size of the widest element type.
  bool shouldMaximizeVectorBandwidth(bool OptSize) const;

  /// \return The minimum vectorization factor for types of given element
  /// bit width, or 0 if there is no minimum VF. The returned value only
  /// applies when shouldMaximizeVectorBandwidth returns true.
  unsigned getMinimumVF(unsigned ElemWidth) const;

  /// \return True if it should be considered for address type promotion.
  /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
  /// profitable without finding other extensions fed by the same input.
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;

  /// \return The size of a cache line in bytes.
  unsigned getCacheLineSize() const;

  /// The possible cache levels
  enum class CacheLevel {
    L1D, // The L1 data cache
    L2D, // The L2 data cache

    // We currently do not model L3 caches, as their sizes differ widely
    // between microarchitectures. Also, we currently do not have a use for
    // L3 cache size modeling yet.
  };

  /// \return The size of the cache level in bytes, if available.
  llvm::Optional<unsigned> getCacheSize(CacheLevel Level) const;

  /// \return The associativity of the cache level, if available.
  llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
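
  /// For illustration, a locality-aware transform might query these as in the
  /// sketch below (the fallback value is an assumption of the example):
  /// \code
  ///   unsigned L1Bytes = 32768; // fallback guess if the target reports none
  ///   if (llvm::Optional<unsigned> Size =
  ///           TTI.getCacheSize(TargetTransformInfo::CacheLevel::L1D))
  ///     L1Bytes = *Size;
  /// \endcode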

  /// \return How much before a load we should place the prefetch instruction.
  /// This is currently measured in number of instructions.
  unsigned getPrefetchDistance() const;

  /// \return Some HW prefetchers can handle accesses up to a certain constant
  /// stride. This is the minimum stride in bytes where it makes sense to start
  /// adding SW prefetches. The default is 1, i.e. prefetch with any stride.
  unsigned getMinPrefetchStride() const;

  /// \return The maximum number of iterations to prefetch ahead. If the
  /// required number of iterations is more than this number, no prefetching
  /// is performed.
  unsigned getMaxPrefetchIterationsAhead() const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(unsigned VF) const;

  /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
  static OperandValueKind getOperandInfo(Value *V,
                                         OperandValueProperties &OpProps);

  /// This is an approximation of reciprocal throughput of a math/logic op.
  /// A higher cost indicates less expected throughput.
  /// From Agner Fog's guides, reciprocal throughput is "the average number of
  /// clock cycles per instruction when the instructions are not part of a
  /// limiting dependency chain."
  /// Therefore, costs should be scaled to account for multiple execution units
  /// on the target that can process this type of instruction. For example, if
  /// there are 5 scalar integer units and 2 vector integer units that can
  /// calculate an 'add' in a single cycle, this model should indicate that the
  /// cost of the vector add instruction is 2.5 times the cost of the scalar
  /// add instruction.
  /// \p Args is an optional argument which holds the instruction operands
  /// values so the TTI can analyze those values searching for special
  /// cases or optimizations based on those values.
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) const;
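
  /// For illustration, querying the throughput cost of a 4-wide vector add
  /// whose second operand is known to be a uniform constant (a sketch;
  /// \c Ctx and \c TTI are assumed in scope):
  /// \code
  ///   Type *V4I32Ty = VectorType::get(Type::getInt32Ty(Ctx), 4);
  ///   int Cost = TTI.getArithmeticInstrCost(
  ///       Instruction::Add, V4I32Ty, TargetTransformInfo::OK_AnyValue,
  ///       TargetTransformInfo::OK_UniformConstantValue);
  /// \endcode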

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds to show the insert/extract point and the type of
  /// the subvector being inserted/extracted.
  /// NOTE: For subvector extractions Tp represents the source type.
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
                     Type *SubTp = nullptr) const;

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// zext, etc. If there is an existing instruction that holds Opcode, it
  /// may be passed in the 'I' parameter.
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr) const;

  /// \return The expected cost of a sign- or zero-extended vector extract. Use
  /// -1 to indicate that there is no information about the index value.
  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index = -1) const;

  /// \return The expected cost of control-flow related instructions such as
  /// Phi, Ret, Br.
  int getCFInstrCost(unsigned Opcode) const;

  /// \returns The expected cost of compare and select instructions. If there
  /// is an existing instruction that holds Opcode, it may be passed in the
  /// 'I' parameter.
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                         Type *CondTy = nullptr,
                         const Instruction *I = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// Use -1 to indicate that there is no information on the index value.
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;

  /// \return The cost of Load and Store instructions.
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace,
                      const Instruction *I = nullptr) const;

  /// \return The cost of masked Load and Store instructions.
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) const;

  /// \return The cost of Gather or Scatter operation
  /// \p Opcode - is a type of memory access Load or Store
  /// \p DataTy - a vector type of the data to be loaded or stored
  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
  /// \p VariableMask - true when the memory access is predicated with a mask
  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of single element
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                             bool VariableMask, unsigned Alignment) const;

  /// \return The cost of the interleaved memory operation.
  /// \p Opcode is the memory operation code
  /// \p VecTy is the vector type of the interleaved access.
  /// \p Factor is the interleave factor
  /// \p Indices is the indices for interleaved load members (as interleaved
  ///    load allows gaps)
  /// \p Alignment is the alignment of the memory operation
  /// \p AddressSpace is address space of the pointer.
  /// \p UseMaskForCond indicates if the memory access is predicated.
  /// \p UseMaskForGaps indicates if gaps should be masked.
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false) const;

  /// Calculate the cost of performing a vector reduction.
  ///
  /// This is the cost of reducing the vector value of type \p Ty to a scalar
  /// value using the operation denoted by \p Opcode. The form of the reduction
  /// can either be a pairwise reduction or a reduction that splits the vector
  /// at every reduction level.
  ///
  /// Pairwise:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v1), (v2+v3), undef, undef)
  /// Split:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v2), (v1+v3), undef, undef)
  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                 bool IsPairwiseForm) const;
  int getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwiseForm,
                             bool IsUnsigned) const;
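
  /// For illustration, comparing both reduction forms for a 4-wide i32 add
  /// (a sketch; \c V4I32Ty is assumed in scope):
  /// \code
  ///   int PairwiseCost = TTI.getArithmeticReductionCost(
  ///       Instruction::Add, V4I32Ty, /*IsPairwiseForm=*/true);
  ///   int SplitCost = TTI.getArithmeticReductionCost(
  ///       Instruction::Add, V4I32Ty, /*IsPairwiseForm=*/false);
  /// \endcode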

  /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
  /// Three cases are handled: 1. scalar instruction 2. vector instruction
  /// 3. scalar instruction which is to be vectorized with VF.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF = 1) const;

  /// \returns The cost of Intrinsic instructions. Types analysis only.
  /// If ScalarizationCostPassed is UINT_MAX, the cost of scalarizing the
  /// arguments and the return value will be computed based on types.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF,
                            unsigned ScalarizationCostPassed = UINT_MAX) const;

  /// \returns The cost of Call instructions.
  int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;

  /// \returns The number of pieces into which the provided type must be
  /// split during legalization. Zero is returned when the answer is unknown.
  unsigned getNumberOfParts(Type *Tp) const;

  /// \returns The cost of the address computation. For most targets this can
  /// be merged into the instruction indexing mode. Some targets might want to
  /// distinguish between address computation for memory operations on vector
  /// types and scalar types. Such targets should override this function.
  /// The 'SE' parameter holds the pointer to the scalar evolution object,
  /// which is used to get the Ptr step value in case of constant stride.
  /// The 'Ptr' parameter holds the SCEV of the access pointer.
  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE = nullptr,
                                const SCEV *Ptr = nullptr) const;

  /// \returns The cost, if any, of keeping values of the given types alive
  /// over a callsite.
  ///
  /// Some types may require the use of register classes that do not have
  /// any callee-saved registers, so would require a spill and fill.
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;

  /// \returns True if the intrinsic is a supported memory intrinsic. Info
  /// will contain additional information - whether the intrinsic may read or
  /// write memory, volatility and the pointer. Info is undefined if false is
  /// returned.
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  /// \returns The maximum element size, in bytes, for an element
  /// unordered-atomic memory intrinsic.
  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  /// \returns A value which is the result of the given memory intrinsic. New
  /// instructions may be created to extract the result from the given
  /// intrinsic memory operation. Returns nullptr if the target cannot create
  /// a result from the given intrinsic.
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const;

  /// \returns The type to use in a loop expansion of a memcpy call.
  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const;

  /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
  /// \param RemainingBytes The number of bytes to copy.
  ///
  /// Calculates the operand types to use when copying \p RemainingBytes of
  /// memory, where source and destination alignments are \p SrcAlign and
  /// \p DestAlign respectively.
  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const;

  /// \returns True if the two functions have compatible attributes for
  /// inlining purposes.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \returns True if the caller and callee agree on how \p Args will be
  /// passed to the callee.
  /// \param[out] Args The list of compatible arguments. The implementation
  /// may filter out any incompatible args from this list.
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;

  /// The type of load/store indexing.
  enum MemIndexedMode {
    MIM_Unindexed, ///< No indexing.
    MIM_PreInc,    ///< Pre-incrementing.
    MIM_PreDec,    ///< Pre-decrementing.
    MIM_PostInc,   ///< Post-incrementing.
    MIM_PostDec    ///< Post-decrementing.
  };

  /// \returns True if the specified indexed load for the given type is legal.
  bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;

  /// \returns True if the specified indexed store for the given type is legal.
  bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;

  /// \returns The bitwidth of the largest vector type that should be used to
  /// load/store in the given address space.
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  /// \returns True if the load instruction is legal to vectorize.
  bool isLegalToVectorizeLoad(LoadInst *LI) const;

  /// \returns True if the store instruction is legal to vectorize.
  bool isLegalToVectorizeStore(StoreInst *SI) const;

  /// \returns True if it is legal to vectorize the given load chain.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given store chain.
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;

  /// \returns The new vector factor value if the target doesn't support \p
  /// SizeInBytes loads or has a better vector factor.
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;

  /// \returns The new vector factor value if the target doesn't support \p
  /// SizeInBytes stores or has a better vector factor.
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;

  /// Flags describing the kind of vector reduction.
  struct ReductionFlags {
    ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
    bool IsMaxOp;  ///< If the op is a min/max kind, true if it's a max
                   ///< operation.
    bool IsSigned; ///< Whether the operation is a signed int reduction.
    bool NoNaN;    ///< If op is an fp min/max, whether NaNs may be present.
  };

  /// \returns True if the target wants to handle the given reduction idiom in
  /// the intrinsics form instead of the shuffle form.
  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const;

  /// \returns True if the target wants to expand the given reduction intrinsic
  /// into a shuffle sequence.
  bool shouldExpandReduction(const IntrinsicInst *II) const;

  /// \returns the size cost of rematerializing a GlobalValue address relative
  /// to a stack reload.
  unsigned getGISelRematGlobalCost() const;

  /// @}

private:
  /// Estimate the latency of the specified instruction.
  /// Returns 1 as the default value.
  int getInstructionLatency(const Instruction *I) const;

  /// Returns the expected throughput cost of the instruction.
  /// Returns -1 if the cost is unknown.
  int getInstructionThroughput(const Instruction *I) const;

  /// The abstract base class used to type erase specific TTI
  /// implementations.
  class Concept;

  /// The template model for the base class which wraps a concrete
  /// implementation in a type erased interface.
  template <typename T> class Model;

  std::unique_ptr<Concept> TTIImpl;
};

class TargetTransformInfo::Concept {
public:
  virtual ~Concept() = 0;
  virtual const DataLayout &getDataLayout() const = 0;
  virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
  virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
                         ArrayRef<const Value *> Operands) = 0;
  virtual int getExtCost(const Instruction *I, const Value *Src) = 0;
  virtual int getCallCost(FunctionType *FTy, int NumArgs, const User *U) = 0;
  virtual int getCallCost(const Function *F, int NumArgs, const User *U) = 0;
  virtual int getCallCost(const Function *F,
                          ArrayRef<const Value *> Arguments,
                          const User *U) = 0;
  virtual unsigned getInliningThresholdMultiplier() = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<Type *> ParamTys, const User *U) = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<const Value *> Arguments,
                               const User *U) = 0;
  virtual int getMemcpyCost(const Instruction *I) = 0;
  virtual unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                                    unsigned &JTSize) = 0;
  virtual int
  getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0;
  virtual bool hasBranchDivergence() = 0;
  virtual bool isSourceOfDivergence(const Value *V) = 0;
  virtual bool isAlwaysUniform(const Value *V) = 0;
  virtual unsigned getFlatAddressSpace() = 0;
  virtual bool isLoweredToCall(const Function *F) = 0;
  virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                                       UnrollingPreferences &UP) = 0;
  virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                        AssumptionCache &AC,
                                        TargetLibraryInfo *LibInfo,
                                        HardwareLoopInfo &HWLoopInfo) = 0;
  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace,
                                     Instruction *I) = 0;
  virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                             TargetTransformInfo::LSRCost &C2) = 0;
  virtual bool canMacroFuseCmp() = 0;
  virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                          LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
                          TargetLibraryInfo *LibInfo) = 0;
  virtual bool shouldFavorPostInc() const = 0;
  virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
  virtual bool isLegalMaskedStore(Type *DataType) = 0;
  virtual bool isLegalMaskedLoad(Type *DataType) = 0;
  virtual bool isLegalNTStore(Type *DataType, unsigned Alignment) = 0;
  virtual bool isLegalNTLoad(Type *DataType, unsigned Alignment) = 0;
  virtual bool isLegalMaskedScatter(Type *DataType) = 0;
  virtual bool isLegalMaskedGather(Type *DataType) = 0;
  virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
  virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
  virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
  virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
  virtual bool prefersVectorizedAddressing() = 0;
  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale, unsigned AddrSpace) = 0;
  virtual bool LSRWithInstrQueries() = 0;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
  virtual bool isProfitableToHoist(Instruction *I) = 0;
  virtual bool useAA() = 0;
  virtual bool isTypeLegal(Type *Ty) = 0;
  virtual unsigned getJumpBufAlignment() = 0;
  virtual unsigned getJumpBufSize() = 0;
  virtual bool shouldBuildLookupTables() = 0;
  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
  virtual bool useColdCCForColdCall(Function &F) = 0;
  virtual unsigned
  getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) = 0;
  virtual unsigned
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   unsigned VF) = 0;
  virtual bool supportsEfficientVectorElementLoadStore() = 0;
  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
  virtual MemCmpExpansionOptions
  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
  virtual bool enableInterleavedAccessVectorization() = 0;
  virtual bool enableMaskedInterleavedAccessVectorization() = 0;
  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                              unsigned BitWidth,
                                              unsigned AddressSpace,
                                              unsigned Alignment,
                                              bool *Fast) = 0;
  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
  virtual bool haveFastSqrt(Type *Ty) = 0;
  virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
  virtual int getFPOpCost(Type *Ty) = 0;
  virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual unsigned getNumberOfRegisters(bool Vector) = 0;
  virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
  virtual unsigned getMinVectorRegisterBitWidth() = 0;
  virtual bool shouldMaximizeVectorBandwidth(bool OptSize) const = 0;
  virtual unsigned getMinimumVF(unsigned ElemWidth) const = 0;
  virtual bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
  virtual unsigned getCacheLineSize() = 0;
  virtual llvm::Optional<unsigned> getCacheSize(CacheLevel Level) = 0;
  virtual llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) = 0;
  virtual unsigned getPrefetchDistance() = 0;
  virtual unsigned getMinPrefetchStride() = 0;
  virtual unsigned getMaxPrefetchIterationsAhead() = 0;
  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
  virtual int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo,
                         ArrayRef<const Value *> Args) = 0;
  virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                             Type *SubTp) = 0;
  virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                               const Instruction *I) = 0;
  virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                       VectorType *VecTy, unsigned Index) = 0;
  virtual int getCFInstrCost(unsigned Opcode) = 0;
  virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                 Type *CondTy, const Instruction *I) = 0;
  virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
                                 unsigned Index) = 0;
  virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                              unsigned AddressSpace, const Instruction *I) = 0;
  virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment,
                                    unsigned AddressSpace) = 0;
  virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                     Value *Ptr, bool VariableMask,
                                     unsigned Alignment) = 0;
  virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                         unsigned Factor,
                                         ArrayRef<unsigned> Indices,
                                         unsigned Alignment,
                                         unsigned AddressSpace,
                                         bool UseMaskForCond = false,
                                         bool UseMaskForGaps = false) = 0;
  virtual int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                         bool IsPairwiseForm) = 0;
  virtual int getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                     bool IsPairwiseForm, bool IsUnsigned) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Type *> Tys, FastMathFlags FMF,
                                    unsigned ScalarizationCostPassed) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Value *> Args, FastMathFlags FMF,
                                    unsigned VF) = 0;
  virtual int getCallInstrCost(Function *F, Type *RetTy,
                               ArrayRef<Type *> Tys) = 0;
  virtual unsigned getNumberOfParts(Type *Tp) = 0;
  virtual int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                        const SCEV *Ptr) = 0;
  virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                  MemIntrinsicInfo &Info) = 0;
  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                   Type *ExpectedType) = 0;
  virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                          unsigned SrcAlign,
                                          unsigned DestAlign) const = 0;
  virtual void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
  virtual bool areInlineCompatible(const Function *Caller,
                                   const Function *Callee) const = 0;
  virtual bool
  areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
                               SmallPtrSetImpl<Argument *> &Args) const = 0;
  virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
  virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                           unsigned Alignment,
                                           unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                            unsigned Alignment,
                                            unsigned AddrSpace) const = 0;
  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                       unsigned ChainSizeInBytes,
                                       VectorType *VecTy) const = 0;
  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                        unsigned ChainSizeInBytes,
                                        VectorType *VecTy) const = 0;
  virtual bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                     ReductionFlags) const = 0;
  virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
  virtual unsigned getGISelRematGlobalCost() const = 0;
  virtual int getInstructionLatency(const Instruction *I) = 0;
};
template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
  T Impl;

public:
  Model(T Impl) : Impl(std::move(Impl)) {}
  ~Model() override {}
  const DataLayout &getDataLayout() const override {
    return Impl.getDataLayout();
  }

  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
    return Impl.getOperationCost(Opcode, Ty, OpTy);
  }
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) override {
    return Impl.getGEPCost(PointeeType, Ptr, Operands);
  }
  int getExtCost(const Instruction *I, const Value *Src) override {
    return Impl.getExtCost(I, Src);
  }
  int getCallCost(FunctionType *FTy, int NumArgs, const User *U) override {
    return Impl.getCallCost(FTy, NumArgs, U);
  }
  int getCallCost(const Function *F, int NumArgs, const User *U) override {
    return Impl.getCallCost(F, NumArgs, U);
  }
  int getCallCost(const Function *F,
                  ArrayRef<const Value *> Arguments, const User *U) override {
    return Impl.getCallCost(F, Arguments, U);
  }
  unsigned getInliningThresholdMultiplier() override {
    return Impl.getInliningThresholdMultiplier();
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys,
                       const User *U = nullptr) override {
    return Impl.getIntrinsicCost(IID, RetTy, ParamTys, U);
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments,
                       const User *U = nullptr) override {
    return Impl.getIntrinsicCost(IID, RetTy, Arguments, U);
  }
  int getMemcpyCost(const Instruction *I) override {
    return Impl.getMemcpyCost(I);
  }
  int getUserCost(const User *U, ArrayRef<const Value *> Operands) override {
    return Impl.getUserCost(U, Operands);
  }
  bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
  bool isSourceOfDivergence(const Value *V) override {
    return Impl.isSourceOfDivergence(V);
  }

  bool isAlwaysUniform(const Value *V) override {
    return Impl.isAlwaysUniform(V);
  }

  unsigned getFlatAddressSpace() override {
    return Impl.getFlatAddressSpace();
  }

  bool isLoweredToCall(const Function *F) override {
    return Impl.isLoweredToCall(F);
  }
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               UnrollingPreferences &UP) override {
    return Impl.getUnrollingPreferences(L, SE, UP);
  }
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) override {
    return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace,
                             Instruction *I) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                      Scale, AddrSpace, I);
  }
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2) override {
    return Impl.isLSRCostLess(C1, C2);
  }
  bool canMacroFuseCmp() override {
    return Impl.canMacroFuseCmp();
  }
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                  LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) override {
    return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
  }
  bool shouldFavorPostInc() const override {
    return Impl.shouldFavorPostInc();
  }
  bool shouldFavorBackedgeIndex(const Loop *L) const override {
    return Impl.shouldFavorBackedgeIndex(L);
  }
  bool isLegalMaskedStore(Type *DataType) override {
    return Impl.isLegalMaskedStore(DataType);
  }
  bool isLegalMaskedLoad(Type *DataType) override {
    return Impl.isLegalMaskedLoad(DataType);
  }
  bool isLegalNTStore(Type *DataType, unsigned Alignment) override {
    return Impl.isLegalNTStore(DataType, Alignment);
  }
  bool isLegalNTLoad(Type *DataType, unsigned Alignment) override {
    return Impl.isLegalNTLoad(DataType, Alignment);
  }
  bool isLegalMaskedScatter(Type *DataType) override {
    return Impl.isLegalMaskedScatter(DataType);
  }
  bool isLegalMaskedGather(Type *DataType) override {
    return Impl.isLegalMaskedGather(DataType);
  }
  bool isLegalMaskedCompressStore(Type *DataType) override {
    return Impl.isLegalMaskedCompressStore(DataType);
  }
  bool isLegalMaskedExpandLoad(Type *DataType) override {
    return Impl.isLegalMaskedExpandLoad(DataType);
  }
  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
    return Impl.hasDivRemOp(DataType, IsSigned);
  }
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
    return Impl.hasVolatileVariant(I, AddrSpace);
  }
  bool prefersVectorizedAddressing() override {
    return Impl.prefersVectorizedAddressing();
  }
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                     Scale, AddrSpace);
  }
  bool LSRWithInstrQueries() override {
    return Impl.LSRWithInstrQueries();
  }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool useAA() override { return Impl.useAA(); }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
  unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool shouldBuildLookupTablesForConstant(Constant *C) override {
    return Impl.shouldBuildLookupTablesForConstant(C);
  }
  bool useColdCCForColdCall(Function &F) override {
    return Impl.useColdCCForColdCall(F);
  }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert,
                                    bool Extract) override {
    return Impl.getScalarizationOverhead(Ty, Insert, Extract);
  }
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) override {
    return Impl.getOperandsScalarizationOverhead(Args, VF);
  }

  bool supportsEfficientVectorElementLoadStore() override {
    return Impl.supportsEfficientVectorElementLoadStore();
  }

  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const override {
    return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  bool enableMaskedInterleavedAccessVectorization() override {
    return Impl.enableMaskedInterleavedAccessVectorization();
  }
  bool isFPVectorizationPotentiallyUnsafe() override {
    return Impl.isFPVectorizationPotentiallyUnsafe();
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) override {
    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
    return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
  }

  int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }
  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) override {
    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCost(Imm, Ty);
  }
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(IID, Idx, Imm, Ty);
  }
  unsigned getNumberOfRegisters(bool Vector) override {
    return Impl.getNumberOfRegisters(Vector);
  }
  unsigned getRegisterBitWidth(bool Vector) const override {
    return Impl.getRegisterBitWidth(Vector);
  }
  unsigned getMinVectorRegisterBitWidth() override {
    return Impl.getMinVectorRegisterBitWidth();
  }
  bool shouldMaximizeVectorBandwidth(bool OptSize) const override {
    return Impl.shouldMaximizeVectorBandwidth(OptSize);
  }
  unsigned getMinimumVF(unsigned ElemWidth) const override {
    return Impl.getMinimumVF(ElemWidth);
  }
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
    return Impl.shouldConsiderAddressTypePromotion(
        I, AllowPromotionWithoutCommonHeader);
  }
  unsigned getCacheLineSize() override {
    return Impl.getCacheLineSize();
  }
  llvm::Optional<unsigned> getCacheSize(CacheLevel Level) override {
    return Impl.getCacheSize(Level);
  }
  llvm::Optional<unsigned> getCacheAssociativity(CacheLevel Level) override {
    return Impl.getCacheAssociativity(Level);
  }
  unsigned getPrefetchDistance() override { return Impl.getPrefetchDistance(); }
  unsigned getMinPrefetchStride() override {
    return Impl.getMinPrefetchStride();
  }
  unsigned getMaxPrefetchIterationsAhead() override {
    return Impl.getMaxPrefetchIterationsAhead();
  }
  unsigned getMaxInterleaveFactor(unsigned VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) override {
    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize);
  }
  int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo,
                         ArrayRef<const Value *> Args) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
  }
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                     Type *SubTp) override {
    return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
  }
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src, I);
  }
  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index) override {
    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  }
  int getCFInstrCost(unsigned Opcode) override {
    return Impl.getCFInstrCost(Opcode);
  }
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  }
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
    return Impl.getVectorInstrCost(Opcode, Val, Index);
  }
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
  }
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  }
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                             Value *Ptr, bool VariableMask,
                             unsigned Alignment) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
  }
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace, bool UseMaskForCond,
                                 bool UseMaskForGaps) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
  }
  int getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                 bool IsPairwiseForm) override {
    return Impl.getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm);
  }
  int getMinMaxReductionCost(Type *Ty, Type *CondTy,
                             bool IsPairwiseForm, bool IsUnsigned) override {
    return Impl.getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF,
                            unsigned ScalarizationCostPassed) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
  }
  int getCallInstrCost(Function *F, Type *RetTy,
                       ArrayRef<Type *> Tys) override {
    return Impl.getCallInstrCost(F, RetTy, Tys);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
    return Impl.getAtomicMemIntrinsicMaxElementSize();
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign,
                                  unsigned DestAlign) const override {
    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAlign, DestAlign);
  }
  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const override {
    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                           SrcAlign, DestAlign);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  bool areFunctionArgsABICompatible(
      const Function *Caller, const Function *Callee,
      SmallPtrSetImpl<Argument *> &Args) const override {
    return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
  }
  bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
  }
  bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.useReductionIntrinsic(Opcode, Ty, Flags);
  }
  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }

  unsigned getGISelRematGlobalCost() const override {
    return Impl.getGISelRematGlobalCost();
  }

  int getInstructionLatency(const Instruction *I) override {
    return Impl.getInstructionLatency(I);
  }
};
template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}
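
// Illustrative sketch (not part of this interface): a target's TargetMachine
// typically returns its concrete TTI implementation through this type-erasing
// constructor, for example (X86TTIImpl shown here only as an assumed example):
//
//   TargetTransformInfo
//   X86TargetMachine::getTargetTransformInfo(const Function &F) {
//     return TargetTransformInfo(X86TTIImpl(this, F));
//   }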
/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
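///
/// For example, a pass running under the new pass manager can obtain the
/// result as follows (a minimal sketch; \c FAM is assumed to be an already
/// populated FunctionAnalysisManager):
///
/// \code
///   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
/// \endcode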
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
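  ///
  /// For example (a sketch, with \c TM standing in for a target machine the
  /// caller already owns):
  ///
  /// \code
  ///   TargetIRAnalysis TIRA([&](const Function &F) {
  ///     return TM.getTargetTransformInfo(F);
  ///   });
  /// \endcode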
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
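///
/// A legacy-pass-manager client would typically query it like so (a sketch;
/// it assumes the pass declared its dependency in \c getAnalysisUsage):
///
/// \code
///   TargetTransformInfo &TTI =
///       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
/// \endcode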
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  Optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// We must provide a default constructor for the pass but it should never
  /// be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};
/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
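///
/// For instance, a tool setting up a legacy pass pipeline might add it as
/// follows (a sketch; \c TM is assumed to be a valid TargetMachine pointer):
///
/// \code
///   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
/// \endcode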
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // End llvm namespace

#endif