From 372b46cad9f745859f542f9d2216991585ae83f4 Mon Sep 17 00:00:00 2001 From: Owen Anderson Date: Mon, 22 Jun 2009 21:39:50 +0000 Subject: [PATCH] SCEVHandle is no more! git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@73906 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Analysis/IVUsers.h | 16 +- include/llvm/Analysis/LoopVR.h | 4 +- include/llvm/Analysis/ScalarEvolution.h | 230 ++++------ include/llvm/Analysis/ScalarEvolutionExpander.h | 9 +- include/llvm/Analysis/ScalarEvolutionExpressions.h | 95 ++-- lib/Analysis/IVUsers.cpp | 28 +- lib/Analysis/LoopVR.cpp | 14 +- lib/Analysis/ScalarEvolution.cpp | 511 ++++++++++----------- lib/Analysis/ScalarEvolutionExpander.cpp | 68 +-- lib/Transforms/Scalar/IndVarSimplify.cpp | 26 +- lib/Transforms/Scalar/LoopDeletion.cpp | 2 +- lib/Transforms/Scalar/LoopStrengthReduce.cpp | 206 ++++----- 12 files changed, 568 insertions(+), 641 deletions(-) diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h index ac785d5c54a..30a457ace81 100644 --- a/include/llvm/Analysis/IVUsers.h +++ b/include/llvm/Analysis/IVUsers.h @@ -34,7 +34,7 @@ class IVUsersOfOneStride; class IVStrideUse : public CallbackVH, public ilist_node { public: IVStrideUse(IVUsersOfOneStride *parent, - const SCEVHandle &offset, + const SCEV* offset, Instruction* U, Value *O) : CallbackVH(U), Parent(parent), Offset(offset), OperandValToReplace(O), @@ -58,10 +58,10 @@ public: /// getOffset - Return the offset to add to a theoeretical induction /// variable that starts at zero and counts up by the stride to compute /// the value for the use. This always has the same type as the stride. - SCEVHandle getOffset() const { return Offset; } + const SCEV* getOffset() const { return Offset; } /// setOffset - Assign a new offset to this use. - void setOffset(SCEVHandle Val) { + void setOffset(const SCEV* Val) { Offset = Val; } @@ -96,7 +96,7 @@ private: IVUsersOfOneStride *Parent; /// Offset - The offset to add to the base induction expression. - SCEVHandle Offset; + const SCEV* Offset; /// OperandValToReplace - The Value of the operand in the user instruction /// that this IVStrideUse is representing. @@ -158,7 +158,7 @@ public: /// initial value and the operand that uses the IV. ilist Users; - void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) { + void addUser(const SCEV* Offset, Instruction *User, Value *Operand) { Users.push_back(new IVStrideUse(this, Offset, User, Operand)); } }; @@ -178,12 +178,12 @@ public: /// IVUsesByStride - A mapping from the strides in StrideOrder to the /// uses in IVUses. - std::map IVUsesByStride; + std::map IVUsesByStride; /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable: /// We use this to iterate over the IVUsesByStride collection without being /// dependent on random ordering of pointers in the process. - SmallVector StrideOrder; + SmallVector StrideOrder; private: virtual void getAnalysisUsage(AnalysisUsage &AU) const; @@ -203,7 +203,7 @@ public: /// getReplacementExpr - Return a SCEV expression which computes the /// value of the OperandValToReplace of the given IVStrideUse. 
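// --- Editorial sketch (not part of this patch): how a client now walks IVUsers
// with bare const SCEV* keys instead of SCEVHandle. It assumes IVUsesByStride
// maps each stride to an IVUsersOfOneStride*, as the surrounding classes
// suggest; the exact template arguments were elided in the text above.
#include "llvm/Analysis/IVUsers.h"
using namespace llvm;

static void walkIVUsers(IVUsers &IU) {
  for (unsigned i = 0, e = IU.StrideOrder.size(); i != e; ++i) {
    const SCEV *Stride = IU.StrideOrder[i];              // stride key, now a plain pointer
    IVUsersOfOneStride *Uses = IU.IVUsesByStride[Stride]; // assumed mapped type
    for (ilist<IVStrideUse>::iterator UI = Uses->Users.begin(),
         UE = Uses->Users.end(); UI != UE; ++UI) {
      const SCEV *Offset = UI->getOffset();   // was SCEVHandle before r73906
      (void)Offset;                           // e.g. feed into getReplacementExpr below
    }
  }
}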
- SCEVHandle getReplacementExpr(const IVStrideUse &U) const; + const SCEV* getReplacementExpr(const IVStrideUse &U) const; void print(raw_ostream &OS, const Module* = 0) const; virtual void print(std::ostream &OS, const Module* = 0) const; diff --git a/include/llvm/Analysis/LoopVR.h b/include/llvm/Analysis/LoopVR.h index 1d806f83aa9..36b62152f86 100644 --- a/include/llvm/Analysis/LoopVR.h +++ b/include/llvm/Analysis/LoopVR.h @@ -78,9 +78,9 @@ public: private: ConstantRange compute(Value *V); - ConstantRange getRange(SCEVHandle S, Loop *L, ScalarEvolution &SE); + ConstantRange getRange(const SCEV* S, Loop *L, ScalarEvolution &SE); - ConstantRange getRange(SCEVHandle S, SCEVHandle T, ScalarEvolution &SE); + ConstantRange getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE); std::map Map; }; diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h index e25c054698c..587c51dcb68 100644 --- a/include/llvm/Analysis/ScalarEvolution.h +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -32,7 +32,6 @@ namespace llvm { class APInt; class ConstantInt; class Type; - class SCEVHandle; class ScalarEvolution; class TargetData; class SCEVConstant; @@ -43,7 +42,6 @@ namespace llvm { class SCEVSignExtendExpr; class SCEVAddRecExpr; class SCEVUnknown; - template<> struct DenseMapInfo; /// SCEV - This class represents an analyzed expression in the program. These /// are reference-counted opaque objects that the client is not allowed to @@ -52,9 +50,6 @@ namespace llvm { class SCEV { const unsigned SCEVType; // The SCEV baseclass this node corresponds to - friend class SCEVHandle; - friend class DenseMapInfo; - const ScalarEvolution* parent; SCEV(const SCEV &); // DO NOT IMPLEMENT @@ -94,9 +89,9 @@ namespace llvm { /// the same value, but which uses the concrete value Conc instead of the /// symbolic value. If this SCEV does not use the symbolic value, it /// returns itself. - virtual SCEVHandle - replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + virtual const SCEV* + replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const = 0; /// dominates - Return true if elements that makes up this SCEV dominates @@ -139,9 +134,9 @@ namespace llvm { virtual const Type *getType() const; virtual bool hasComputableLoopEvolution(const Loop *L) const; virtual void print(raw_ostream &OS) const; - virtual SCEVHandle - replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + virtual const SCEV* + replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const; virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const { @@ -153,72 +148,6 @@ namespace llvm { static bool classof(const SCEV *S); }; - /// SCEVHandle - This class is used to maintain the SCEV object's refcounts, - /// freeing the objects when the last reference is dropped. 
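// --- Editorial sketch (illustrative, not part of this patch): the SCEV
// base-class queries shown above, now reached through a plain const SCEV*,
// used to classify how a value evolves with respect to a loop.
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

static const char *classifyEvolution(ScalarEvolution &SE, Value *V,
                                     const Loop *L) {
  const SCEV *S = SE.getSCEV(V);
  if (isa<SCEVCouldNotCompute>(S))      return "not analyzable";
  if (S->isLoopInvariant(L))            return "loop invariant";
  if (S->hasComputableLoopEvolution(L)) return "computable recurrence";
  return "varies unpredictably in the loop";
}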
- class SCEVHandle { - const SCEV *S; - SCEVHandle(); // DO NOT IMPLEMENT - public: - SCEVHandle(const SCEV *s) : S(s) { - assert(S && "Cannot create a handle to a null SCEV!"); - } - SCEVHandle(const SCEVHandle &RHS) : S(RHS.S) { } - ~SCEVHandle() { } - - operator const SCEV*() const { return S; } - - const SCEV &operator*() const { return *S; } - const SCEV *operator->() const { return S; } - - bool operator==(const SCEV *RHS) const { return S == RHS; } - bool operator!=(const SCEV *RHS) const { return S != RHS; } - - const SCEVHandle &operator=(SCEV *RHS) { - if (S != RHS) { - S = RHS; - } - return *this; - } - - const SCEVHandle &operator=(const SCEVHandle &RHS) { - if (S != RHS.S) { - S = RHS.S; - } - return *this; - } - }; - - template struct simplify_type; - template<> struct simplify_type { - typedef const SCEV* SimpleType; - static SimpleType getSimplifiedValue(const SCEVHandle &Node) { - return Node; - } - }; - template<> struct simplify_type - : public simplify_type {}; - - // Specialize DenseMapInfo for SCEVHandle so that SCEVHandle may be used - // as a key in DenseMaps. - template<> - struct DenseMapInfo { - static inline SCEVHandle getEmptyKey() { - static SCEVCouldNotCompute Empty(0); - return &Empty; - } - static inline SCEVHandle getTombstoneKey() { - static SCEVCouldNotCompute Tombstone(0); - return &Tombstone; - } - static unsigned getHashValue(const SCEVHandle &Val) { - return DenseMapInfo::getHashValue(Val); - } - static bool isEqual(const SCEVHandle &LHS, const SCEVHandle &RHS) { - return LHS == RHS; - } - static bool isPod() { return false; } - }; - /// ScalarEvolution - This class is the main scalar evolution driver. Because /// client code (intentionally) can't do much with the SCEV objects directly, /// they must ask this class for services. @@ -251,11 +180,11 @@ namespace llvm { /// CouldNotCompute - This SCEV is used to represent unknown trip /// counts and things. - SCEVHandle CouldNotCompute; + const SCEV* CouldNotCompute; /// Scalars - This is a cache of the scalars we have analyzed so far. /// - std::map Scalars; + std::map Scalars; /// BackedgeTakenInfo - Information about the backedge-taken count /// of a loop. This currently inclues an exact count and a maximum count. @@ -263,19 +192,16 @@ namespace llvm { struct BackedgeTakenInfo { /// Exact - An expression indicating the exact backedge-taken count of /// the loop if it is known, or a SCEVCouldNotCompute otherwise. - SCEVHandle Exact; + const SCEV* Exact; /// Exact - An expression indicating the least maximum backedge-taken /// count of the loop that is known, or a SCEVCouldNotCompute. - SCEVHandle Max; - - /*implicit*/ BackedgeTakenInfo(SCEVHandle exact) : - Exact(exact), Max(exact) {} + const SCEV* Max; - /*implicit*/ BackedgeTakenInfo(const SCEV *exact) : + /*implicit*/ BackedgeTakenInfo(const SCEV* exact) : Exact(exact), Max(exact) {} - BackedgeTakenInfo(SCEVHandle exact, SCEVHandle max) : + BackedgeTakenInfo(const SCEV* exact, const SCEV* max) : Exact(exact), Max(max) {} /// hasAnyInfo - Test whether this BackedgeTakenInfo contains any @@ -305,30 +231,30 @@ namespace llvm { /// createSCEV - We know that there is no SCEV for the specified value. /// Analyze the expression. - SCEVHandle createSCEV(Value *V); + const SCEV* createSCEV(Value *V); /// createNodeForPHI - Provide the special handling we need to analyze PHI /// SCEVs. - SCEVHandle createNodeForPHI(PHINode *PN); + const SCEV* createNodeForPHI(PHINode *PN); /// createNodeForGEP - Provide the special handling we need to analyze GEP /// SCEVs. 
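// --- Editorial sketch: with SCEVHandle and its DenseMapInfo specialization
// removed, a bare const SCEV* keys maps through the default pointer traits.
// A minimal client-side cache, illustrative only (helper name is hypothetical):
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

static unsigned countUniqueExprs(ScalarEvolution &SE,
                                 Value *const *Begin, Value *const *End) {
  DenseMap<const SCEV *, unsigned> Seen;    // keyed directly on the pointer
  for (Value *const *I = Begin; I != End; ++I)
    ++Seen[SE.getSCEV(*I)];                 // uniqued nodes => pointer identity
  return Seen.size();
}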
- SCEVHandle createNodeForGEP(User *GEP); + const SCEV* createNodeForGEP(User *GEP); /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value /// for the specified instruction and replaces any references to the /// symbolic value SymName with the specified value. This is used during /// PHI resolution. void ReplaceSymbolicValueWithConcrete(Instruction *I, - const SCEVHandle &SymName, - const SCEVHandle &NewVal); + const SCEV* SymName, + const SCEV* NewVal); /// getBECount - Subtract the end and start values and divide by the step, /// rounding up, to get the number of times the backedge is executed. Return /// CouldNotCompute if an intermediate computation overflows. - SCEVHandle getBECount(const SCEVHandle &Start, - const SCEVHandle &End, - const SCEVHandle &Step); + const SCEV* getBECount(const SCEV* Start, + const SCEV* End, + const SCEV* Step); /// getBackedgeTakenInfo - Return the BackedgeTakenInfo for the given /// loop, lazily computing new values if the loop hasn't been analyzed @@ -366,7 +292,7 @@ namespace llvm { /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition /// of 'icmp op load X, cst', try to see if we can compute the trip count. - SCEVHandle + const SCEV* ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, Constant *RHS, const Loop *L, @@ -377,18 +303,18 @@ namespace llvm { /// try to evaluate a few iterations of the loop until we get the exit /// condition gets a value of ExitWhen (true or false). If we cannot /// evaluate the trip count of the loop, return CouldNotCompute. - SCEVHandle ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, + const SCEV* ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen); /// HowFarToZero - Return the number of times a backedge comparing the /// specified value to zero will execute. If not computable, return /// CouldNotCompute. - SCEVHandle HowFarToZero(const SCEV *V, const Loop *L); + const SCEV* HowFarToZero(const SCEV *V, const Loop *L); /// HowFarToNonZero - Return the number of times a backedge checking the /// specified value for nonzero will execute. If not computable, return /// CouldNotCompute. - SCEVHandle HowFarToNonZero(const SCEV *V, const Loop *L); + const SCEV* HowFarToNonZero(const SCEV *V, const Loop *L); /// HowManyLessThans - Return the number of times a backedge containing the /// specified less-than comparison will execute. If not computable, return @@ -440,115 +366,115 @@ namespace llvm { /// getSCEV - Return a SCEV expression handle for the full generality of the /// specified expression. 
- SCEVHandle getSCEV(Value *V); - - SCEVHandle getConstant(ConstantInt *V); - SCEVHandle getConstant(const APInt& Val); - SCEVHandle getConstant(const Type *Ty, uint64_t V, bool isSigned = false); - SCEVHandle getTruncateExpr(const SCEVHandle &Op, const Type *Ty); - SCEVHandle getZeroExtendExpr(const SCEVHandle &Op, const Type *Ty); - SCEVHandle getSignExtendExpr(const SCEVHandle &Op, const Type *Ty); - SCEVHandle getAnyExtendExpr(const SCEVHandle &Op, const Type *Ty); - SCEVHandle getAddExpr(SmallVectorImpl &Ops); - SCEVHandle getAddExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) { - SmallVector Ops; + const SCEV* getSCEV(Value *V); + + const SCEV* getConstant(ConstantInt *V); + const SCEV* getConstant(const APInt& Val); + const SCEV* getConstant(const Type *Ty, uint64_t V, bool isSigned = false); + const SCEV* getTruncateExpr(const SCEV* Op, const Type *Ty); + const SCEV* getZeroExtendExpr(const SCEV* Op, const Type *Ty); + const SCEV* getSignExtendExpr(const SCEV* Op, const Type *Ty); + const SCEV* getAnyExtendExpr(const SCEV* Op, const Type *Ty); + const SCEV* getAddExpr(SmallVectorImpl &Ops); + const SCEV* getAddExpr(const SCEV* LHS, const SCEV* RHS) { + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getAddExpr(Ops); } - SCEVHandle getAddExpr(const SCEVHandle &Op0, const SCEVHandle &Op1, - const SCEVHandle &Op2) { - SmallVector Ops; + const SCEV* getAddExpr(const SCEV* Op0, const SCEV* Op1, + const SCEV* Op2) { + SmallVector Ops; Ops.push_back(Op0); Ops.push_back(Op1); Ops.push_back(Op2); return getAddExpr(Ops); } - SCEVHandle getMulExpr(SmallVectorImpl &Ops); - SCEVHandle getMulExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) { - SmallVector Ops; + const SCEV* getMulExpr(SmallVectorImpl &Ops); + const SCEV* getMulExpr(const SCEV* LHS, const SCEV* RHS) { + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getMulExpr(Ops); } - SCEVHandle getUDivExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); - SCEVHandle getAddRecExpr(const SCEVHandle &Start, const SCEVHandle &Step, + const SCEV* getUDivExpr(const SCEV* LHS, const SCEV* RHS); + const SCEV* getAddRecExpr(const SCEV* Start, const SCEV* Step, const Loop *L); - SCEVHandle getAddRecExpr(SmallVectorImpl &Operands, + const SCEV* getAddRecExpr(SmallVectorImpl &Operands, const Loop *L); - SCEVHandle getAddRecExpr(const SmallVectorImpl &Operands, + const SCEV* getAddRecExpr(const SmallVectorImpl &Operands, const Loop *L) { - SmallVector NewOp(Operands.begin(), Operands.end()); + SmallVector NewOp(Operands.begin(), Operands.end()); return getAddRecExpr(NewOp, L); } - SCEVHandle getSMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); - SCEVHandle getSMaxExpr(SmallVectorImpl &Operands); - SCEVHandle getUMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); - SCEVHandle getUMaxExpr(SmallVectorImpl &Operands); - SCEVHandle getSMinExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); - SCEVHandle getUMinExpr(const SCEVHandle &LHS, const SCEVHandle &RHS); - SCEVHandle getUnknown(Value *V); - SCEVHandle getCouldNotCompute(); + const SCEV* getSMaxExpr(const SCEV* LHS, const SCEV* RHS); + const SCEV* getSMaxExpr(SmallVectorImpl &Operands); + const SCEV* getUMaxExpr(const SCEV* LHS, const SCEV* RHS); + const SCEV* getUMaxExpr(SmallVectorImpl &Operands); + const SCEV* getSMinExpr(const SCEV* LHS, const SCEV* RHS); + const SCEV* getUMinExpr(const SCEV* LHS, const SCEV* RHS); + const SCEV* getUnknown(Value *V); + const SCEV* getCouldNotCompute(); /// getNegativeSCEV - Return the SCEV object corresponding to -V. 
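// --- Editorial sketch of the builder API above with the new const SCEV*
// return type: form 2*V + 1 and the recurrence {2*V + 1,+,4} for loop L.
// Illustrative only; assumes V has integer type and is defined outside L,
// since add-recurrence operands must be invariant in L.
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

static const SCEV *buildExample(ScalarEvolution &SE, Value *V, const Loop *L) {
  const SCEV *X    = SE.getSCEV(V);
  const Type *Ty   = X->getType();
  const SCEV *Expr = SE.getAddExpr(SE.getMulExpr(SE.getConstant(Ty, 2), X),
                                   SE.getConstant(Ty, 1));        // 2*V + 1
  return SE.getAddRecExpr(Expr, SE.getConstant(Ty, 4), L);        // {2*V+1,+,4}<L>
}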
/// - SCEVHandle getNegativeSCEV(const SCEVHandle &V); + const SCEV* getNegativeSCEV(const SCEV* V); /// getNotSCEV - Return the SCEV object corresponding to ~V. /// - SCEVHandle getNotSCEV(const SCEVHandle &V); + const SCEV* getNotSCEV(const SCEV* V); /// getMinusSCEV - Return LHS-RHS. /// - SCEVHandle getMinusSCEV(const SCEVHandle &LHS, - const SCEVHandle &RHS); + const SCEV* getMinusSCEV(const SCEV* LHS, + const SCEV* RHS); /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion /// of the input value to the specified type. If the type must be /// extended, it is zero extended. - SCEVHandle getTruncateOrZeroExtend(const SCEVHandle &V, const Type *Ty); + const SCEV* getTruncateOrZeroExtend(const SCEV* V, const Type *Ty); /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion /// of the input value to the specified type. If the type must be /// extended, it is sign extended. - SCEVHandle getTruncateOrSignExtend(const SCEVHandle &V, const Type *Ty); + const SCEV* getTruncateOrSignExtend(const SCEV* V, const Type *Ty); /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is zero extended. The conversion must not be narrowing. - SCEVHandle getNoopOrZeroExtend(const SCEVHandle &V, const Type *Ty); + const SCEV* getNoopOrZeroExtend(const SCEV* V, const Type *Ty); /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is sign extended. The conversion must not be narrowing. - SCEVHandle getNoopOrSignExtend(const SCEVHandle &V, const Type *Ty); + const SCEV* getNoopOrSignExtend(const SCEV* V, const Type *Ty); /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is extended with unspecified bits. The conversion must not be /// narrowing. - SCEVHandle getNoopOrAnyExtend(const SCEVHandle &V, const Type *Ty); + const SCEV* getNoopOrAnyExtend(const SCEV* V, const Type *Ty); /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the /// input value to the specified type. The conversion must not be /// widening. - SCEVHandle getTruncateOrNoop(const SCEVHandle &V, const Type *Ty); + const SCEV* getTruncateOrNoop(const SCEV* V, const Type *Ty); /// getIntegerSCEV - Given an integer or FP type, create a constant for the /// specified signed integer value and return a SCEV for the constant. - SCEVHandle getIntegerSCEV(int Val, const Type *Ty); + const SCEV* getIntegerSCEV(int Val, const Type *Ty); /// getUMaxFromMismatchedTypes - Promote the operands to the wider of /// the types using zero-extension, and then perform a umax operation /// with them. - SCEVHandle getUMaxFromMismatchedTypes(const SCEVHandle &LHS, - const SCEVHandle &RHS); + const SCEV* getUMaxFromMismatchedTypes(const SCEV* LHS, + const SCEV* RHS); /// getUMinFromMismatchedTypes - Promote the operands to the wider of /// the types using zero-extension, and then perform a umin operation /// with them. - SCEVHandle getUMinFromMismatchedTypes(const SCEVHandle &LHS, - const SCEVHandle &RHS); + const SCEV* getUMinFromMismatchedTypes(const SCEV* LHS, + const SCEV* RHS); /// hasSCEV - Return true if the SCEV for this value has already been /// computed. @@ -556,7 +482,7 @@ namespace llvm { /// setSCEV - Insert the specified SCEV into the map of current SCEVs for /// the specified value. 
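// --- Editorial sketch of the conversion helpers above: zero-extend two
// integer expressions to the wider of their types before adding them, the
// same pattern getUMaxFromMismatchedTypes is documented to use. Illustrative
// only; assumes both operands have integer types.
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

static const SCEV *addMismatchedTypes(ScalarEvolution &SE,
                                      const SCEV *A, const SCEV *B) {
  const IntegerType *ATy = cast<IntegerType>(A->getType());
  const IntegerType *BTy = cast<IntegerType>(B->getType());
  const Type *WideTy = ATy->getBitWidth() >= BTy->getBitWidth() ? ATy : BTy;
  return SE.getAddExpr(SE.getNoopOrZeroExtend(A, WideTy),
                       SE.getNoopOrZeroExtend(B, WideTy));
}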
- void setSCEV(Value *V, const SCEVHandle &H); + void setSCEV(Value *V, const SCEV* H); /// getSCEVAtScope - Return a SCEV expression handle for the specified value /// at the specified scope in the program. The L value specifies a loop @@ -568,11 +494,11 @@ namespace llvm { /// /// In the case that a relevant loop exit value cannot be computed, the /// original value V is returned. - SCEVHandle getSCEVAtScope(const SCEV *S, const Loop *L); + const SCEV* getSCEVAtScope(const SCEV *S, const Loop *L); /// getSCEVAtScope - This is a convenience function which does /// getSCEVAtScope(getSCEV(V), L). - SCEVHandle getSCEVAtScope(Value *V, const Loop *L); + const SCEV* getSCEVAtScope(Value *V, const Loop *L); /// isLoopGuardedByCond - Test whether entry to the loop is protected by /// a conditional between LHS and RHS. This is used to help avoid max @@ -591,12 +517,12 @@ namespace llvm { /// loop-invariant backedge-taken count (see /// hasLoopInvariantBackedgeTakenCount). /// - SCEVHandle getBackedgeTakenCount(const Loop *L); + const SCEV* getBackedgeTakenCount(const Loop *L); /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except /// return the least SCEV value that is known never to be less than the /// actual backedge taken count. - SCEVHandle getMaxBackedgeTakenCount(const Loop *L); + const SCEV* getMaxBackedgeTakenCount(const Loop *L); /// hasLoopInvariantBackedgeTakenCount - Return true if the specified loop /// has an analyzable loop-invariant backedge-taken count. @@ -612,15 +538,15 @@ namespace llvm { /// guaranteed to end in (at every loop iteration). It is, at the same time, /// the minimum number of times S is divisible by 2. For example, given {4,+,8} /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. - uint32_t GetMinTrailingZeros(const SCEVHandle &S); + uint32_t GetMinTrailingZeros(const SCEV* S); /// GetMinLeadingZeros - Determine the minimum number of zero bits that S is /// guaranteed to begin with (at every loop iteration). - uint32_t GetMinLeadingZeros(const SCEVHandle &S); + uint32_t GetMinLeadingZeros(const SCEV* S); /// GetMinSignBits - Determine the minimum number of sign bits that S is /// guaranteed to begin with. - uint32_t GetMinSignBits(const SCEVHandle &S); + uint32_t GetMinSignBits(const SCEV* S); virtual bool runOnFunction(Function &F); virtual void releaseMemory(); diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h index b40fbf06f9c..730c97fff4d 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpander.h +++ b/include/llvm/Analysis/ScalarEvolutionExpander.h @@ -28,7 +28,7 @@ namespace llvm { /// memory. struct SCEVExpander : public SCEVVisitor { ScalarEvolution &SE; - std::map > InsertedExpressions; + std::map > InsertedExpressions; std::set InsertedValues; BasicBlock::iterator InsertPt; @@ -77,12 +77,12 @@ namespace llvm { /// expression into the program. The inserted code is inserted into the /// SCEVExpander's current insertion point. If a type is specified, the /// result will be expanded to have that type, with a cast if necessary. - Value *expandCodeFor(SCEVHandle SH, const Type *Ty = 0); + Value *expandCodeFor(const SCEV* SH, const Type *Ty = 0); /// expandCodeFor - Insert code to directly compute the specified SCEV /// expression into the program. The inserted code is inserted into the /// specified block. 
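// --- Editorial sketch of SCEVExpander::expandCodeFor above: materialize a
// loop's trip count (backedge-taken count + 1, ignoring potential overflow of
// the +1) as IR at a given insertion point. Illustrative only; the
// SCEVExpander constructor taking a ScalarEvolution reference is assumed
// from its SE member.
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
using namespace llvm;

static Value *emitTripCount(ScalarEvolution &SE, const Loop *L,
                            BasicBlock::iterator IP) {
  const SCEV *BEC = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BEC))
    return 0;                                           // not computable
  const SCEV *Trip = SE.getAddExpr(BEC, SE.getConstant(BEC->getType(), 1));
  SCEVExpander Expander(SE);                            // assumed ctor signature
  return Expander.expandCodeFor(Trip, Trip->getType(), IP);
}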
- Value *expandCodeFor(SCEVHandle SH, const Type *Ty, + Value *expandCodeFor(const SCEV* SH, const Type *Ty, BasicBlock::iterator IP) { setInsertionPoint(IP); return expandCodeFor(SH, Ty); @@ -105,7 +105,8 @@ namespace llvm { private: /// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP /// instead of using ptrtoint+arithmetic+inttoptr. - Value *expandAddToGEP(const SCEVHandle *op_begin, const SCEVHandle *op_end, + Value *expandAddToGEP(const SCEV* const *op_begin, + const SCEV* const *op_end, const PointerType *PTy, const Type *Ty, Value *V); Value *expand(const SCEV *S); diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h index 0cc50f756f9..8bfd29c2be7 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpressions.h +++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h @@ -51,8 +51,8 @@ namespace llvm { virtual const Type *getType() const; - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { return this; } @@ -75,15 +75,15 @@ namespace llvm { /// class SCEVCastExpr : public SCEV { protected: - SCEVHandle Op; + const SCEV* Op; const Type *Ty; - SCEVCastExpr(unsigned SCEVTy, const SCEVHandle &op, const Type *ty, + SCEVCastExpr(unsigned SCEVTy, const SCEV* op, const Type *ty, const ScalarEvolution* p); virtual ~SCEVCastExpr(); public: - const SCEVHandle &getOperand() const { return Op; } + const SCEV* getOperand() const { return Op; } virtual const Type *getType() const { return Ty; } virtual bool isLoopInvariant(const Loop *L) const { @@ -112,14 +112,14 @@ namespace llvm { class SCEVTruncateExpr : public SCEVCastExpr { friend class ScalarEvolution; - SCEVTruncateExpr(const SCEVHandle &op, const Type *ty, + SCEVTruncateExpr(const SCEV* op, const Type *ty, const ScalarEvolution* p); public: - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { - SCEVHandle H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV* H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H == Op) return this; return SE.getTruncateExpr(H, Ty); @@ -141,14 +141,14 @@ namespace llvm { class SCEVZeroExtendExpr : public SCEVCastExpr { friend class ScalarEvolution; - SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty, + SCEVZeroExtendExpr(const SCEV* op, const Type *ty, const ScalarEvolution* p); public: - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { - SCEVHandle H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV* H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H == Op) return this; return SE.getZeroExtendExpr(H, Ty); @@ -170,14 +170,14 @@ namespace llvm { class SCEVSignExtendExpr : public SCEVCastExpr { friend class ScalarEvolution; - SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty, + SCEVSignExtendExpr(const SCEV* op, const Type *ty, const ScalarEvolution* p); public: - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { - SCEVHandle H = 
Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV* H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H == Op) return this; return SE.getSignExtendExpr(H, Ty); @@ -199,22 +199,22 @@ namespace llvm { /// class SCEVNAryExpr : public SCEV { protected: - SmallVector Operands; + SmallVector Operands; - SCEVNAryExpr(enum SCEVTypes T, const SmallVectorImpl &ops, + SCEVNAryExpr(enum SCEVTypes T, const SmallVectorImpl &ops, const ScalarEvolution* p) : SCEV(T, p), Operands(ops.begin(), ops.end()) {} virtual ~SCEVNAryExpr() {} public: unsigned getNumOperands() const { return (unsigned)Operands.size(); } - const SCEVHandle &getOperand(unsigned i) const { + const SCEV* getOperand(unsigned i) const { assert(i < Operands.size() && "Operand index out of range!"); return Operands[i]; } - const SmallVectorImpl &getOperands() const { return Operands; } - typedef SmallVectorImpl::const_iterator op_iterator; + const SmallVectorImpl &getOperands() const { return Operands; } + typedef SmallVectorImpl::const_iterator op_iterator; op_iterator op_begin() const { return Operands.begin(); } op_iterator op_end() const { return Operands.end(); } @@ -261,13 +261,13 @@ namespace llvm { class SCEVCommutativeExpr : public SCEVNAryExpr { protected: SCEVCommutativeExpr(enum SCEVTypes T, - const SmallVectorImpl &ops, + const SmallVectorImpl &ops, const ScalarEvolution* p) : SCEVNAryExpr(T, ops, p) {} public: - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const; virtual const char *getOperationStr() const = 0; @@ -291,7 +291,7 @@ namespace llvm { class SCEVAddExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVAddExpr(const SmallVectorImpl &ops, + explicit SCEVAddExpr(const SmallVectorImpl &ops, const ScalarEvolution* p) : SCEVCommutativeExpr(scAddExpr, ops, p) { } @@ -312,7 +312,7 @@ namespace llvm { class SCEVMulExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVMulExpr(const SmallVectorImpl &ops, + explicit SCEVMulExpr(const SmallVectorImpl &ops, const ScalarEvolution* p) : SCEVCommutativeExpr(scMulExpr, ops, p) { } @@ -334,14 +334,15 @@ namespace llvm { class SCEVUDivExpr : public SCEV { friend class ScalarEvolution; - SCEVHandle LHS, RHS; - SCEVUDivExpr(const SCEVHandle &lhs, const SCEVHandle &rhs, + const SCEV* LHS; + const SCEV* RHS; + SCEVUDivExpr(const SCEV* lhs, const SCEV* rhs, const ScalarEvolution* p) : SCEV(scUDivExpr, p), LHS(lhs), RHS(rhs) {} public: - const SCEVHandle &getLHS() const { return LHS; } - const SCEVHandle &getRHS() const { return RHS; } + const SCEV* getLHS() const { return LHS; } + const SCEV* getRHS() const { return RHS; } virtual bool isLoopInvariant(const Loop *L) const { return LHS->isLoopInvariant(L) && RHS->isLoopInvariant(L); @@ -352,11 +353,11 @@ namespace llvm { RHS->hasComputableLoopEvolution(L); } - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { - SCEVHandle L = LHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); - SCEVHandle R = RHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV* L = LHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV* R = RHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (L == LHS && R == RHS) return this; else @@ -391,7 +392,7 
@@ namespace llvm { const Loop *L; - SCEVAddRecExpr(const SmallVectorImpl &ops, const Loop *l, + SCEVAddRecExpr(const SmallVectorImpl &ops, const Loop *l, const ScalarEvolution* p) : SCEVNAryExpr(scAddRecExpr, ops, p), L(l) { for (size_t i = 0, e = Operands.size(); i != e; ++i) @@ -400,15 +401,15 @@ namespace llvm { } public: - const SCEVHandle &getStart() const { return Operands[0]; } + const SCEV* getStart() const { return Operands[0]; } const Loop *getLoop() const { return L; } /// getStepRecurrence - This method constructs and returns the recurrence /// indicating how much this expression steps by. If this is a polynomial /// of degree N, it returns a chrec of degree N-1. - SCEVHandle getStepRecurrence(ScalarEvolution &SE) const { + const SCEV* getStepRecurrence(ScalarEvolution &SE) const { if (isAffine()) return getOperand(1); - return SE.getAddRecExpr(SmallVector(op_begin()+1,op_end()), + return SE.getAddRecExpr(SmallVector(op_begin()+1,op_end()), getLoop()); } @@ -436,7 +437,7 @@ namespace llvm { /// evaluateAtIteration - Return the value of this chain of recurrences at /// the specified iteration number. - SCEVHandle evaluateAtIteration(SCEVHandle It, ScalarEvolution &SE) const; + const SCEV* evaluateAtIteration(const SCEV* It, ScalarEvolution &SE) const; /// getNumIterationsInRange - Return the number of iterations of this loop /// that produce values in the specified constant range. Another way of @@ -444,11 +445,11 @@ namespace llvm { /// value is not in the condition, thus computing the exit count. If the /// iteration count can't be computed, an instance of SCEVCouldNotCompute is /// returned. - SCEVHandle getNumIterationsInRange(ConstantRange Range, + const SCEV* getNumIterationsInRange(ConstantRange Range, ScalarEvolution &SE) const; - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const; virtual void print(raw_ostream &OS) const; @@ -467,7 +468,7 @@ namespace llvm { class SCEVSMaxExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVSMaxExpr(const SmallVectorImpl &ops, + explicit SCEVSMaxExpr(const SmallVectorImpl &ops, const ScalarEvolution* p) : SCEVCommutativeExpr(scSMaxExpr, ops, p) { } @@ -489,7 +490,7 @@ namespace llvm { class SCEVUMaxExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVUMaxExpr(const SmallVectorImpl &ops, + explicit SCEVUMaxExpr(const SmallVectorImpl &ops, const ScalarEvolution* p) : SCEVCommutativeExpr(scUMaxExpr, ops, p) { } @@ -525,8 +526,8 @@ namespace llvm { return false; // not computable } - SCEVHandle replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, + const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { if (&*Sym == this) return Conc; return this; diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp index 6a53a83665c..caeb14bef37 100644 --- a/lib/Analysis/IVUsers.cpp +++ b/lib/Analysis/IVUsers.cpp @@ -39,7 +39,7 @@ Pass *llvm::createIVUsersPass() { /// containsAddRecFromDifferentLoop - Determine whether expression S involves a /// subexpression that is an AddRec from a loop other than L. An outer loop /// of L is OK, but not an inner loop nor a disjoint loop. -static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) { +static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) { // This is very common, put it first. 
if (isa(S)) return false; @@ -80,10 +80,10 @@ static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) { /// a mix of loop invariant and loop variant expressions. The start cannot, /// however, contain an AddRec from a different loop, unless that loop is an /// outer loop of the current loop. -static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L, Loop *UseLoop, - SCEVHandle &Start, SCEVHandle &Stride, +static bool getSCEVStartAndStride(const SCEV* &SH, Loop *L, Loop *UseLoop, + const SCEV* &Start, const SCEV* &Stride, ScalarEvolution *SE, DominatorTree *DT) { - SCEVHandle TheAddRec = Start; // Initialize to zero. + const SCEV* TheAddRec = Start; // Initialize to zero. // If the outer level is an AddExpr, the operands are all start values except // for a nested AddRecExpr. @@ -109,9 +109,9 @@ static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L, Loop *UseLoop, // Use getSCEVAtScope to attempt to simplify other loops out of // the picture. - SCEVHandle AddRecStart = AddRec->getStart(); + const SCEV* AddRecStart = AddRec->getStart(); AddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop); - SCEVHandle AddRecStride = AddRec->getStepRecurrence(*SE); + const SCEV* AddRecStride = AddRec->getStepRecurrence(*SE); // FIXME: If Start contains an SCEVAddRecExpr from a different loop, other // than an outer loop of the current loop, reject it. LSR has no concept of @@ -196,13 +196,13 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { return true; // Instruction already handled. // Get the symbolic expression for this instruction. - SCEVHandle ISE = SE->getSCEV(I); + const SCEV* ISE = SE->getSCEV(I); if (isa(ISE)) return false; // Get the start and stride for this expression. Loop *UseLoop = LI->getLoopFor(I->getParent()); - SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType()); - SCEVHandle Stride = Start; + const SCEV* Start = SE->getIntegerSCEV(0, ISE->getType()); + const SCEV* Stride = Start; if (!getSCEVStartAndStride(ISE, L, UseLoop, Start, Stride, SE, DT)) return false; // Non-reducible symbolic expression, bail out. @@ -254,7 +254,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { if (IVUseShouldUsePostIncValue(User, I, L, LI, DT, this)) { // The value used will be incremented by the stride more than we are // expecting, so subtract this off. - SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride); + const SCEV* NewStart = SE->getMinusSCEV(Start, Stride); StrideUses->addUser(NewStart, User, I); StrideUses->Users.back().setIsUseOfPostIncrementedValue(true); DOUT << " USING POSTINC SCEV, START=" << *NewStart<< "\n"; @@ -295,9 +295,9 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) { /// getReplacementExpr - Return a SCEV expression which computes the /// value of the OperandValToReplace of the given IVStrideUse. -SCEVHandle IVUsers::getReplacementExpr(const IVStrideUse &U) const { +const SCEV* IVUsers::getReplacementExpr(const IVStrideUse &U) const { // Start with zero. - SCEVHandle RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType()); + const SCEV* RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType()); // Create the basic add recurrence. RetVal = SE->getAddRecExpr(RetVal, U.getParent()->Stride, L); // Add the offset in a separate step, because it may be loop-variant. @@ -308,7 +308,7 @@ SCEVHandle IVUsers::getReplacementExpr(const IVStrideUse &U) const { RetVal = SE->getAddExpr(RetVal, U.getParent()->Stride); // Evaluate the expression out of the loop, if possible. 
if (!L->contains(U.getUser()->getParent())) { - SCEVHandle ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop()); + const SCEV* ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop()); if (ExitVal->isLoopInvariant(L)) RetVal = ExitVal; } @@ -325,7 +325,7 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const { OS << ":\n"; for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) { - std::map::const_iterator SI = + std::map::const_iterator SI = IVUsesByStride.find(StrideOrder[Stride]); assert(SI != IVUsesByStride.end() && "Stride doesn't exist!"); OS << " Stride " << *SI->first->getType() << " " << *SI->first << ":\n"; diff --git a/lib/Analysis/LoopVR.cpp b/lib/Analysis/LoopVR.cpp index 0a3d06bed7e..ae715ac5863 100644 --- a/lib/Analysis/LoopVR.cpp +++ b/lib/Analysis/LoopVR.cpp @@ -26,8 +26,8 @@ char LoopVR::ID = 0; static RegisterPass X("loopvr", "Loop Value Ranges", false, true); /// getRange - determine the range for a particular SCEV within a given Loop -ConstantRange LoopVR::getRange(SCEVHandle S, Loop *L, ScalarEvolution &SE) { - SCEVHandle T = SE.getBackedgeTakenCount(L); +ConstantRange LoopVR::getRange(const SCEV* S, Loop *L, ScalarEvolution &SE) { + const SCEV* T = SE.getBackedgeTakenCount(L); if (isa(T)) return ConstantRange(cast(S->getType())->getBitWidth(), true); @@ -36,7 +36,7 @@ ConstantRange LoopVR::getRange(SCEVHandle S, Loop *L, ScalarEvolution &SE) { } /// getRange - determine the range for a particular SCEV with a given trip count -ConstantRange LoopVR::getRange(SCEVHandle S, SCEVHandle T, ScalarEvolution &SE){ +ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE){ if (const SCEVConstant *C = dyn_cast(S)) return ConstantRange(C->getValue()->getValue()); @@ -182,8 +182,8 @@ ConstantRange LoopVR::getRange(SCEVHandle S, SCEVHandle T, ScalarEvolution &SE){ if (!Trip) return FullSet; if (AddRec->isAffine()) { - SCEVHandle StartHandle = AddRec->getStart(); - SCEVHandle StepHandle = AddRec->getOperand(1); + const SCEV* StartHandle = AddRec->getStart(); + const SCEV* StepHandle = AddRec->getOperand(1); const SCEVConstant *Step = dyn_cast(StepHandle); if (!Step) return FullSet; @@ -194,7 +194,7 @@ ConstantRange LoopVR::getRange(SCEVHandle S, SCEVHandle T, ScalarEvolution &SE){ if ((TripExt * StepExt).ugt(APInt::getLowBitsSet(ExWidth, ExWidth >> 1))) return FullSet; - SCEVHandle EndHandle = SE.getAddExpr(StartHandle, + const SCEV* EndHandle = SE.getAddExpr(StartHandle, SE.getMulExpr(T, StepHandle)); const SCEVConstant *Start = dyn_cast(StartHandle); const SCEVConstant *End = dyn_cast(EndHandle); @@ -254,7 +254,7 @@ ConstantRange LoopVR::compute(Value *V) { ScalarEvolution &SE = getAnalysis(); - SCEVHandle S = SE.getSCEV(I); + const SCEV* S = SE.getSCEV(I); if (isa(S) || isa(S)) return ConstantRange(cast(V->getType())->getBitWidth(), false); diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index d9394805c8b..f45562363a7 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -14,7 +14,7 @@ // There are several aspects to this library. First is the representation of // scalar expressions, which are represented as subclasses of the SCEV class. // These classes are used to represent certain types of subexpressions that we -// can handle. These classes are reference counted, managed by the SCEVHandle +// can handle. These classes are reference counted, managed by the const SCEV* // class. 
We only create one SCEV of a particular shape, so pointer-comparisons // for equality are legal. // @@ -152,9 +152,9 @@ bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const { return false; } -SCEVHandle SCEVCouldNotCompute:: -replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, +const SCEV* SCEVCouldNotCompute:: +replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { return this; } @@ -169,20 +169,20 @@ bool SCEVCouldNotCompute::classof(const SCEV *S) { // SCEVConstants - Only allow the creation of one SCEVConstant for any -// particular value. Don't use a SCEVHandle here, or else the object will +// particular value. Don't use a const SCEV* here, or else the object will // never be deleted! -SCEVHandle ScalarEvolution::getConstant(ConstantInt *V) { +const SCEV* ScalarEvolution::getConstant(ConstantInt *V) { SCEVConstant *&R = SCEVConstants[V]; if (R == 0) R = new SCEVConstant(V, this); return R; } -SCEVHandle ScalarEvolution::getConstant(const APInt& Val) { +const SCEV* ScalarEvolution::getConstant(const APInt& Val) { return getConstant(ConstantInt::get(Val)); } -SCEVHandle +const SCEV* ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) { return getConstant(ConstantInt::get(cast(Ty), V, isSigned)); } @@ -194,7 +194,7 @@ void SCEVConstant::print(raw_ostream &OS) const { } SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy, - const SCEVHandle &op, const Type *ty, + const SCEV* op, const Type *ty, const ScalarEvolution* p) : SCEV(SCEVTy, p), Op(op), Ty(ty) {} @@ -205,10 +205,10 @@ bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { } // SCEVTruncates - Only allow the creation of one SCEVTruncateExpr for any -// particular input. Don't use a SCEVHandle here, or else the object will +// particular input. Don't use a const SCEV* here, or else the object will // never be deleted! -SCEVTruncateExpr::SCEVTruncateExpr(const SCEVHandle &op, const Type *ty, +SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty, const ScalarEvolution* p) : SCEVCastExpr(scTruncate, op, ty, p) { assert((Op->getType()->isInteger() || isa(Op->getType())) && @@ -222,10 +222,10 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const { } // SCEVZeroExtends - Only allow the creation of one SCEVZeroExtendExpr for any -// particular input. Don't use a SCEVHandle here, or else the object will never +// particular input. Don't use a const SCEV* here, or else the object will never // be deleted! -SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty, +SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty, const ScalarEvolution* p) : SCEVCastExpr(scZeroExtend, op, ty, p) { assert((Op->getType()->isInteger() || isa(Op->getType())) && @@ -238,10 +238,10 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const { } // SCEVSignExtends - Only allow the creation of one SCEVSignExtendExpr for any -// particular input. Don't use a SCEVHandle here, or else the object will never +// particular input. Don't use a const SCEV* here, or else the object will never // be deleted! 
-SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty, +SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty, const ScalarEvolution* p) : SCEVCastExpr(scSignExtend, op, ty, p) { assert((Op->getType()->isInteger() || isa(Op->getType())) && @@ -254,7 +254,7 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const { } // SCEVCommExprs - Only allow the creation of one SCEVCommutativeExpr for any -// particular input. Don't use a SCEVHandle here, or else the object will never +// particular input. Don't use a const SCEV* here, or else the object will never // be deleted! void SCEVCommutativeExpr::print(raw_ostream &OS) const { @@ -266,15 +266,15 @@ void SCEVCommutativeExpr::print(raw_ostream &OS) const { OS << ")"; } -SCEVHandle SCEVCommutativeExpr:: -replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, +const SCEV* SCEVCommutativeExpr:: +replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { - SCEVHandle H = + const SCEV* H = getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H != getOperand(i)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(getNumOperands()); for (unsigned j = 0; j != i; ++j) NewOps.push_back(getOperand(j)); @@ -308,7 +308,7 @@ bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { // SCEVUDivs - Only allow the creation of one SCEVUDivExpr for any particular -// input. Don't use a SCEVHandle here, or else the object will never be +// input. Don't use a const SCEV* here, or else the object will never be // deleted! bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { @@ -329,18 +329,18 @@ const Type *SCEVUDivExpr::getType() const { } // SCEVAddRecExprs - Only allow the creation of one SCEVAddRecExpr for any -// particular input. Don't use a SCEVHandle here, or else the object will never +// particular input. Don't use a const SCEV* here, or else the object will never // be deleted! -SCEVHandle SCEVAddRecExpr:: -replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym, - const SCEVHandle &Conc, +const SCEV* SCEVAddRecExpr:: +replaceSymbolicValuesWithConcrete(const SCEV* Sym, + const SCEV* Conc, ScalarEvolution &SE) const { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { - SCEVHandle H = + const SCEV* H = getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H != getOperand(i)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(getNumOperands()); for (unsigned j = 0; j != i; ++j) NewOps.push_back(getOperand(j)); @@ -374,7 +374,7 @@ void SCEVAddRecExpr::print(raw_ostream &OS) const { } // SCEVUnknowns - Only allow the creation of one SCEVUnknown for any particular -// value. Don't use a SCEVHandle here, or else the object will never be +// value. Don't use a const SCEV* here, or else the object will never be // deleted! bool SCEVUnknown::isLoopInvariant(const Loop *L) const { @@ -531,7 +531,7 @@ namespace { /// this to depend on where the addresses of various SCEV objects happened to /// land in memory. /// -static void GroupByComplexity(SmallVectorImpl &Ops, +static void GroupByComplexity(SmallVectorImpl &Ops, LoopInfo *LI) { if (Ops.size() < 2) return; // Noop if (Ops.size() == 2) { @@ -574,7 +574,7 @@ static void GroupByComplexity(SmallVectorImpl &Ops, /// BinomialCoefficient - Compute BC(It, K). The result has width W. /// Assume, K > 0. 
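// --- Editorial sketch tying the binomial-coefficient machinery above to its
// public entry point, SCEVAddRecExpr::evaluateAtIteration: the value of
// {A0,+,A1,+,A2,...} after It iterations is
//   A0 + A1*BC(It,1) + A2*BC(It,2) + ...
// e.g. {1,+,2,+,2} at iteration It folds to It*It + It + 1. Illustrative only.
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
using namespace llvm;

static const SCEV *valueAtIteration(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *AR, uint64_t It) {
  const SCEV *ItExpr = SE.getConstant(AR->getType(), It);
  return AR->evaluateAtIteration(ItExpr, SE);   // constant-folds for constant It
}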
-static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K, +static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K, ScalarEvolution &SE, const Type* ResultTy) { // Handle the simplest case efficiently. @@ -667,15 +667,15 @@ static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K, // Calculate the product, at width T+W const IntegerType *CalculationTy = IntegerType::get(CalculationBits); - SCEVHandle Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); + const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); for (unsigned i = 1; i != K; ++i) { - SCEVHandle S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType())); + const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType())); Dividend = SE.getMulExpr(Dividend, SE.getTruncateOrZeroExtend(S, CalculationTy)); } // Divide by 2^T - SCEVHandle DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); + const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); // Truncate the result, and divide by K! / 2^T. @@ -692,14 +692,14 @@ static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K, /// /// where BC(It, k) stands for binomial coefficient. /// -SCEVHandle SCEVAddRecExpr::evaluateAtIteration(SCEVHandle It, +const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It, ScalarEvolution &SE) const { - SCEVHandle Result = getStart(); + const SCEV* Result = getStart(); for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { // The computation is correct in the face of overflow provided that the // multiplication is performed _after_ the evaluation of the binomial // coefficient. - SCEVHandle Coeff = BinomialCoefficient(It, i, SE, getType()); + const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType()); if (isa(Coeff)) return Coeff; @@ -712,7 +712,7 @@ SCEVHandle SCEVAddRecExpr::evaluateAtIteration(SCEVHandle It, // SCEV Expression folder implementations //===----------------------------------------------------------------------===// -SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op, +const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && "This is not a truncating conversion!"); @@ -738,7 +738,7 @@ SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op, // If the input value is a chrec scev, truncate the chrec's operands. if (const SCEVAddRecExpr *AddRec = dyn_cast(Op)) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty)); return getAddRecExpr(Operands, AddRec->getLoop()); @@ -749,7 +749,7 @@ SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op, return Result; } -SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op, +const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); @@ -782,28 +782,28 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op, // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. - SCEVHandle MaxBECount = getMaxBackedgeTakenCount(AR->getLoop()); + const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop()); if (!isa(MaxBECount)) { // Manually compute the final value for AR, checking for // overflow. 
- SCEVHandle Start = AR->getStart(); - SCEVHandle Step = AR->getStepRecurrence(*this); + const SCEV* Start = AR->getStart(); + const SCEV* Step = AR->getStepRecurrence(*this); // Check whether the backedge-taken count can be losslessly casted to // the addrec's type. The count is always unsigned. - SCEVHandle CastedMaxBECount = + const SCEV* CastedMaxBECount = getTruncateOrZeroExtend(MaxBECount, Start->getType()); - SCEVHandle RecastedMaxBECount = + const SCEV* RecastedMaxBECount = getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); if (MaxBECount == RecastedMaxBECount) { const Type *WideTy = IntegerType::get(getTypeSizeInBits(Start->getType()) * 2); // Check whether Start+Step*MaxBECount has no unsigned overflow. - SCEVHandle ZMul = + const SCEV* ZMul = getMulExpr(CastedMaxBECount, getTruncateOrZeroExtend(Step, Start->getType())); - SCEVHandle Add = getAddExpr(Start, ZMul); - SCEVHandle OperandExtendedAdd = + const SCEV* Add = getAddExpr(Start, ZMul); + const SCEV* OperandExtendedAdd = getAddExpr(getZeroExtendExpr(Start, WideTy), getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), getZeroExtendExpr(Step, WideTy))); @@ -815,7 +815,7 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op, // Similar to above, only this time treat the step value as signed. // This covers loops that count down. - SCEVHandle SMul = + const SCEV* SMul = getMulExpr(CastedMaxBECount, getTruncateOrSignExtend(Step, Start->getType())); Add = getAddExpr(Start, SMul); @@ -837,7 +837,7 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op, return Result; } -SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op, +const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); @@ -870,28 +870,28 @@ SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op, // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. - SCEVHandle MaxBECount = getMaxBackedgeTakenCount(AR->getLoop()); + const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop()); if (!isa(MaxBECount)) { // Manually compute the final value for AR, checking for // overflow. - SCEVHandle Start = AR->getStart(); - SCEVHandle Step = AR->getStepRecurrence(*this); + const SCEV* Start = AR->getStart(); + const SCEV* Step = AR->getStepRecurrence(*this); // Check whether the backedge-taken count can be losslessly casted to // the addrec's type. The count is always unsigned. - SCEVHandle CastedMaxBECount = + const SCEV* CastedMaxBECount = getTruncateOrZeroExtend(MaxBECount, Start->getType()); - SCEVHandle RecastedMaxBECount = + const SCEV* RecastedMaxBECount = getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); if (MaxBECount == RecastedMaxBECount) { const Type *WideTy = IntegerType::get(getTypeSizeInBits(Start->getType()) * 2); // Check whether Start+Step*MaxBECount has no signed overflow. 
- SCEVHandle SMul = + const SCEV* SMul = getMulExpr(CastedMaxBECount, getTruncateOrSignExtend(Step, Start->getType())); - SCEVHandle Add = getAddExpr(Start, SMul); - SCEVHandle OperandExtendedAdd = + const SCEV* Add = getAddExpr(Start, SMul); + const SCEV* OperandExtendedAdd = getAddExpr(getSignExtendExpr(Start, WideTy), getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), getSignExtendExpr(Step, WideTy))); @@ -912,7 +912,7 @@ SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op, /// getAnyExtendExpr - Return a SCEV for the given operand extended with /// unspecified bits out to the given type. /// -SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op, +const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); @@ -927,19 +927,19 @@ SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op, // Peel off a truncate cast. if (const SCEVTruncateExpr *T = dyn_cast(Op)) { - SCEVHandle NewOp = T->getOperand(); + const SCEV* NewOp = T->getOperand(); if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) return getAnyExtendExpr(NewOp, Ty); return getTruncateOrNoop(NewOp, Ty); } // Next try a zext cast. If the cast is folded, use it. - SCEVHandle ZExt = getZeroExtendExpr(Op, Ty); + const SCEV* ZExt = getZeroExtendExpr(Op, Ty); if (!isa(ZExt)) return ZExt; // Next try a sext cast. If the cast is folded, use it. - SCEVHandle SExt = getSignExtendExpr(Op, Ty); + const SCEV* SExt = getSignExtendExpr(Op, Ty); if (!isa(SExt)) return SExt; @@ -977,10 +977,10 @@ SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op, /// is also used as a check to avoid infinite recursion. /// static bool -CollectAddOperandsWithScales(DenseMap &M, - SmallVector &NewOps, +CollectAddOperandsWithScales(DenseMap &M, + SmallVector &NewOps, APInt &AccumulatedConstant, - const SmallVectorImpl &Ops, + const SmallVectorImpl &Ops, const APInt &Scale, ScalarEvolution &SE) { bool Interesting = false; @@ -1001,9 +1001,9 @@ CollectAddOperandsWithScales(DenseMap &M, } else { // A multiplication of a constant with some other value. Update // the map. - SmallVector MulOps(Mul->op_begin()+1, Mul->op_end()); - SCEVHandle Key = SE.getMulExpr(MulOps); - std::pair::iterator, bool> Pair = + SmallVector MulOps(Mul->op_begin()+1, Mul->op_end()); + const SCEV* Key = SE.getMulExpr(MulOps); + std::pair::iterator, bool> Pair = M.insert(std::make_pair(Key, APInt())); if (Pair.second) { Pair.first->second = NewScale; @@ -1022,7 +1022,7 @@ CollectAddOperandsWithScales(DenseMap &M, AccumulatedConstant += Scale * C->getValue()->getValue(); } else { // An ordinary operand. Update the map. - std::pair::iterator, bool> Pair = + std::pair::iterator, bool> Pair = M.insert(std::make_pair(Ops[i], APInt())); if (Pair.second) { Pair.first->second = Scale; @@ -1049,7 +1049,7 @@ namespace { /// getAddExpr - Get a canonical add expression, or something simpler if /// possible. -SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { +const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty add!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1093,8 +1093,8 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 // Found a match, merge the two values into a multiply, and add any // remaining values to the result. 
- SCEVHandle Two = getIntegerSCEV(2, Ty); - SCEVHandle Mul = getMulExpr(Ops[i], Two); + const SCEV* Two = getIntegerSCEV(2, Ty); + const SCEV* Mul = getMulExpr(Ops[i], Two); if (Ops.size() == 2) return Mul; Ops.erase(Ops.begin()+i, Ops.begin()+i+2); @@ -1110,7 +1110,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { const SCEVTruncateExpr *Trunc = cast(Ops[Idx]); const Type *DstType = Trunc->getType(); const Type *SrcType = Trunc->getOperand()->getType(); - SmallVector LargeOps; + SmallVector LargeOps; bool Ok = true; // Check all the operands to see if they can be represented in the // source type of the truncate. @@ -1126,7 +1126,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { // is much more likely to be foldable here. LargeOps.push_back(getSignExtendExpr(C, SrcType)); } else if (const SCEVMulExpr *M = dyn_cast(Ops[i])) { - SmallVector LargeMulOps; + SmallVector LargeMulOps; for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { if (const SCEVTruncateExpr *T = dyn_cast(M->getOperand(j))) { @@ -1154,7 +1154,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { } if (Ok) { // Evaluate the expression in the larger type. - SCEVHandle Fold = getAddExpr(LargeOps); + const SCEV* Fold = getAddExpr(LargeOps); // If it folds to something simple, use it. Otherwise, don't. if (isa(Fold) || isa(Fold)) return getTruncateExpr(Fold, DstType); @@ -1191,23 +1191,23 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { // operands multiplied by constant values. if (Idx < Ops.size() && isa(Ops[Idx])) { uint64_t BitWidth = getTypeSizeInBits(Ty); - DenseMap M; - SmallVector NewOps; + DenseMap M; + SmallVector NewOps; APInt AccumulatedConstant(BitWidth, 0); if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Ops, APInt(BitWidth, 1), *this)) { // Some interesting folding opportunity is present, so its worthwhile to // re-generate the operands list. Group the operands by constant scale, // to avoid multiplying by the same constant scale multiple times. - std::map, APIntCompare> MulOpLists; - for (SmallVector::iterator I = NewOps.begin(), + std::map, APIntCompare> MulOpLists; + for (SmallVector::iterator I = NewOps.begin(), E = NewOps.end(); I != E; ++I) MulOpLists[M.find(*I)->second].push_back(*I); // Re-generate the operands list. Ops.clear(); if (AccumulatedConstant != 0) Ops.push_back(getConstant(AccumulatedConstant)); - for (std::map, APIntCompare>::iterator I = + for (std::map, APIntCompare>::iterator I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I) if (I->first != 0) Ops.push_back(getMulExpr(getConstant(I->first), getAddExpr(I->second))); @@ -1229,17 +1229,17 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) if (MulOpSCEV == Ops[AddOp] && !isa(Ops[AddOp])) { // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) - SCEVHandle InnerMul = Mul->getOperand(MulOp == 0); + const SCEV* InnerMul = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { // If the multiply has more than two operands, we must get the // Y*Z term. 
- SmallVector MulOps(Mul->op_begin(), Mul->op_end()); + SmallVector MulOps(Mul->op_begin(), Mul->op_end()); MulOps.erase(MulOps.begin()+MulOp); InnerMul = getMulExpr(MulOps); } - SCEVHandle One = getIntegerSCEV(1, Ty); - SCEVHandle AddOne = getAddExpr(InnerMul, One); - SCEVHandle OuterMul = getMulExpr(AddOne, Ops[AddOp]); + const SCEV* One = getIntegerSCEV(1, Ty); + const SCEV* AddOne = getAddExpr(InnerMul, One); + const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]); if (Ops.size() == 2) return OuterMul; if (AddOp < Idx) { Ops.erase(Ops.begin()+AddOp); @@ -1263,21 +1263,21 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { OMulOp != e; ++OMulOp) if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) - SCEVHandle InnerMul1 = Mul->getOperand(MulOp == 0); + const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { - SmallVector MulOps(Mul->op_begin(), Mul->op_end()); + SmallVector MulOps(Mul->op_begin(), Mul->op_end()); MulOps.erase(MulOps.begin()+MulOp); InnerMul1 = getMulExpr(MulOps); } - SCEVHandle InnerMul2 = OtherMul->getOperand(OMulOp == 0); + const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0); if (OtherMul->getNumOperands() != 2) { - SmallVector MulOps(OtherMul->op_begin(), + SmallVector MulOps(OtherMul->op_begin(), OtherMul->op_end()); MulOps.erase(MulOps.begin()+OMulOp); InnerMul2 = getMulExpr(MulOps); } - SCEVHandle InnerMulSum = getAddExpr(InnerMul1,InnerMul2); - SCEVHandle OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); + const SCEV* InnerMulSum = getAddExpr(InnerMul1,InnerMul2); + const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); if (Ops.size() == 2) return OuterMul; Ops.erase(Ops.begin()+Idx); Ops.erase(Ops.begin()+OtherMulIdx-1); @@ -1298,7 +1298,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { // Scan all of the other operands to this add and add them to the vector if // they are loop invariant w.r.t. the recurrence. - SmallVector LIOps; + SmallVector LIOps; const SCEVAddRecExpr *AddRec = cast(Ops[Idx]); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { @@ -1312,11 +1312,11 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} LIOps.push_back(AddRec->getStart()); - SmallVector AddRecOps(AddRec->op_begin(), + SmallVector AddRecOps(AddRec->op_begin(), AddRec->op_end()); AddRecOps[0] = getAddExpr(LIOps); - SCEVHandle NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop()); + const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop()); // If all of the other operands were loop invariant, we are done. 
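The fold being set up here, NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}, works because adding a loop-invariant value to an affine recurrence only shifts its start. A quick numeric check with a plain struct standing in for SCEVAddRecExpr (the struct and constants are illustrative):

    #include <cassert>

    // Affine recurrence {Start,+,Step}: value at iteration i is Start + Step*i.
    struct AffineRec {
      long Start, Step;
      long at(long i) const { return Start + Step * i; }
    };

    int main() {
      AffineRec Rec{7, 3};                         // {7,+,3}
      long LI = 100;                               // loop-invariant addend
      AffineRec Folded{Rec.Start + LI, Rec.Step};  // {107,+,3}
      for (long i = 0; i < 16; ++i)
        assert(LI + Rec.at(i) == Folded.at(i));
      return 0;
    }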
if (Ops.size() == 1) return NewRec; @@ -1338,7 +1338,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { const SCEVAddRecExpr *OtherAddRec = cast(Ops[OtherIdx]); if (AddRec->getLoop() == OtherAddRec->getLoop()) { // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D} - SmallVector NewOps(AddRec->op_begin(), AddRec->op_end()); + SmallVector NewOps(AddRec->op_begin(), AddRec->op_end()); for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) { if (i >= NewOps.size()) { NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i, @@ -1347,7 +1347,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { } NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i)); } - SCEVHandle NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop()); + const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop()); if (Ops.size() == 2) return NewAddRec; @@ -1374,7 +1374,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { /// getMulExpr - Get a canonical multiply expression, or something simpler if /// possible. -SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { +const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty mul!"); #ifndef NDEBUG for (unsigned i = 1, e = Ops.size(); i != e; ++i) @@ -1455,7 +1455,7 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { // Scan all of the other operands to this mul and add them to the vector if // they are loop invariant w.r.t. the recurrence. - SmallVector LIOps; + SmallVector LIOps; const SCEVAddRecExpr *AddRec = cast(Ops[Idx]); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { @@ -1467,7 +1467,7 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { // If we found some loop invariants, fold them into the recurrence. if (!LIOps.empty()) { // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(AddRec->getNumOperands()); if (LIOps.size() == 1) { const SCEV *Scale = LIOps[0]; @@ -1475,13 +1475,13 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); } else { for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { - SmallVector MulOps(LIOps.begin(), LIOps.end()); + SmallVector MulOps(LIOps.begin(), LIOps.end()); MulOps.push_back(AddRec->getOperand(i)); NewOps.push_back(getMulExpr(MulOps)); } } - SCEVHandle NewRec = getAddRecExpr(NewOps, AddRec->getLoop()); + const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop()); // If all of the other operands were loop invariant, we are done. 
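Two folds in this stretch have equally simple closed forms: recurrences over the same loop add operand-wise, {A,+,B} + {C,+,D} --> {A+C,+,B+D}, and multiplying by a loop invariant scales every operand, LI * {Start,+,Step} --> {LI*Start,+,LI*Step}. A spot check of both identities (the AffineRec helper and the constants are made up for the test):

    #include <cassert>

    struct AffineRec {
      long Start, Step;
      long at(long i) const { return Start + Step * i; }
    };

    int main() {
      AffineRec F{4, 8}, G{1, 5};
      AffineRec Sum{F.Start + G.Start, F.Step + G.Step};  // {5,+,13}
      long LI = 6;
      AffineRec Scaled{LI * F.Start, LI * F.Step};        // {24,+,48}
      for (long i = 0; i < 16; ++i) {
        assert(F.at(i) + G.at(i) == Sum.at(i));
        assert(LI * F.at(i) == Scaled.at(i));
      }
      return 0;
    }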
if (Ops.size() == 1) return NewRec; @@ -1505,14 +1505,14 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { if (AddRec->getLoop() == OtherAddRec->getLoop()) { // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D} const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; - SCEVHandle NewStart = getMulExpr(F->getStart(), + const SCEV* NewStart = getMulExpr(F->getStart(), G->getStart()); - SCEVHandle B = F->getStepRecurrence(*this); - SCEVHandle D = G->getStepRecurrence(*this); - SCEVHandle NewStep = getAddExpr(getMulExpr(F, D), + const SCEV* B = F->getStepRecurrence(*this); + const SCEV* D = G->getStepRecurrence(*this); + const SCEV* NewStep = getAddExpr(getMulExpr(F, D), getMulExpr(G, B), getMulExpr(B, D)); - SCEVHandle NewAddRec = getAddRecExpr(NewStart, NewStep, + const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep, F->getLoop()); if (Ops.size() == 2) return NewAddRec; @@ -1539,8 +1539,8 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { /// getUDivExpr - Get a canonical multiply expression, or something simpler if /// possible. -SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS, - const SCEVHandle &RHS) { +const SCEV* ScalarEvolution::getUDivExpr(const SCEV* LHS, + const SCEV* RHS) { assert(getEffectiveSCEVType(LHS->getType()) == getEffectiveSCEVType(RHS->getType()) && "SCEVUDivExpr operand types don't match!"); @@ -1573,24 +1573,24 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS, getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), getZeroExtendExpr(Step, ExtTy), AR->getLoop())) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); return getAddRecExpr(Operands, AR->getLoop()); } // (A*B)/C --> A*(B/C) if safe and B/C can be folded. if (const SCEVMulExpr *M = dyn_cast(LHS)) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) // Find an operand that's safely divisible. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { - SCEVHandle Op = M->getOperand(i); - SCEVHandle Div = getUDivExpr(Op, RHSC); + const SCEV* Op = M->getOperand(i); + const SCEV* Div = getUDivExpr(Op, RHSC); if (!isa(Div) && getMulExpr(Div, RHSC) == Op) { - const SmallVectorImpl &MOperands = M->getOperands(); - Operands = SmallVector(MOperands.begin(), + const SmallVectorImpl &MOperands = M->getOperands(); + Operands = SmallVector(MOperands.begin(), MOperands.end()); Operands[i] = Div; return getMulExpr(Operands); @@ -1599,13 +1599,13 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS, } // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 
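The product rule in this hunk, {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D} with F and G the original recurrences, follows from (F(i)+B)*(G(i)+D) - F(i)*G(i) = F(i)*D + G(i)*B + B*D: the difference between consecutive products is exactly that step expression. A small check of the algebra (sample recurrences chosen arbitrarily):

    #include <cassert>

    struct AffineRec {
      long Start, Step;
      long at(long i) const { return Start + Step * i; }
    };

    int main() {
      AffineRec F{3, 2}, G{5, 7};           // {3,+,2} and {5,+,7}
      long B = F.Step, D = G.Step;
      long NewStart = F.Start * G.Start;    // A*C
      assert(NewStart == F.at(0) * G.at(0));
      for (long i = 0; i < 16; ++i) {
        long Diff    = F.at(i + 1) * G.at(i + 1) - F.at(i) * G.at(i);
        long StepAtI = F.at(i) * D + G.at(i) * B + B * D;   // F*D + G*B + B*D
        assert(Diff == StepAtI);
      }
      return 0;
    }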
if (const SCEVAddRecExpr *A = dyn_cast(LHS)) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { Operands.clear(); for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { - SCEVHandle Op = getUDivExpr(A->getOperand(i), RHS); + const SCEV* Op = getUDivExpr(A->getOperand(i), RHS); if (isa(Op) || getMulExpr(Op, RHS) != A->getOperand(i)) break; Operands.push_back(Op); @@ -1631,9 +1631,9 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS, /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. -SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start, - const SCEVHandle &Step, const Loop *L) { - SmallVector Operands; +const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start, + const SCEV* Step, const Loop *L) { + SmallVector Operands; Operands.push_back(Start); if (const SCEVAddRecExpr *StepChrec = dyn_cast(Step)) if (StepChrec->getLoop() == L) { @@ -1648,7 +1648,7 @@ SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start, /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. -SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, +const SCEV* ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, const Loop *L) { if (Operands.size() == 1) return Operands[0]; #ifndef NDEBUG @@ -1667,9 +1667,8 @@ SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, if (const SCEVAddRecExpr *NestedAR = dyn_cast(Operands[0])) { const Loop* NestedLoop = NestedAR->getLoop(); if (L->getLoopDepth() < NestedLoop->getLoopDepth()) { - SmallVector NestedOperands(NestedAR->op_begin(), + SmallVector NestedOperands(NestedAR->op_begin(), NestedAR->op_end()); - SCEVHandle NestedARHandle(NestedAR); Operands[0] = NestedAR->getStart(); NestedOperands[0] = getAddRecExpr(Operands, L); return getAddRecExpr(NestedOperands, NestedLoop); @@ -1682,16 +1681,16 @@ SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, return Result; } -SCEVHandle ScalarEvolution::getSMaxExpr(const SCEVHandle &LHS, - const SCEVHandle &RHS) { - SmallVector Ops; +const SCEV* ScalarEvolution::getSMaxExpr(const SCEV* LHS, + const SCEV* RHS) { + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getSMaxExpr(Ops); } -SCEVHandle -ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { +const SCEV* +ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty smax!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1769,16 +1768,16 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { return Result; } -SCEVHandle ScalarEvolution::getUMaxExpr(const SCEVHandle &LHS, - const SCEVHandle &RHS) { - SmallVector Ops; +const SCEV* ScalarEvolution::getUMaxExpr(const SCEV* LHS, + const SCEV* RHS) { + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getUMaxExpr(Ops); } -SCEVHandle -ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { +const SCEV* +ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty umax!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1856,19 +1855,19 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { return Result; } -SCEVHandle ScalarEvolution::getSMinExpr(const SCEVHandle &LHS, - const SCEVHandle &RHS) { +const SCEV* 
ScalarEvolution::getSMinExpr(const SCEV* LHS, + const SCEV* RHS) { // ~smax(~x, ~y) == smin(x, y). return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); } -SCEVHandle ScalarEvolution::getUMinExpr(const SCEVHandle &LHS, - const SCEVHandle &RHS) { +const SCEV* ScalarEvolution::getUMinExpr(const SCEV* LHS, + const SCEV* RHS) { // ~umax(~x, ~y) == umin(x, y) return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); } -SCEVHandle ScalarEvolution::getUnknown(Value *V) { +const SCEV* ScalarEvolution::getUnknown(Value *V) { if (ConstantInt *CI = dyn_cast(V)) return getConstant(CI); if (isa(V)) @@ -1928,7 +1927,7 @@ const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { return TD->getIntPtrType(); } -SCEVHandle ScalarEvolution::getCouldNotCompute() { +const SCEV* ScalarEvolution::getCouldNotCompute() { return CouldNotCompute; } @@ -1940,19 +1939,19 @@ bool ScalarEvolution::hasSCEV(Value *V) const { /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the /// expression and create a new one. -SCEVHandle ScalarEvolution::getSCEV(Value *V) { +const SCEV* ScalarEvolution::getSCEV(Value *V) { assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); - std::map::iterator I = Scalars.find(V); + std::map::iterator I = Scalars.find(V); if (I != Scalars.end()) return I->second; - SCEVHandle S = createSCEV(V); + const SCEV* S = createSCEV(V); Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S)); return S; } /// getIntegerSCEV - Given an integer or FP type, create a constant for the /// specified signed integer value and return a SCEV for the constant. -SCEVHandle ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { +const SCEV* ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { Ty = getEffectiveSCEVType(Ty); Constant *C; if (Val == 0) @@ -1967,7 +1966,7 @@ SCEVHandle ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V /// -SCEVHandle ScalarEvolution::getNegativeSCEV(const SCEVHandle &V) { +const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) { if (const SCEVConstant *VC = dyn_cast(V)) return getUnknown(ConstantExpr::getNeg(VC->getValue())); @@ -1977,20 +1976,20 @@ SCEVHandle ScalarEvolution::getNegativeSCEV(const SCEVHandle &V) { } /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V -SCEVHandle ScalarEvolution::getNotSCEV(const SCEVHandle &V) { +const SCEV* ScalarEvolution::getNotSCEV(const SCEV* V) { if (const SCEVConstant *VC = dyn_cast(V)) return getUnknown(ConstantExpr::getNot(VC->getValue())); const Type *Ty = V->getType(); Ty = getEffectiveSCEVType(Ty); - SCEVHandle AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty)); + const SCEV* AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty)); return getMinusSCEV(AllOnes, V); } /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS. /// -SCEVHandle ScalarEvolution::getMinusSCEV(const SCEVHandle &LHS, - const SCEVHandle &RHS) { +const SCEV* ScalarEvolution::getMinusSCEV(const SCEV* LHS, + const SCEV* RHS) { // X - Y --> X + -Y return getAddExpr(LHS, getNegativeSCEV(RHS)); } @@ -1998,8 +1997,8 @@ SCEVHandle ScalarEvolution::getMinusSCEV(const SCEVHandle &LHS, /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is zero /// extended. 
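getSMinExpr, getUMinExpr, getNotSCEV and getMinusSCEV in this hunk all rest on small two's-complement identities: ~V == -1 - V, X - Y == X + (-Y), and ~smax(~x,~y) == smin(x,y) together with its unsigned counterpart. A brute-force check over a few values, with plain integers in place of SCEVs (the value set is arbitrary):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t Vals[] = {-7, -1, 0, 3, 1000};
      for (int32_t X : Vals)
        for (int32_t Y : Vals) {
          assert(~X == -1 - X);                            // getNotSCEV
          assert(X - Y == X + (-Y));                       // getMinusSCEV
          assert(~std::max(~X, ~Y) == std::min(X, Y));     // smin via smax
          uint32_t UX = uint32_t(X), UY = uint32_t(Y);
          assert(~std::max(~UX, ~UY) == std::min(UX, UY)); // umin via umax
        }
      return 0;
    }

Expressing min through max keeps the set of distinct SCEV node kinds small, which is presumably why only the max forms get dedicated expression classes.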
-SCEVHandle -ScalarEvolution::getTruncateOrZeroExtend(const SCEVHandle &V, +const SCEV* +ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && @@ -2015,8 +2014,8 @@ ScalarEvolution::getTruncateOrZeroExtend(const SCEVHandle &V, /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is sign /// extended. -SCEVHandle -ScalarEvolution::getTruncateOrSignExtend(const SCEVHandle &V, +const SCEV* +ScalarEvolution::getTruncateOrSignExtend(const SCEV* V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && @@ -2032,8 +2031,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEVHandle &V, /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is zero /// extended. The conversion must not be narrowing. -SCEVHandle -ScalarEvolution::getNoopOrZeroExtend(const SCEVHandle &V, const Type *Ty) { +const SCEV* +ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2048,8 +2047,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEVHandle &V, const Type *Ty) { /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is sign /// extended. The conversion must not be narrowing. -SCEVHandle -ScalarEvolution::getNoopOrSignExtend(const SCEVHandle &V, const Type *Ty) { +const SCEV* +ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2065,8 +2064,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEVHandle &V, const Type *Ty) { /// the input value to the specified type. If the type must be extended, /// it is extended with unspecified bits. The conversion must not be /// narrowing. -SCEVHandle -ScalarEvolution::getNoopOrAnyExtend(const SCEVHandle &V, const Type *Ty) { +const SCEV* +ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2080,8 +2079,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEVHandle &V, const Type *Ty) { /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the /// input value to the specified type. The conversion must not be widening. -SCEVHandle -ScalarEvolution::getTruncateOrNoop(const SCEVHandle &V, const Type *Ty) { +const SCEV* +ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2096,10 +2095,10 @@ ScalarEvolution::getTruncateOrNoop(const SCEVHandle &V, const Type *Ty) { /// getUMaxFromMismatchedTypes - Promote the operands to the wider of /// the types using zero-extension, and then perform a umax operation /// with them. 
-SCEVHandle ScalarEvolution::getUMaxFromMismatchedTypes(const SCEVHandle &LHS, - const SCEVHandle &RHS) { - SCEVHandle PromotedLHS = LHS; - SCEVHandle PromotedRHS = RHS; +const SCEV* ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV* LHS, + const SCEV* RHS) { + const SCEV* PromotedLHS = LHS; + const SCEV* PromotedRHS = RHS; if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); @@ -2112,10 +2111,10 @@ SCEVHandle ScalarEvolution::getUMaxFromMismatchedTypes(const SCEVHandle &LHS, /// getUMinFromMismatchedTypes - Promote the operands to the wider of /// the types using zero-extension, and then perform a umin operation /// with them. -SCEVHandle ScalarEvolution::getUMinFromMismatchedTypes(const SCEVHandle &LHS, - const SCEVHandle &RHS) { - SCEVHandle PromotedLHS = LHS; - SCEVHandle PromotedRHS = RHS; +const SCEV* ScalarEvolution::getUMinFromMismatchedTypes(const SCEV* LHS, + const SCEV* RHS) { + const SCEV* PromotedLHS = LHS; + const SCEV* PromotedRHS = RHS; if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); @@ -2129,13 +2128,13 @@ SCEVHandle ScalarEvolution::getUMinFromMismatchedTypes(const SCEVHandle &LHS, /// the specified instruction and replaces any references to the symbolic value /// SymName with the specified value. This is used during PHI resolution. void ScalarEvolution:: -ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEVHandle &SymName, - const SCEVHandle &NewVal) { - std::map::iterator SI = +ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEV* SymName, + const SCEV* NewVal) { + std::map::iterator SI = Scalars.find(SCEVCallbackVH(I, this)); if (SI == Scalars.end()) return; - SCEVHandle NV = + const SCEV* NV = SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this); if (NV == SI->second) return; // No change. @@ -2151,7 +2150,7 @@ ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEVHandle &SymName, /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in /// a loop header, making it a potential recurrence, or it doesn't. /// -SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) { +const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) { if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized. if (const Loop *L = LI->getLoopFor(PN->getParent())) if (L->getHeader() == PN->getParent()) { @@ -2161,14 +2160,14 @@ SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) { unsigned BackEdge = IncomingEdge^1; // While we are analyzing this PHI node, handle its value symbolically. - SCEVHandle SymbolicName = getUnknown(PN); + const SCEV* SymbolicName = getUnknown(PN); assert(Scalars.find(PN) == Scalars.end() && "PHI node already processed?"); Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); // Using this symbolic name for the PHI, analyze the value coming around // the back-edge. - SCEVHandle BEValue = getSCEV(PN->getIncomingValue(BackEdge)); + const SCEV* BEValue = getSCEV(PN->getIncomingValue(BackEdge)); // NOTE: If BEValue is loop invariant, we know that the PHI node just // has a special value for the first iteration of the loop. @@ -2188,19 +2187,19 @@ SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) { if (FoundIndex != Add->getNumOperands()) { // Create an add with everything but the specified operand. 
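createNodeForPHI, continuing in the next hunk, recognizes the classic induction pattern: when the value flowing around the back edge is the PHI plus a loop-invariant Accum, the PHI is the recurrence {StartVal,+,Accum}, i.e. StartVal + Accum*iteration. A loop simulation that checks that closed form against the IR-level update (start value, step and trip count are invented for the test):

    #include <cassert>

    int main() {
      const long Start = 5, Accum = 4, Trips = 20;
      long Phi = Start;                       // PHI value on loop entry
      for (long Iter = 0; Iter < Trips; ++Iter) {
        assert(Phi == Start + Accum * Iter);  // {Start,+,Accum} at this iteration
        Phi = Phi + Accum;                    // back-edge value: PHI + Accum
      }
      return 0;
    }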
- SmallVector Ops; + SmallVector Ops; for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if (i != FoundIndex) Ops.push_back(Add->getOperand(i)); - SCEVHandle Accum = getAddExpr(Ops); + const SCEV* Accum = getAddExpr(Ops); // This is not a valid addrec if the step amount is varying each // loop iteration, but is not itself an addrec in this loop. if (Accum->isLoopInvariant(L) || (isa(Accum) && cast(Accum)->getLoop() == L)) { - SCEVHandle StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); - SCEVHandle PHISCEV = getAddRecExpr(StartVal, Accum, L); + const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); + const SCEV* PHISCEV = getAddRecExpr(StartVal, Accum, L); // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and update all of the @@ -2219,13 +2218,13 @@ SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) { // Because the other in-value of i (0) fits the evolution of BEValue // i really is an addrec evolution. if (AddRec->getLoop() == L && AddRec->isAffine()) { - SCEVHandle StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); + const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); // If StartVal = j.start - j.stride, we can use StartVal as the // initial step of the addrec evolution. if (StartVal == getMinusSCEV(AddRec->getOperand(0), AddRec->getOperand(1))) { - SCEVHandle PHISCEV = + const SCEV* PHISCEV = getAddRecExpr(StartVal, AddRec->getOperand(1), L); // Okay, for the entire analysis of this edge we assumed the PHI @@ -2249,14 +2248,14 @@ SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) { /// createNodeForGEP - Expand GEP instructions into add and multiply /// operations. This allows them to be analyzed by regular SCEV code. /// -SCEVHandle ScalarEvolution::createNodeForGEP(User *GEP) { +const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) { const Type *IntPtrTy = TD->getIntPtrType(); Value *Base = GEP->getOperand(0); // Don't attempt to analyze GEPs over unsized objects. if (!cast(Base->getType())->getElementType()->isSized()) return getUnknown(GEP); - SCEVHandle TotalOffset = getIntegerSCEV(0, IntPtrTy); + const SCEV* TotalOffset = getIntegerSCEV(0, IntPtrTy); gep_type_iterator GTI = gep_type_begin(GEP); for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()), E = GEP->op_end(); @@ -2272,7 +2271,7 @@ SCEVHandle ScalarEvolution::createNodeForGEP(User *GEP) { getIntegerSCEV(Offset, IntPtrTy)); } else { // For an array, add the element offset, explicitly scaled. - SCEVHandle LocalOffset = getSCEV(Index); + const SCEV* LocalOffset = getSCEV(Index); if (!isa(LocalOffset->getType())) // Getelementptr indicies are signed. LocalOffset = getTruncateOrSignExtend(LocalOffset, @@ -2292,7 +2291,7 @@ SCEVHandle ScalarEvolution::createNodeForGEP(User *GEP) { /// the minimum number of times S is divisible by 2. For example, given {4,+,8} /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. uint32_t -ScalarEvolution::GetMinTrailingZeros(const SCEVHandle &S) { +ScalarEvolution::GetMinTrailingZeros(const SCEV* S) { if (const SCEVConstant *C = dyn_cast(S)) return C->getValue()->getValue().countTrailingZeros(); @@ -2369,7 +2368,7 @@ ScalarEvolution::GetMinTrailingZeros(const SCEVHandle &S) { } uint32_t -ScalarEvolution::GetMinLeadingZeros(const SCEVHandle &S) { +ScalarEvolution::GetMinLeadingZeros(const SCEV* S) { // TODO: Handle other SCEV expression types here. 
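createNodeForGEP above lowers a GEP to plain integer arithmetic in the pointer-sized type: the running TotalOffset grows by a constant field offset for each struct index and by Index * sizeof(element) for each array index. The same address arithmetic for a made-up aggregate, with the host compiler's layout standing in for TargetData:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct S { int32_t a; int64_t b[4]; };    // illustrative aggregate type

    int main() {
      S Obj{};
      int64_t Index = 2;                      // a non-constant array index
      // gep &Obj, 0, field 1, Index  ==  base + offsetof(b) + Index * sizeof(b[0])
      uintptr_t Base   = reinterpret_cast<uintptr_t>(&Obj);
      uintptr_t Offset = offsetof(S, b) + uintptr_t(Index) * sizeof(int64_t);
      assert(Base + Offset == reinterpret_cast<uintptr_t>(&Obj.b[Index]));
      return 0;
    }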
if (const SCEVConstant *C = dyn_cast(S)) @@ -2395,7 +2394,7 @@ ScalarEvolution::GetMinLeadingZeros(const SCEVHandle &S) { } uint32_t -ScalarEvolution::GetMinSignBits(const SCEVHandle &S) { +ScalarEvolution::GetMinSignBits(const SCEV* S) { // TODO: Handle other SCEV expression types here. if (const SCEVConstant *C = dyn_cast(S)) { @@ -2422,7 +2421,7 @@ ScalarEvolution::GetMinSignBits(const SCEVHandle &S) { /// createSCEV - We know that there is no SCEV for the specified value. /// Analyze the expression. /// -SCEVHandle ScalarEvolution::createSCEV(Value *V) { +const SCEV* ScalarEvolution::createSCEV(Value *V) { if (!isSCEVable(V->getType())) return getUnknown(V); @@ -2486,7 +2485,7 @@ SCEVHandle ScalarEvolution::createSCEV(Value *V) { // In order for this transformation to be safe, the LHS must be of the // form X*(2^n) and the Or constant must be less than 2^n. if (ConstantInt *CI = dyn_cast(U->getOperand(1))) { - SCEVHandle LHS = getSCEV(U->getOperand(0)); + const SCEV* LHS = getSCEV(U->getOperand(0)); const APInt &CIVal = CI->getValue(); if (GetMinTrailingZeros(LHS) >= (CIVal.getBitWidth() - CIVal.countLeadingZeros())) @@ -2516,7 +2515,7 @@ SCEVHandle ScalarEvolution::createSCEV(Value *V) { if (const SCEVZeroExtendExpr *Z = dyn_cast(getSCEV(U->getOperand(0)))) { const Type *UTy = U->getType(); - SCEVHandle Z0 = Z->getOperand(); + const SCEV* Z0 = Z->getOperand(); const Type *Z0Ty = Z0->getType(); unsigned Z0TySize = getTypeSizeInBits(Z0Ty); @@ -2685,14 +2684,14 @@ SCEVHandle ScalarEvolution::createSCEV(Value *V) { /// loop-invariant backedge-taken count (see /// hasLoopInvariantBackedgeTakenCount). /// -SCEVHandle ScalarEvolution::getBackedgeTakenCount(const Loop *L) { +const SCEV* ScalarEvolution::getBackedgeTakenCount(const Loop *L) { return getBackedgeTakenInfo(L).Exact; } /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except /// return the least SCEV value that is known never to be less than the /// actual backedge taken count. -SCEVHandle ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { +const SCEV* ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { return getBackedgeTakenInfo(L).Max; } @@ -2759,7 +2758,7 @@ void ScalarEvolution::forgetLoopPHIs(const Loop *L) { SmallVector Worklist; for (BasicBlock::iterator I = Header->begin(); PHINode *PN = dyn_cast(I); ++I) { - std::map::iterator It = Scalars.find((Value*)I); + std::map::iterator It = Scalars.find((Value*)I); if (It != Scalars.end() && !isa(It->second)) Worklist.push_back(PN); } @@ -2781,8 +2780,8 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { L->getExitingBlocks(ExitingBlocks); // Examine all exits and pick the most conservative values. - SCEVHandle BECount = CouldNotCompute; - SCEVHandle MaxBECount = CouldNotCompute; + const SCEV* BECount = CouldNotCompute; + const SCEV* MaxBECount = CouldNotCompute; bool CouldNotComputeBECount = false; bool CouldNotComputeMaxBECount = false; for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { @@ -2906,8 +2905,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); BackedgeTakenInfo BTI1 = ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); - SCEVHandle BECount = CouldNotCompute; - SCEVHandle MaxBECount = CouldNotCompute; + const SCEV* BECount = CouldNotCompute; + const SCEV* MaxBECount = CouldNotCompute; if (L->contains(TBB)) { // Both conditions must be true for the loop to continue executing. 
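The 'or' case in createSCEV treats X | C as X + C once GetMinTrailingZeros proves that X is zero in every bit position C occupies, so the two operations produce identical bits. The same fact in plain integer terms, using the C++20 <bit> helpers for the bit counts (the sample operands are mine):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t X = 0x12345u << 4;   // at least four trailing zero bits
      uint32_t C = 0x9u;            // fits entirely within those low four bits
      unsigned ActiveBits = 32u - unsigned(std::countl_zero(C));   // 4
      assert(unsigned(std::countr_zero(X)) >= ActiveBits);
      assert((X | C) == X + C);     // disjoint bits: 'or' behaves like 'add'
      return 0;
    }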
// Choose the less conservative count. @@ -2940,8 +2939,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); BackedgeTakenInfo BTI1 = ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); - SCEVHandle BECount = CouldNotCompute; - SCEVHandle MaxBECount = CouldNotCompute; + const SCEV* BECount = CouldNotCompute; + const SCEV* MaxBECount = CouldNotCompute; if (L->contains(FBB)) { // Both conditions must be false for the loop to continue executing. // Choose the less conservative count. @@ -2998,7 +2997,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, // Handle common loops like: for (X = "string"; *X; ++X) if (LoadInst *LI = dyn_cast(ExitCond->getOperand(0))) if (Constant *RHS = dyn_cast(ExitCond->getOperand(1))) { - SCEVHandle ItCnt = + const SCEV* ItCnt = ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); if (!isa(ItCnt)) { unsigned BitWidth = getTypeSizeInBits(ItCnt->getType()); @@ -3008,8 +3007,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, } } - SCEVHandle LHS = getSCEV(ExitCond->getOperand(0)); - SCEVHandle RHS = getSCEV(ExitCond->getOperand(1)); + const SCEV* LHS = getSCEV(ExitCond->getOperand(0)); + const SCEV* RHS = getSCEV(ExitCond->getOperand(1)); // Try to evaluate any dependencies out of the loop. LHS = getSCEVAtScope(LHS, L); @@ -3032,20 +3031,20 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, ConstantRange CompRange( ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); - SCEVHandle Ret = AddRec->getNumIterationsInRange(CompRange, *this); + const SCEV* Ret = AddRec->getNumIterationsInRange(CompRange, *this); if (!isa(Ret)) return Ret; } switch (Cond) { case ICmpInst::ICMP_NE: { // while (X != Y) // Convert to: while (X-Y != 0) - SCEVHandle TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); + const SCEV* TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); if (!isa(TC)) return TC; break; } case ICmpInst::ICMP_EQ: { // Convert to: while (X-Y == 0) // while (X == Y) - SCEVHandle TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); + const SCEV* TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); if (!isa(TC)) return TC; break; } @@ -3089,8 +3088,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE) { - SCEVHandle InVal = SE.getConstant(C); - SCEVHandle Val = AddRec->evaluateAtIteration(InVal, SE); + const SCEV* InVal = SE.getConstant(C); + const SCEV* Val = AddRec->evaluateAtIteration(InVal, SE); assert(isa(Val) && "Evaluation of SCEV at constant didn't fold correctly?"); return cast(Val)->getValue(); @@ -3133,7 +3132,7 @@ GetAddressedElementFromGlobal(GlobalVariable *GV, /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of /// 'icmp op load X, cst', try to see if we can compute the backedge /// execution count. -SCEVHandle ScalarEvolution:: +const SCEV* ScalarEvolution:: ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, Constant *RHS, const Loop *L, ICmpInst::Predicate predicate) { @@ -3167,7 +3166,7 @@ ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, Constant *RHS, // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. // Check to see if X is a loop variant variable value now. 
- SCEVHandle Idx = getSCEV(VarIdx); + const SCEV* Idx = getSCEV(VarIdx); Idx = getSCEVAtScope(Idx, L); // We can only recognize very limited forms of loop index expressions, in @@ -3343,7 +3342,7 @@ getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs, const Loop *L){ /// try to evaluate a few iterations of the loop until we get the exit /// condition gets a value of ExitWhen (true or false). If we cannot /// evaluate the trip count of the loop, return CouldNotCompute. -SCEVHandle ScalarEvolution:: +const SCEV* ScalarEvolution:: ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen) { PHINode *PN = getConstantEvolvingPHI(Cond, L); if (PN == 0) return CouldNotCompute; @@ -3400,7 +3399,7 @@ ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen) /// /// In the case that a relevant loop exit value cannot be computed, the /// original value V is returned. -SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { +const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // FIXME: this should be turned into a virtual method on SCEV! if (isa(V)) return V; @@ -3417,7 +3416,7 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // to see if the loop that contains it has a known backedge-taken // count. If so, we may be able to force computation of the exit // value. - SCEVHandle BackedgeTakenCount = getBackedgeTakenCount(LI); + const SCEV* BackedgeTakenCount = getBackedgeTakenCount(LI); if (const SCEVConstant *BTCC = dyn_cast(BackedgeTakenCount)) { // Okay, we know how many times the containing loop executes. If @@ -3455,7 +3454,7 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { if (!isSCEVable(Op->getType())) return V; - SCEVHandle OpV = getSCEVAtScope(getSCEV(Op), L); + const SCEV* OpV = getSCEVAtScope(getSCEV(Op), L); if (const SCEVConstant *SC = dyn_cast(OpV)) { Constant *C = SC->getValue(); if (C->getType() != Op->getType()) @@ -3501,11 +3500,11 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { - SCEVHandle OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); + const SCEV* OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); if (OpAtScope != Comm->getOperand(i)) { // Okay, at least one of these operands is loop variant but might be // foldable. Build a new instance of the folded commutative expression. - SmallVector NewOps(Comm->op_begin(), Comm->op_begin()+i); + SmallVector NewOps(Comm->op_begin(), Comm->op_begin()+i); NewOps.push_back(OpAtScope); for (++i; i != e; ++i) { @@ -3528,8 +3527,8 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { } if (const SCEVUDivExpr *Div = dyn_cast(V)) { - SCEVHandle LHS = getSCEVAtScope(Div->getLHS(), L); - SCEVHandle RHS = getSCEVAtScope(Div->getRHS(), L); + const SCEV* LHS = getSCEVAtScope(Div->getLHS(), L); + const SCEV* RHS = getSCEVAtScope(Div->getRHS(), L); if (LHS == Div->getLHS() && RHS == Div->getRHS()) return Div; // must be loop invariant return getUDivExpr(LHS, RHS); @@ -3541,7 +3540,7 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { if (!L || !AddRec->getLoop()->contains(L->getHeader())) { // To evaluate this recurrence, we need to know how many times the AddRec // loop iterates. Compute this now. 
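ComputeBackedgeTakenCountExhaustively, whose new signature appears here, literally steps the constant-evolving PHI iteration by iteration until the exit condition first takes the requested ExitWhen value, giving up after a fixed budget. A toy version of that bounded search; the condition, step and iteration cap are made up, and the real routine's exact off-by-one bookkeeping is glossed over:

    #include <cassert>
    #include <optional>

    // Returns the iteration index at which Cond(Phi) first equals ExitWhen,
    // or nullopt if that never happens within MaxIters (give up, as with
    // CouldNotCompute).
    template <class Cond>
    std::optional<long> ExhaustiveSearch(long Phi, long Step, bool ExitWhen,
                                         Cond C, long MaxIters) {
      for (long Iter = 0; Iter < MaxIters; ++Iter) {
        if (C(Phi) == ExitWhen)
          return Iter;
        Phi += Step;               // constant evolution of the PHI
      }
      return std::nullopt;
    }

    int main() {
      // while (x != 40) x += 8;  starting from 0: x visits 0,8,16,24,32,40,
      // and the test first sees 40 at index 5.
      auto Hit = ExhaustiveSearch(0, 8, /*ExitWhen=*/true,
                                  [](long X) { return X == 40; }, 100);
      assert(Hit && *Hit == 5);
      return 0;
    }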
- SCEVHandle BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); + const SCEV* BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); if (BackedgeTakenCount == CouldNotCompute) return AddRec; // Then, evaluate the AddRec. @@ -3551,21 +3550,21 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { } if (const SCEVZeroExtendExpr *Cast = dyn_cast(V)) { - SCEVHandle Op = getSCEVAtScope(Cast->getOperand(), L); + const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getZeroExtendExpr(Op, Cast->getType()); } if (const SCEVSignExtendExpr *Cast = dyn_cast(V)) { - SCEVHandle Op = getSCEVAtScope(Cast->getOperand(), L); + const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getSignExtendExpr(Op, Cast->getType()); } if (const SCEVTruncateExpr *Cast = dyn_cast(V)) { - SCEVHandle Op = getSCEVAtScope(Cast->getOperand(), L); + const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getTruncateExpr(Op, Cast->getType()); @@ -3577,7 +3576,7 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { /// getSCEVAtScope - This is a convenience function which does /// getSCEVAtScope(getSCEV(V), L). -SCEVHandle ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { +const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { return getSCEVAtScope(getSCEV(V), L); } @@ -3590,7 +3589,7 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { /// A and B isn't important. /// /// If the equation does not have a solution, SCEVCouldNotCompute is returned. -static SCEVHandle SolveLinEquationWithOverflow(const APInt &A, const APInt &B, +static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B, ScalarEvolution &SE) { uint32_t BW = A.getBitWidth(); assert(BW == B.getBitWidth() && "Bit widths must be the same."); @@ -3633,7 +3632,7 @@ static SCEVHandle SolveLinEquationWithOverflow(const APInt &A, const APInt &B, /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which /// might be the same) or two SCEVCouldNotCompute objects. /// -static std::pair +static std::pair SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); const SCEVConstant *LC = dyn_cast(AddRec->getOperand(0)); @@ -3692,7 +3691,7 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { /// HowFarToZero - Return the number of times a backedge comparing the specified /// value to zero will execute. If not computable, return CouldNotCompute. -SCEVHandle ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { +const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { // If the value is a constant if (const SCEVConstant *C = dyn_cast(V)) { // If the value is already zero, the branch will execute zero times. @@ -3717,8 +3716,8 @@ SCEVHandle ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { // where BW is the common bit width of Start and Step. // Get the initial value for the loop. 
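SolveLinEquationWithOverflow, declared in this hunk, finds X with A*X = B modulo 2^bitwidth; HowFarToZero uses it to solve Start + Step*N = 0 (mod 2^BW) for affine recurrences. A reduced sketch for the case where the coefficient is odd: such an A is invertible mod 2^32, and a few Newton steps find the inverse (the real code first strips common powers of two, which is omitted here, and the constants are arbitrary):

    #include <cassert>
    #include <cstdint>

    // Multiplicative inverse of an odd A modulo 2^32; each Newton step
    // doubles the number of correct low bits.
    uint32_t InverseMod2_32(uint32_t A) {
      uint32_t X = A;               // already correct to 3 bits for odd A
      for (int i = 0; i < 5; ++i)
        X *= 2u - A * X;            // X <- X * (2 - A*X)
      return X;
    }

    int main() {
      uint32_t A = 0x9E3779B9u;                // odd coefficient
      uint32_t B = 12345u;
      uint32_t X = InverseMod2_32(A) * B;      // solves A*X = B (mod 2^32)
      assert(A * X == B);
      return 0;
    }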
- SCEVHandle Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); - SCEVHandle Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); + const SCEV* Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); + const SCEV* Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); if (const SCEVConstant *StepC = dyn_cast(Step)) { // For now we handle only constant steps. @@ -3738,7 +3737,7 @@ SCEVHandle ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of // the quadratic equation to solve it. - std::pair Roots = SolveQuadraticEquation(AddRec, + std::pair Roots = SolveQuadraticEquation(AddRec, *this); const SCEVConstant *R1 = dyn_cast(Roots.first); const SCEVConstant *R2 = dyn_cast(Roots.second); @@ -3757,7 +3756,7 @@ SCEVHandle ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { // We can only use this value if the chrec ends up with an exact zero // value at this index. When solving for "X*X != 5", for example, we // should not accept a root of 2. - SCEVHandle Val = AddRec->evaluateAtIteration(R1, *this); + const SCEV* Val = AddRec->evaluateAtIteration(R1, *this); if (Val->isZero()) return R1; // We found a quadratic root! } @@ -3770,7 +3769,7 @@ SCEVHandle ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { /// HowFarToNonZero - Return the number of times a backedge checking the /// specified value for nonzero will execute. If not computable, return /// CouldNotCompute -SCEVHandle ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { +const SCEV* ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { // Loops that look like: while (X == 0) are very strange indeed. We don't // handle them yet except for the trivial case. This could be expanded in the // future as needed. @@ -3831,7 +3830,7 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { /// more general, since a front-end may have replicated the controlling /// expression. /// -static bool HasSameValue(const SCEVHandle &A, const SCEVHandle &B) { +static bool HasSameValue(const SCEV* A, const SCEV* B) { // Quick check to see if they are the same SCEV. if (A == B) return true; @@ -3946,8 +3945,8 @@ bool ScalarEvolution::isLoopGuardedByCond(const Loop *L, if (!PreCondLHS->getType()->isInteger()) continue; - SCEVHandle PreCondLHSSCEV = getSCEV(PreCondLHS); - SCEVHandle PreCondRHSSCEV = getSCEV(PreCondRHS); + const SCEV* PreCondLHSSCEV = getSCEV(PreCondLHS); + const SCEV* PreCondRHSSCEV = getSCEV(PreCondRHS); if ((HasSameValue(LHS, PreCondLHSSCEV) && HasSameValue(RHS, PreCondRHSSCEV)) || (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) && @@ -3961,22 +3960,22 @@ bool ScalarEvolution::isLoopGuardedByCond(const Loop *L, /// getBECount - Subtract the end and start values and divide by the step, /// rounding up, to get the number of times the backedge is executed. Return /// CouldNotCompute if an intermediate computation overflows. 
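For quadratic (three-operand) recurrences, HowFarToZero in this hunk defers to SolveQuadraticEquation and then re-evaluates the candidate root, accepting it only if the chrec is exactly zero there. The closed form involved is {L,+,M,+,N}(i) = L + M*i + N*i*(i-1)/2; the check below confirms that stepping the chrec matches that formula, using the example {-9,+,1,+,2} (equal to i*i - 9, my own example) whose first exact zero is at i = 3:

    #include <cassert>

    // Closed form of the quadratic chrec {L,+,M,+,N} at iteration i.
    long ChrecAt(long L, long M, long N, long i) {
      return L + M * i + N * (i * (i - 1)) / 2;
    }

    int main() {
      const long L = -9, M = 1, N = 2;         // {-9,+,1,+,2}, i.e. i*i - 9
      long Val = L, Step = M, FirstZero = -1;
      for (long i = 0; i < 10; ++i) {
        assert(Val == ChrecAt(L, M, N, i));    // iterative stepping == closed form
        if (Val == 0 && FirstZero < 0)
          FirstZero = i;
        Val += Step;                           // advance one iteration
        Step += N;                             // the step itself recurs
      }
      assert(FirstZero == 3);                  // an exact root, so it is accepted
      return 0;
    }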
-SCEVHandle ScalarEvolution::getBECount(const SCEVHandle &Start, - const SCEVHandle &End, - const SCEVHandle &Step) { +const SCEV* ScalarEvolution::getBECount(const SCEV* Start, + const SCEV* End, + const SCEV* Step) { const Type *Ty = Start->getType(); - SCEVHandle NegOne = getIntegerSCEV(-1, Ty); - SCEVHandle Diff = getMinusSCEV(End, Start); - SCEVHandle RoundUp = getAddExpr(Step, NegOne); + const SCEV* NegOne = getIntegerSCEV(-1, Ty); + const SCEV* Diff = getMinusSCEV(End, Start); + const SCEV* RoundUp = getAddExpr(Step, NegOne); // Add an adjustment to the difference between End and Start so that // the division will effectively round up. - SCEVHandle Add = getAddExpr(Diff, RoundUp); + const SCEV* Add = getAddExpr(Diff, RoundUp); // Check Add for unsigned overflow. // TODO: More sophisticated things could be done here. const Type *WideTy = IntegerType::get(getTypeSizeInBits(Ty) + 1); - SCEVHandle OperandExtendedAdd = + const SCEV* OperandExtendedAdd = getAddExpr(getZeroExtendExpr(Diff, WideTy), getZeroExtendExpr(RoundUp, WideTy)); if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd) @@ -4001,7 +4000,7 @@ HowManyLessThans(const SCEV *LHS, const SCEV *RHS, if (AddRec->isAffine()) { // FORNOW: We only support unit strides. unsigned BitWidth = getTypeSizeInBits(AddRec->getType()); - SCEVHandle Step = AddRec->getStepRecurrence(*this); + const SCEV* Step = AddRec->getStepRecurrence(*this); // TODO: handle non-constant strides. const SCEVConstant *CStep = dyn_cast(Step); @@ -4037,10 +4036,10 @@ HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // treat m-n as signed nor unsigned due to overflow possibility. // First, we get the value of the LHS in the first iteration: n - SCEVHandle Start = AddRec->getOperand(0); + const SCEV* Start = AddRec->getOperand(0); // Determine the minimum constant start value. - SCEVHandle MinStart = isa(Start) ? Start : + const SCEV* MinStart = isa(Start) ? Start : getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) : APInt::getMinValue(BitWidth)); @@ -4048,7 +4047,7 @@ HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // then we know that it will run exactly (m-n)/s times. Otherwise, we // only know that it will execute (max(m,n)-n)/s times. In both cases, // the division must round up. - SCEVHandle End = RHS; + const SCEV* End = RHS; if (!isLoopGuardedByCond(L, isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, getMinusSCEV(Start, Step), RHS)) @@ -4056,7 +4055,7 @@ HowManyLessThans(const SCEV *LHS, const SCEV *RHS, : getUMaxExpr(RHS, Start); // Determine the maximum constant end value. - SCEVHandle MaxEnd = + const SCEV* MaxEnd = isa(End) ? End : getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth) .ashr(GetMinSignBits(End) - 1) : @@ -4065,11 +4064,11 @@ HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // Finally, we subtract these two values and divide, rounding up, to get // the number of times the backedge is executed. - SCEVHandle BECount = getBECount(Start, End, Step); + const SCEV* BECount = getBECount(Start, End, Step); // The maximum backedge count is similar, except using the minimum start // value and the maximum end value. - SCEVHandle MaxBECount = getBECount(MinStart, MaxEnd, Step);; + const SCEV* MaxBECount = getBECount(MinStart, MaxEnd, Step);; return BackedgeTakenInfo(BECount, MaxBECount); } @@ -4082,7 +4081,7 @@ HowManyLessThans(const SCEV *LHS, const SCEV *RHS, /// this is that it returns the first iteration number where the value is not in /// the condition, thus computing the exit count. 
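getBECount above computes (End - Start + (Step - 1)) / Step, a round-up division, and refuses to trust the result if the rounding add wraps, which it detects by redoing the add in a wider type (the patch widens by a single bit; the sketch below just uses 64 bits, and the sample values are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <optional>

    // Round-up count of Step-sized strides from Start to End, or nullopt if
    // the intermediate add wraps at 32 bits (the CouldNotCompute case).
    std::optional<uint32_t> BECount(uint32_t Start, uint32_t End, uint32_t Step) {
      uint32_t Diff    = End - Start;
      uint32_t RoundUp = Step - 1;
      uint32_t Add     = Diff + RoundUp;                        // may wrap
      uint64_t WideAdd = uint64_t(Diff) + uint64_t(RoundUp);    // cannot wrap
      if (uint64_t(Add) != WideAdd)
        return std::nullopt;
      return Add / Step;
    }

    int main() {
      // The values 0, 3, 6, 9 all stay below 10, and ceil(10/3) == 4.
      assert(BECount(0u, 10u, 3u).value() == 4u);
      // Here Diff + RoundUp exceeds 2^32, so the helper gives up.
      assert(!BECount(5u, 0xFFFFFFFFu, 0xFFu).has_value());
      return 0;
    }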
If the iteration count can't /// be computed, an instance of SCEVCouldNotCompute is returned. -SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, +const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, ScalarEvolution &SE) const { if (Range.isFullSet()) // Infinite loop. return SE.getCouldNotCompute(); @@ -4090,9 +4089,9 @@ SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, // If the start is a non-zero constant, shift the range to simplify things. if (const SCEVConstant *SC = dyn_cast(getStart())) if (!SC->getValue()->isZero()) { - SmallVector Operands(op_begin(), op_end()); + SmallVector Operands(op_begin(), op_end()); Operands[0] = SE.getIntegerSCEV(0, SC->getType()); - SCEVHandle Shifted = SE.getAddRecExpr(Operands, getLoop()); + const SCEV* Shifted = SE.getAddRecExpr(Operands, getLoop()); if (const SCEVAddRecExpr *ShiftedAddRec = dyn_cast(Shifted)) return ShiftedAddRec->getNumIterationsInRange( @@ -4151,12 +4150,12 @@ SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, // quadratic equation to solve it. To do this, we must frame our problem in // terms of figuring out when zero is crossed, instead of when // Range.getUpper() is crossed. - SmallVector NewOps(op_begin(), op_end()); + SmallVector NewOps(op_begin(), op_end()); NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); - SCEVHandle NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); + const SCEV* NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); // Next, solve the constructed addrec - std::pair Roots = + std::pair Roots = SolveQuadraticEquation(cast(NewAddRec), SE); const SCEVConstant *R1 = dyn_cast(Roots.first); const SCEVConstant *R2 = dyn_cast(Roots.second); @@ -4363,12 +4362,12 @@ void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { if (isSCEVable(I->getType())) { OS << *I; OS << " --> "; - SCEVHandle SV = SE.getSCEV(&*I); + const SCEV* SV = SE.getSCEV(&*I); SV->print(OS); const Loop *L = LI->getLoopFor((*I).getParent()); - SCEVHandle AtUse = SE.getSCEVAtScope(SV, L); + const SCEV* AtUse = SE.getSCEVAtScope(SV, L); if (AtUse != SV) { OS << " --> "; AtUse->print(OS); @@ -4376,7 +4375,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { if (L) { OS << "\t\t" "Exits: "; - SCEVHandle ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); + const SCEV* ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); if (!ExitValue->isLoopInvariant(L)) { OS << "<>"; } else { diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index 2a73c27405a..c7e296eef50 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -152,8 +152,8 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, /// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made /// unnecessary; in its place, just signed-divide Ops[i] by the scale and /// check to see if the divide was folded. -static bool FactorOutConstant(SCEVHandle &S, - SCEVHandle &Remainder, +static bool FactorOutConstant(const SCEV* &S, + const SCEV* &Remainder, const APInt &Factor, ScalarEvolution &SE) { // Everything is divisible by one. @@ -168,7 +168,7 @@ static bool FactorOutConstant(SCEVHandle &S, // the value at this scale. It will be considered for subsequent // smaller scales. 
if (C->isZero() || !CI->isZero()) { - SCEVHandle Div = SE.getConstant(CI); + const SCEV* Div = SE.getConstant(CI); S = Div; Remainder = SE.getAddExpr(Remainder, @@ -182,8 +182,8 @@ static bool FactorOutConstant(SCEVHandle &S, if (const SCEVMulExpr *M = dyn_cast(S)) if (const SCEVConstant *C = dyn_cast(M->getOperand(0))) if (!C->getValue()->getValue().srem(Factor)) { - const SmallVectorImpl &MOperands = M->getOperands(); - SmallVector NewMulOps(MOperands.begin(), MOperands.end()); + const SmallVectorImpl &MOperands = M->getOperands(); + SmallVector NewMulOps(MOperands.begin(), MOperands.end()); NewMulOps[0] = SE.getConstant(C->getValue()->getValue().sdiv(Factor)); S = SE.getMulExpr(NewMulOps); @@ -192,13 +192,13 @@ static bool FactorOutConstant(SCEVHandle &S, // In an AddRec, check if both start and step are divisible. if (const SCEVAddRecExpr *A = dyn_cast(S)) { - SCEVHandle Step = A->getStepRecurrence(SE); - SCEVHandle StepRem = SE.getIntegerSCEV(0, Step->getType()); + const SCEV* Step = A->getStepRecurrence(SE); + const SCEV* StepRem = SE.getIntegerSCEV(0, Step->getType()); if (!FactorOutConstant(Step, StepRem, Factor, SE)) return false; if (!StepRem->isZero()) return false; - SCEVHandle Start = A->getStart(); + const SCEV* Start = A->getStart(); if (!FactorOutConstant(Start, Remainder, Factor, SE)) return false; S = SE.getAddRecExpr(Start, Step, A->getLoop()); @@ -233,14 +233,14 @@ static bool FactorOutConstant(SCEVHandle &S, /// loop-invariant portions of expressions, after considering what /// can be folded using target addressing modes. /// -Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin, - const SCEVHandle *op_end, +Value *SCEVExpander::expandAddToGEP(const SCEV* const *op_begin, + const SCEV* const *op_end, const PointerType *PTy, const Type *Ty, Value *V) { const Type *ElTy = PTy->getElementType(); SmallVector GepIndices; - SmallVector Ops(op_begin, op_end); + SmallVector Ops(op_begin, op_end); bool AnyNonZeroIndices = false; // Decend down the pointer's type and attempt to convert the other @@ -251,14 +251,14 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin, for (;;) { APInt ElSize = APInt(SE.getTypeSizeInBits(Ty), ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0); - SmallVector NewOps; - SmallVector ScaledOps; + SmallVector NewOps; + SmallVector ScaledOps; for (unsigned i = 0, e = Ops.size(); i != e; ++i) { // Split AddRecs up into parts as either of the parts may be usable // without the other. if (const SCEVAddRecExpr *A = dyn_cast(Ops[i])) if (!A->getStart()->isZero()) { - SCEVHandle Start = A->getStart(); + const SCEV* Start = A->getStart(); Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()), A->getStepRecurrence(SE), A->getLoop())); @@ -267,8 +267,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin, } // If the scale size is not 0, attempt to factor out a scale. if (ElSize != 0) { - SCEVHandle Op = Ops[i]; - SCEVHandle Remainder = SE.getIntegerSCEV(0, Op->getType()); + const SCEV* Op = Ops[i]; + const SCEV* Remainder = SE.getIntegerSCEV(0, Op->getType()); if (FactorOutConstant(Op, Remainder, ElSize, SE)) { ScaledOps.push_back(Op); // Op now has ElSize factored out. NewOps.push_back(Remainder); @@ -364,7 +364,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) { // comments on expandAddToGEP for details. 
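FactorOutConstant and expandAddToGEP in this file split a byte-offset expression into ElementSize * (typed index) plus a remainder, so the divisible part can become GEP indices; for an addrec the step must divide exactly (StepRem must be zero) while the start may leave a remainder. The underlying arithmetic with plain integers (the element size and recurrence are examples of mine):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t ElSize = 8;                 // sizeof the pointee element
      // Byte-offset recurrence {12,+,40}: factor ElSize out of each operand.
      int64_t StartBytes = 12, StepBytes = 40;
      int64_t StartIdx = StartBytes / ElSize;   // 1, the scaled part
      int64_t StartRem = StartBytes % ElSize;   // 4, kept as a raw byte add
      int64_t StepIdx  = StepBytes / ElSize;    // 5
      assert(StepBytes % ElSize == 0);          // the addrec case requires this
      for (int64_t i = 0; i < 8; ++i) {
        int64_t Bytes = StartBytes + StepBytes * i;
        int64_t Index = StartIdx + StepIdx * i;
        assert(Bytes == ElSize * Index + StartRem);   // GEP index + remainder
      }
      return 0;
    }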
if (SE.TD) if (const PointerType *PTy = dyn_cast(V->getType())) { - const SmallVectorImpl &Ops = S->getOperands(); + const SmallVectorImpl &Ops = S->getOperands(); return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1], PTy, Ty, V); } @@ -420,7 +420,7 @@ Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) { /// Move parts of Base into Rest to leave Base with the minimal /// expression that provides a pointer operand suitable for a /// GEP expansion. -static void ExposePointerBase(SCEVHandle &Base, SCEVHandle &Rest, +static void ExposePointerBase(const SCEV* &Base, const SCEV* &Rest, ScalarEvolution &SE) { while (const SCEVAddRecExpr *A = dyn_cast(Base)) { Base = A->getStart(); @@ -431,7 +431,7 @@ static void ExposePointerBase(SCEVHandle &Base, SCEVHandle &Rest, } if (const SCEVAddExpr *A = dyn_cast(Base)) { Base = A->getOperand(A->getNumOperands()-1); - SmallVector NewAddOps(A->op_begin(), A->op_end()); + SmallVector NewAddOps(A->op_begin(), A->op_end()); NewAddOps.back() = Rest; Rest = SE.getAddExpr(NewAddOps); ExposePointerBase(Base, Rest, SE); @@ -455,9 +455,9 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) { if (CanonicalIV && SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty)) { - SCEVHandle Start = SE.getAnyExtendExpr(S->getStart(), + const SCEV* Start = SE.getAnyExtendExpr(S->getStart(), CanonicalIV->getType()); - SCEVHandle Step = SE.getAnyExtendExpr(S->getStepRecurrence(SE), + const SCEV* Step = SE.getAnyExtendExpr(S->getStepRecurrence(SE), CanonicalIV->getType()); Value *V = expand(SE.getAddRecExpr(Start, Step, S->getLoop())); BasicBlock::iterator SaveInsertPt = getInsertionPoint(); @@ -472,16 +472,16 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) { // {X,+,F} --> X + {0,+,F} if (!S->getStart()->isZero()) { - const SmallVectorImpl &SOperands = S->getOperands(); - SmallVector NewOps(SOperands.begin(), SOperands.end()); + const SmallVectorImpl &SOperands = S->getOperands(); + SmallVector NewOps(SOperands.begin(), SOperands.end()); NewOps[0] = SE.getIntegerSCEV(0, Ty); - SCEVHandle Rest = SE.getAddRecExpr(NewOps, L); + const SCEV* Rest = SE.getAddRecExpr(NewOps, L); // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the // comments on expandAddToGEP for details. if (SE.TD) { - SCEVHandle Base = S->getStart(); - SCEVHandle RestArray[1] = { Rest }; + const SCEV* Base = S->getStart(); + const SCEV* RestArray[1] = { Rest }; // Dig into the expression to find the pointer base for a GEP. ExposePointerBase(Base, RestArray[0], SE); // If we found a pointer, expand the AddRec with a GEP. @@ -581,19 +581,19 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) { // folders, then expandCodeFor the closed form. This allows the folders to // simplify the expression without having to build a bunch of special code // into this folder. - SCEVHandle IH = SE.getUnknown(I); // Get I as a "symbolic" SCEV. + const SCEV* IH = SE.getUnknown(I); // Get I as a "symbolic" SCEV. // Promote S up to the canonical IV type, if the cast is foldable. - SCEVHandle NewS = S; - SCEVHandle Ext = SE.getNoopOrAnyExtend(S, I->getType()); + const SCEV* NewS = S; + const SCEV* Ext = SE.getNoopOrAnyExtend(S, I->getType()); if (isa(Ext)) NewS = Ext; - SCEVHandle V = cast(NewS)->evaluateAtIteration(IH, SE); + const SCEV* V = cast(NewS)->evaluateAtIteration(IH, SE); //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n"; // Truncate the result down to the original type, if needed. 
- SCEVHandle T = SE.getTruncateOrNoop(V, Ty); + const SCEV* T = SE.getTruncateOrNoop(V, Ty); return expand(V); } @@ -654,7 +654,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) { return LHS; } -Value *SCEVExpander::expandCodeFor(SCEVHandle SH, const Type *Ty) { +Value *SCEVExpander::expandCodeFor(const SCEV* SH, const Type *Ty) { // Expand the code for this SCEV. Value *V = expand(SH); if (Ty) { @@ -667,7 +667,7 @@ Value *SCEVExpander::expandCodeFor(SCEVHandle SH, const Type *Ty) { Value *SCEVExpander::expand(const SCEV *S) { // Check to see if we already expanded this. - std::map >::iterator I = + std::map >::iterator I = InsertedExpressions.find(S); if (I != InsertedExpressions.end()) return I->second; @@ -685,7 +685,7 @@ Value * SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L, const Type *Ty) { assert(Ty->isInteger() && "Can only insert integer induction variables!"); - SCEVHandle H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty), + const SCEV* H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty), SE.getIntegerSCEV(1, Ty), L); return expand(H); } diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 326fb38909b..076111c5686 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -96,7 +96,7 @@ namespace { void RewriteNonIntegerIVs(Loop *L); - ICmpInst *LinearFunctionTestReplace(Loop *L, SCEVHandle BackedgeTakenCount, + ICmpInst *LinearFunctionTestReplace(Loop *L, const SCEV* BackedgeTakenCount, Value *IndVar, BasicBlock *ExitingBlock, BranchInst *BI, @@ -128,7 +128,7 @@ Pass *llvm::createIndVarSimplifyPass() { /// SCEV analysis can determine a loop-invariant trip count of the loop, which /// is actually a much broader range than just linear tests. ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L, - SCEVHandle BackedgeTakenCount, + const SCEV* BackedgeTakenCount, Value *IndVar, BasicBlock *ExitingBlock, BranchInst *BI, @@ -137,13 +137,13 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L, // against the preincremented value, otherwise we prefer to compare against // the post-incremented value. Value *CmpIndVar; - SCEVHandle RHS = BackedgeTakenCount; + const SCEV* RHS = BackedgeTakenCount; if (ExitingBlock == L->getLoopLatch()) { // Add one to the "backedge-taken" count to get the trip count. // If this addition may overflow, we have to be more pessimistic and // cast the induction variable before doing the add. - SCEVHandle Zero = SE->getIntegerSCEV(0, BackedgeTakenCount->getType()); - SCEVHandle N = + const SCEV* Zero = SE->getIntegerSCEV(0, BackedgeTakenCount->getType()); + const SCEV* N = SE->getAddExpr(BackedgeTakenCount, SE->getIntegerSCEV(1, BackedgeTakenCount->getType())); if ((isa(N) && !N->isZero()) || @@ -278,7 +278,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, // Okay, this instruction has a user outside of the current loop // and varies predictably *inside* the loop. Evaluate the value it // contains when the loop exits, if possible. 
- SCEVHandle ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); + const SCEV* ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); if (!ExitValue->isLoopInvariant(L)) continue; @@ -348,7 +348,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { BasicBlock *Header = L->getHeader(); BasicBlock *ExitingBlock = L->getExitingBlock(); // may be null - SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L); + const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); // Check to see if this loop has a computable loop-invariant execution count. // If so, this means that we can compute the final value of any expressions @@ -373,14 +373,14 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { NeedCannIV = true; } for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) { - SCEVHandle Stride = IU->StrideOrder[i]; + const SCEV* Stride = IU->StrideOrder[i]; const Type *Ty = SE->getEffectiveSCEVType(Stride->getType()); if (!LargestType || SE->getTypeSizeInBits(Ty) > SE->getTypeSizeInBits(LargestType)) LargestType = Ty; - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[i]); assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); @@ -473,21 +473,21 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, const Type *LargestType, // the need for the code evaluation methods to insert induction variables // of different sizes. for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) { - SCEVHandle Stride = IU->StrideOrder[i]; + const SCEV* Stride = IU->StrideOrder[i]; - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[i]); assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); ilist &List = SI->second->Users; for (ilist::iterator UI = List.begin(), E = List.end(); UI != E; ++UI) { - SCEVHandle Offset = UI->getOffset(); + const SCEV* Offset = UI->getOffset(); Value *Op = UI->getOperandValToReplace(); const Type *UseTy = Op->getType(); Instruction *User = UI->getUser(); // Compute the final addrec to expand into code. - SCEVHandle AR = IU->getReplacementExpr(*UI); + const SCEV* AR = IU->getReplacementExpr(*UI); Value *NewVal = 0; if (AR->isLoopInvariant(L)) { diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp index 65126728c7f..302cdec2ba4 100644 --- a/lib/Transforms/Scalar/LoopDeletion.cpp +++ b/lib/Transforms/Scalar/LoopDeletion.cpp @@ -187,7 +187,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { // Don't remove loops for which we can't solve the trip count. // They could be infinite, in which case we'd be changing program behavior. ScalarEvolution& SE = getAnalysis(); - SCEVHandle S = SE.getBackedgeTakenCount(L); + const SCEV* S = SE.getBackedgeTakenCount(L); if (isa(S)) return false; diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 7579748bbc0..ba600584865 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -64,11 +64,11 @@ namespace { /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as /// well as the PHI node and increment value created for rewrite. 
struct VISIBILITY_HIDDEN IVExpr { - SCEVHandle Stride; - SCEVHandle Base; + const SCEV* Stride; + const SCEV* Base; PHINode *PHI; - IVExpr(const SCEVHandle &stride, const SCEVHandle &base, PHINode *phi) + IVExpr(const SCEV* const stride, const SCEV* const base, PHINode *phi) : Stride(stride), Base(base), PHI(phi) {} }; @@ -77,7 +77,7 @@ namespace { struct VISIBILITY_HIDDEN IVsOfOneStride { std::vector IVs; - void addIV(const SCEVHandle &Stride, const SCEVHandle &Base, PHINode *PHI) { + void addIV(const SCEV* const Stride, const SCEV* const Base, PHINode *PHI) { IVs.push_back(IVExpr(Stride, Base, PHI)); } }; @@ -91,11 +91,11 @@ namespace { /// IVsByStride - Keep track of all IVs that have been inserted for a /// particular stride. - std::map IVsByStride; + std::map IVsByStride; /// StrideNoReuse - Keep track of all the strides whose ivs cannot be /// reused (nor should they be rewritten to reuse other strides). - SmallSet StrideNoReuse; + SmallSet StrideNoReuse; /// DeadInsts - Keep track of instructions we may have made dead, so that /// we can remove them after we are done working. @@ -133,7 +133,7 @@ namespace { private: ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond, IVStrideUse* &CondUse, - const SCEVHandle* &CondStride); + const SCEV* const * &CondStride); void OptimizeIndvars(Loop *L); void OptimizeLoopCountIV(Loop *L); @@ -149,16 +149,16 @@ namespace { IVStrideUse* &CondUse); bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse, - const SCEVHandle *&CondStride); + const SCEV* const * &CondStride); bool RequiresTypeConversion(const Type *Ty, const Type *NewTy); - SCEVHandle CheckForIVReuse(bool, bool, bool, const SCEVHandle&, + const SCEV* CheckForIVReuse(bool, bool, bool, const SCEV* const&, IVExpr&, const Type*, const std::vector& UsersToProcess); bool ValidScale(bool, int64_t, const std::vector& UsersToProcess); bool ValidOffset(bool, int64_t, int64_t, const std::vector& UsersToProcess); - SCEVHandle CollectIVUsers(const SCEVHandle &Stride, + const SCEV* CollectIVUsers(const SCEV* const &Stride, IVUsersOfOneStride &Uses, Loop *L, bool &AllUsesAreAddresses, @@ -168,11 +168,11 @@ namespace { const std::vector &UsersToProcess, const Loop *L, bool AllUsesAreAddresses, - SCEVHandle Stride); + const SCEV* Stride); void PrepareToStrengthReduceFully( std::vector &UsersToProcess, - SCEVHandle Stride, - SCEVHandle CommonExprs, + const SCEV* Stride, + const SCEV* CommonExprs, const Loop *L, SCEVExpander &PreheaderRewriter); void PrepareToStrengthReduceFromSmallerStride( @@ -182,13 +182,13 @@ namespace { Instruction *PreInsertPt); void PrepareToStrengthReduceWithNewPhi( std::vector &UsersToProcess, - SCEVHandle Stride, - SCEVHandle CommonExprs, + const SCEV* Stride, + const SCEV* CommonExprs, Value *CommonBaseV, Instruction *IVIncInsertPt, const Loop *L, SCEVExpander &PreheaderRewriter); - void StrengthReduceStridedIVUsers(const SCEVHandle &Stride, + void StrengthReduceStridedIVUsers(const SCEV* const &Stride, IVUsersOfOneStride &Uses, Loop *L); void DeleteTriviallyDeadInstructions(); @@ -232,7 +232,7 @@ void LoopStrengthReduce::DeleteTriviallyDeadInstructions() { /// containsAddRecFromDifferentLoop - Determine whether expression S involves a /// subexpression that is an AddRec from a loop other than L. An outer loop /// of L is OK, but not an inner loop nor a disjoint loop. -static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) { +static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) { // This is very common, put it first. 
if (isa(S)) return false; @@ -327,7 +327,7 @@ namespace { /// this use. As the use is processed, information gets moved from this /// field to the Imm field (below). BasedUser values are sorted by this /// field. - SCEVHandle Base; + const SCEV* Base; /// Inst - The instruction using the induction variable. Instruction *Inst; @@ -340,7 +340,7 @@ namespace { /// before Inst, because it will be folded into the imm field of the /// instruction. This is also sometimes used for loop-variant values that /// must be added inside the loop. - SCEVHandle Imm; + const SCEV* Imm; /// Phi - The induction variable that performs the striding that /// should be used for this user. @@ -362,13 +362,13 @@ namespace { // Once we rewrite the code to insert the new IVs we want, update the // operands of Inst to use the new expression 'NewBase', with 'Imm' added // to it. - void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase, + void RewriteInstructionToUseNewBase(const SCEV* const &NewBase, Instruction *InsertPt, SCEVExpander &Rewriter, Loop *L, Pass *P, LoopInfo &LI, SmallVectorImpl &DeadInsts); - Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase, + Value *InsertCodeForBaseAtPosition(const SCEV* const &NewBase, const Type *Ty, SCEVExpander &Rewriter, Instruction *IP, Loop *L, @@ -383,7 +383,7 @@ void BasedUser::dump() const { cerr << " Inst: " << *Inst; } -Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase, +Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV* const &NewBase, const Type *Ty, SCEVExpander &Rewriter, Instruction *IP, Loop *L, @@ -407,7 +407,7 @@ Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase, Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt); - SCEVHandle NewValSCEV = SE->getUnknown(Base); + const SCEV* NewValSCEV = SE->getUnknown(Base); // If there is no immediate value, skip the next part. if (!Imm->isZero()) { @@ -430,7 +430,7 @@ Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase, // value of NewBase in the case that it's a diffferent instruction from // the PHI that NewBase is computed from, or null otherwise. // -void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase, +void BasedUser::RewriteInstructionToUseNewBase(const SCEV* const &NewBase, Instruction *NewBasePt, SCEVExpander &Rewriter, Loop *L, Pass *P, LoopInfo &LI, @@ -542,7 +542,7 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase, /// fitsInAddressMode - Return true if V can be subsumed within an addressing /// mode, and does not need to be put in a register first. -static bool fitsInAddressMode(const SCEVHandle &V, const Type *AccessTy, +static bool fitsInAddressMode(const SCEV* const &V, const Type *AccessTy, const TargetLowering *TLI, bool HasBaseReg) { if (const SCEVConstant *SC = dyn_cast(V)) { int64_t VC = SC->getValue()->getSExtValue(); @@ -574,12 +574,12 @@ static bool fitsInAddressMode(const SCEVHandle &V, const Type *AccessTy, /// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are /// loop varying to the Imm operand. -static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm, +static void MoveLoopVariantsToImmediateField(const SCEV* &Val, const SCEV* &Imm, Loop *L, ScalarEvolution *SE) { if (Val->isLoopInvariant(L)) return; // Nothing to do. 
if (const SCEVAddExpr *SAE = dyn_cast(Val)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(SAE->getNumOperands()); for (unsigned i = 0; i != SAE->getNumOperands(); ++i) @@ -597,10 +597,10 @@ static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm, Val = SE->getAddExpr(NewOps); } else if (const SCEVAddRecExpr *SARE = dyn_cast(Val)) { // Try to pull immediates out of the start value of nested addrec's. - SCEVHandle Start = SARE->getStart(); + const SCEV* Start = SARE->getStart(); MoveLoopVariantsToImmediateField(Start, Imm, L, SE); - SmallVector Ops(SARE->op_begin(), SARE->op_end()); + SmallVector Ops(SARE->op_begin(), SARE->op_end()); Ops[0] = Start; Val = SE->getAddRecExpr(Ops, SARE->getLoop()); } else { @@ -616,15 +616,15 @@ static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm, /// Accumulate these immediate values into the Imm value. static void MoveImmediateValues(const TargetLowering *TLI, const Type *AccessTy, - SCEVHandle &Val, SCEVHandle &Imm, + const SCEV* &Val, const SCEV* &Imm, bool isAddress, Loop *L, ScalarEvolution *SE) { if (const SCEVAddExpr *SAE = dyn_cast(Val)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(SAE->getNumOperands()); for (unsigned i = 0; i != SAE->getNumOperands(); ++i) { - SCEVHandle NewOp = SAE->getOperand(i); + const SCEV* NewOp = SAE->getOperand(i); MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE); if (!NewOp->isLoopInvariant(L)) { @@ -643,11 +643,11 @@ static void MoveImmediateValues(const TargetLowering *TLI, return; } else if (const SCEVAddRecExpr *SARE = dyn_cast(Val)) { // Try to pull immediates out of the start value of nested addrec's. - SCEVHandle Start = SARE->getStart(); + const SCEV* Start = SARE->getStart(); MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE); if (Start != SARE->getStart()) { - SmallVector Ops(SARE->op_begin(), SARE->op_end()); + SmallVector Ops(SARE->op_begin(), SARE->op_end()); Ops[0] = Start; Val = SE->getAddRecExpr(Ops, SARE->getLoop()); } @@ -658,8 +658,8 @@ static void MoveImmediateValues(const TargetLowering *TLI, fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) && SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) { - SCEVHandle SubImm = SE->getIntegerSCEV(0, Val->getType()); - SCEVHandle NewOp = SME->getOperand(1); + const SCEV* SubImm = SE->getIntegerSCEV(0, Val->getType()); + const SCEV* NewOp = SME->getOperand(1); MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE); // If we extracted something out of the subexpressions, see if we can @@ -694,7 +694,7 @@ static void MoveImmediateValues(const TargetLowering *TLI, static void MoveImmediateValues(const TargetLowering *TLI, Instruction *User, - SCEVHandle &Val, SCEVHandle &Imm, + const SCEV* &Val, const SCEV* &Imm, bool isAddress, Loop *L, ScalarEvolution *SE) { const Type *AccessTy = getAccessType(User); @@ -704,19 +704,19 @@ static void MoveImmediateValues(const TargetLowering *TLI, /// SeparateSubExprs - Decompose Expr into all of the subexpressions that are /// added together. This is used to reassociate common addition subexprs /// together for maximal sharing when rewriting bases. 
-static void SeparateSubExprs(SmallVector &SubExprs, - SCEVHandle Expr, +static void SeparateSubExprs(SmallVector &SubExprs, + const SCEV* Expr, ScalarEvolution *SE) { if (const SCEVAddExpr *AE = dyn_cast(Expr)) { for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j) SeparateSubExprs(SubExprs, AE->getOperand(j), SE); } else if (const SCEVAddRecExpr *SARE = dyn_cast(Expr)) { - SCEVHandle Zero = SE->getIntegerSCEV(0, Expr->getType()); + const SCEV* Zero = SE->getIntegerSCEV(0, Expr->getType()); if (SARE->getOperand(0) == Zero) { SubExprs.push_back(Expr); } else { // Compute the addrec with zero as its base. - SmallVector Ops(SARE->op_begin(), SARE->op_end()); + SmallVector Ops(SARE->op_begin(), SARE->op_end()); Ops[0] = Zero; // Start with zero base. SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop())); @@ -740,7 +740,7 @@ struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; }; /// not remove anything. This looks for things like (a+b+c) and /// (a+c+d) and computes the common (a+c) subexpression. The common expression /// is *removed* from the Bases and returned. -static SCEVHandle +static const SCEV* RemoveCommonExpressionsFromUseBases(std::vector &Uses, ScalarEvolution *SE, Loop *L, const TargetLowering *TLI) { @@ -748,9 +748,9 @@ RemoveCommonExpressionsFromUseBases(std::vector &Uses, // Only one use? This is a very common case, so we handle it specially and // cheaply. - SCEVHandle Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType()); - SCEVHandle Result = Zero; - SCEVHandle FreeResult = Zero; + const SCEV* Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType()); + const SCEV* Result = Zero; + const SCEV* FreeResult = Zero; if (NumUses == 1) { // If the use is inside the loop, use its base, regardless of what it is: // it is clearly shared across all the IV's. If the use is outside the loop @@ -766,13 +766,13 @@ RemoveCommonExpressionsFromUseBases(std::vector &Uses, // Also track whether all uses of each expression can be moved into an // an addressing mode "for free"; such expressions are left within the loop. // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; }; - std::map SubExpressionUseData; + std::map SubExpressionUseData; // UniqueSubExprs - Keep track of all of the subexpressions we see in the // order we see them. - SmallVector UniqueSubExprs; + SmallVector UniqueSubExprs; - SmallVector SubExprs; + SmallVector SubExprs; unsigned NumUsesInsideLoop = 0; for (unsigned i = 0; i != NumUses; ++i) { // If the user is outside the loop, just ignore it for base computation. @@ -816,7 +816,7 @@ RemoveCommonExpressionsFromUseBases(std::vector &Uses, // Now that we know how many times each is used, build Result. Iterate over // UniqueSubexprs so that we have a stable ordering. for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) { - std::map::iterator I = + std::map::iterator I = SubExpressionUseData.find(UniqueSubExprs[i]); assert(I != SubExpressionUseData.end() && "Entry not found?"); if (I->second.Count == NumUsesInsideLoop) { // Found CSE! 
@@ -860,7 +860,7 @@ RemoveCommonExpressionsFromUseBases(std::vector &Uses, if (FreeResult != Zero) { SeparateSubExprs(SubExprs, FreeResult, SE); for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) { - std::map::iterator I = + std::map::iterator I = SubExpressionUseData.find(SubExprs[j]); SubExpressionUseData.erase(I); } @@ -989,10 +989,10 @@ bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1, /// be folded into the addressing mode, nor even that the factor be constant; /// a multiply (executed once) outside the loop is better than another IV /// within. Well, usually. -SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg, +const SCEV* LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg, bool AllUsesAreAddresses, bool AllUsesAreOutsideLoop, - const SCEVHandle &Stride, + const SCEV* const &Stride, IVExpr &IV, const Type *Ty, const std::vector& UsersToProcess) { if (StrideNoReuse.count(Stride)) @@ -1002,7 +1002,7 @@ SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg, int64_t SInt = SC->getValue()->getSExtValue(); for (unsigned NewStride = 0, e = IU->StrideOrder.size(); NewStride != e; ++NewStride) { - std::map::iterator SI = + std::map::iterator SI = IVsByStride.find(IU->StrideOrder[NewStride]); if (SI == IVsByStride.end() || !isa(SI->first) || StrideNoReuse.count(SI->first)) @@ -1055,7 +1055,7 @@ SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg, // an existing IV if we can. for (unsigned NewStride = 0, e = IU->StrideOrder.size(); NewStride != e; ++NewStride) { - std::map::iterator SI = + std::map::iterator SI = IVsByStride.find(IU->StrideOrder[NewStride]); if (SI == IVsByStride.end() || !isa(SI->first)) continue; @@ -1075,7 +1075,7 @@ SCEVHandle LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg, // -1*old. for (unsigned NewStride = 0, e = IU->StrideOrder.size(); NewStride != e; ++NewStride) { - std::map::iterator SI = + std::map::iterator SI = IVsByStride.find(IU->StrideOrder[NewStride]); if (SI == IVsByStride.end()) continue; @@ -1104,7 +1104,7 @@ static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) { /// isNonConstantNegative - Return true if the specified scev is negated, but /// not a constant. -static bool isNonConstantNegative(const SCEVHandle &Expr) { +static bool isNonConstantNegative(const SCEV* const &Expr) { const SCEVMulExpr *Mul = dyn_cast(Expr); if (!Mul) return false; @@ -1121,7 +1121,7 @@ static bool isNonConstantNegative(const SCEVHandle &Expr) { /// of the strided accesses, as well as the old information from Uses. We /// progressively move information from the Base field to the Imm field, until /// we eventually have the full access expression to rewrite the use. -SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride, +const SCEV* LoopStrengthReduce::CollectIVUsers(const SCEV* const &Stride, IVUsersOfOneStride &Uses, Loop *L, bool &AllUsesAreAddresses, @@ -1152,7 +1152,7 @@ SCEVHandle LoopStrengthReduce::CollectIVUsers(const SCEVHandle &Stride, // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find // "A+B"), emit it to the preheader, then remove the expression from the // UsersToProcess base values. 
- SCEVHandle CommonExprs = + const SCEV* CommonExprs = RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI); // Next, figure out what we can represent in the immediate fields of @@ -1218,7 +1218,7 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode( const std::vector &UsersToProcess, const Loop *L, bool AllUsesAreAddresses, - SCEVHandle Stride) { + const SCEV* Stride) { if (!EnableFullLSRMode) return false; @@ -1255,7 +1255,7 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode( if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType()); const Instruction *Inst = UsersToProcess[i].Inst; const Type *AccessTy = getAccessType(Inst); - SCEVHandle Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm); + const SCEV* Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm); if (!Diff->isZero() && (!AllUsesAreAddresses || !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true))) @@ -1289,7 +1289,7 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode( /// /// Return the created phi node. /// -static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step, +static PHINode *InsertAffinePhi(const SCEV* Start, const SCEV* Step, Instruction *IVIncInsertPt, const Loop *L, SCEVExpander &Rewriter) { @@ -1309,7 +1309,7 @@ static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step, // If the stride is negative, insert a sub instead of an add for the // increment. bool isNegative = isNonConstantNegative(Step); - SCEVHandle IncAmount = Step; + const SCEV* IncAmount = Step; if (isNegative) IncAmount = Rewriter.SE.getNegativeSCEV(Step); @@ -1348,13 +1348,13 @@ static void SortUsersToProcess(std::vector &UsersToProcess) { // loop before users outside of the loop with a particular base. // // We would like to use stable_sort here, but we can't. The problem is that - // SCEVHandle's don't have a deterministic ordering w.r.t to each other, so + // const SCEV*'s don't have a deterministic ordering w.r.t to each other, so // we don't have anything to do a '<' comparison on. Because we think the // number of uses is small, do a horrible bubble sort which just relies on // ==. for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) { // Get a base value. - SCEVHandle Base = UsersToProcess[i].Base; + const SCEV* Base = UsersToProcess[i].Base; // Compact everything with this base to be consecutive with this one. for (unsigned j = i+1; j != e; ++j) { @@ -1373,8 +1373,8 @@ static void SortUsersToProcess(std::vector &UsersToProcess) { void LoopStrengthReduce::PrepareToStrengthReduceFully( std::vector &UsersToProcess, - SCEVHandle Stride, - SCEVHandle CommonExprs, + const SCEV* Stride, + const SCEV* CommonExprs, const Loop *L, SCEVExpander &PreheaderRewriter) { DOUT << " Fully reducing all users\n"; @@ -1386,9 +1386,9 @@ LoopStrengthReduce::PrepareToStrengthReduceFully( // TODO: The uses are grouped by base, but not sorted. We arbitrarily // pick the first Imm value here to start with, and adjust it for the // other uses. - SCEVHandle Imm = UsersToProcess[i].Imm; - SCEVHandle Base = UsersToProcess[i].Base; - SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm); + const SCEV* Imm = UsersToProcess[i].Imm; + const SCEV* Base = UsersToProcess[i].Base; + const SCEV* Start = SE->getAddExpr(CommonExprs, Base, Imm); PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L, PreheaderRewriter); // Loop over all the users with the same base. 
@@ -1420,8 +1420,8 @@ static Instruction *FindIVIncInsertPt(std::vector &UsersToProcess, void LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi( std::vector &UsersToProcess, - SCEVHandle Stride, - SCEVHandle CommonExprs, + const SCEV* Stride, + const SCEV* CommonExprs, Value *CommonBaseV, Instruction *IVIncInsertPt, const Loop *L, @@ -1497,7 +1497,7 @@ static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset, /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single /// stride of IV. All of the users may have different starting values, and this /// may not be the only stride. -void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, +void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride, IVUsersOfOneStride &Uses, Loop *L) { // If all the users are moved to another stride, then there is nothing to do. @@ -1520,7 +1520,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, // move information from the Base field to the Imm field, until we eventually // have the full access expression to rewrite the use. std::vector UsersToProcess; - SCEVHandle CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses, + const SCEV* CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses, AllUsesAreOutsideLoop, UsersToProcess); @@ -1538,8 +1538,8 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, // If all uses are addresses, consider sinking the immediate part of the // common expression back into uses if they can fit in the immediate fields. if (TLI && HaveCommonExprs && AllUsesAreAddresses) { - SCEVHandle NewCommon = CommonExprs; - SCEVHandle Imm = SE->getIntegerSCEV(0, ReplacedTy); + const SCEV* NewCommon = CommonExprs; + const SCEV* Imm = SE->getIntegerSCEV(0, ReplacedTy); MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE); if (!Imm->isZero()) { bool DoSink = true; @@ -1585,7 +1585,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, Value *CommonBaseV = Constant::getNullValue(ReplacedTy); - SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy); + const SCEV* RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy); IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty), SE->getIntegerSCEV(0, Type::Int32Ty), 0); @@ -1625,7 +1625,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, // strength-reduced forms. This outer loop handles all bases, the inner // loop handles all users of a particular base. while (!UsersToProcess.empty()) { - SCEVHandle Base = UsersToProcess.back().Base; + const SCEV* Base = UsersToProcess.back().Base; Instruction *Inst = UsersToProcess.back().Inst; // Emit the code for Base into the preheader. @@ -1679,7 +1679,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, User.Inst->moveBefore(IVIncInsertPt); } - SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp); + const SCEV* RewriteExpr = SE->getUnknown(RewriteOp); if (SE->getEffectiveSCEVType(RewriteOp->getType()) != SE->getEffectiveSCEVType(ReplacedTy)) { @@ -1711,7 +1711,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, // The base has been used to initialize the PHI node but we don't want // it here. 
if (!ReuseIV.Base->isZero()) { - SCEVHandle typedBase = ReuseIV.Base; + const SCEV* typedBase = ReuseIV.Base; if (SE->getEffectiveSCEVType(RewriteExpr->getType()) != SE->getEffectiveSCEVType(ReuseIV.Base->getType())) { // It's possible the original IV is a larger type than the new IV, @@ -1776,10 +1776,10 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride, /// set the IV user and stride information and return true, otherwise return /// false. bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse, - const SCEVHandle *&CondStride) { + const SCEV* const * &CondStride) { for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e && !CondUse; ++Stride) { - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[Stride]); assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); @@ -1806,7 +1806,7 @@ namespace { const ScalarEvolution *SE; explicit StrideCompare(const ScalarEvolution *se) : SE(se) {} - bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) { + bool operator()(const SCEV* const &LHS, const SCEV* const &RHS) { const SCEVConstant *LHSC = dyn_cast(LHS); const SCEVConstant *RHSC = dyn_cast(RHS); if (LHSC && RHSC) { @@ -1849,14 +1849,14 @@ namespace { /// if (v1 < 30) goto loop ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, IVStrideUse* &CondUse, - const SCEVHandle* &CondStride) { + const SCEV* const* &CondStride) { // If there's only one stride in the loop, there's nothing to do here. if (IU->StrideOrder.size() < 2) return Cond; // If there are other users of the condition's stride, don't bother // trying to change the condition because the stride will still // remain. - std::map::iterator I = + std::map::iterator I = IU->IVUsesByStride.find(*CondStride); if (I == IU->IVUsesByStride.end() || I->second->Users.size() != 1) @@ -1873,11 +1873,11 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, const Type *NewCmpTy = NULL; unsigned TyBits = SE->getTypeSizeInBits(CmpTy); unsigned NewTyBits = 0; - SCEVHandle *NewStride = NULL; + const SCEV* *NewStride = NULL; Value *NewCmpLHS = NULL; Value *NewCmpRHS = NULL; int64_t Scale = 1; - SCEVHandle NewOffset = SE->getIntegerSCEV(0, CmpTy); + const SCEV* NewOffset = SE->getIntegerSCEV(0, CmpTy); if (ConstantInt *C = dyn_cast(Cond->getOperand(1))) { int64_t CmpVal = C->getValue().getSExtValue(); @@ -1889,7 +1889,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, // Look for a suitable stride / iv as replacement. 
for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) { - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[i]); if (!isa(SI->first)) continue; @@ -1969,7 +1969,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, bool AllUsesAreAddresses = true; bool AllUsesAreOutsideLoop = true; std::vector UsersToProcess; - SCEVHandle CommonExprs = CollectIVUsers(SI->first, *SI->second, L, + const SCEV* CommonExprs = CollectIVUsers(SI->first, *SI->second, L, AllUsesAreAddresses, AllUsesAreOutsideLoop, UsersToProcess); @@ -2104,13 +2104,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond, SelectInst *Sel = dyn_cast(Cond->getOperand(1)); if (!Sel || !Sel->hasOneUse()) return Cond; - SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L); + const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); if (isa(BackedgeTakenCount)) return Cond; - SCEVHandle One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType()); + const SCEV* One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType()); // Add one to the backedge-taken count to get the trip count. - SCEVHandle IterationCount = SE->getAddExpr(BackedgeTakenCount, One); + const SCEV* IterationCount = SE->getAddExpr(BackedgeTakenCount, One); // Check for a max calculation that matches the pattern. if (!isa(IterationCount) && !isa(IterationCount)) @@ -2123,13 +2123,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond, if (Max->getNumOperands() != 2) return Cond; - SCEVHandle MaxLHS = Max->getOperand(0); - SCEVHandle MaxRHS = Max->getOperand(1); + const SCEV* MaxLHS = Max->getOperand(0); + const SCEV* MaxRHS = Max->getOperand(1); if (!MaxLHS || MaxLHS != One) return Cond; // Check the relevant induction variable for conformance to // the pattern. - SCEVHandle IV = SE->getSCEV(Cond->getOperand(0)); + const SCEV* IV = SE->getSCEV(Cond->getOperand(0)); const SCEVAddRecExpr *AR = dyn_cast(IV); if (!AR || !AR->isAffine() || AR->getStart() != One || @@ -2175,13 +2175,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond, /// inside the loop then try to eliminate the cast opeation. void LoopStrengthReduce::OptimizeShadowIV(Loop *L) { - SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L); + const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); if (isa(BackedgeTakenCount)) return; for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e; ++Stride) { - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[Stride]); assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); if (!isa(SI->first)) @@ -2311,7 +2311,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) { // Search IVUsesByStride to find Cond's IVUse if there is one. IVStrideUse *CondUse = 0; - const SCEVHandle *CondStride = 0; + const SCEV* const *CondStride = 0; ICmpInst *Cond = cast(TermBr->getCondition()); if (!FindIVUserForCond(Cond, CondUse, CondStride)) return; // setcc doesn't use the IV. 
@@ -2341,7 +2341,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
     int64_t SInt = SC->getValue()->getSExtValue();
     for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee;
          ++NewStride) {
-      std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
+      std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI =
         IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
       if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
         continue;
@@ -2355,7 +2355,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
           bool AllUsesAreAddresses = true;
           bool AllUsesAreOutsideLoop = true;
           std::vector<BasedUser> UsersToProcess;
-          SCEVHandle CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
+          const SCEV* CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                                   AllUsesAreAddresses,
                                                   AllUsesAreOutsideLoop,
                                                   UsersToProcess);
@@ -2416,7 +2416,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
 void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
 
   // If the number of times the loop is executed isn't computable, give up.
-  SCEVHandle BackedgeTakenCount = SE->getBackedgeTakenCount(L);
+  const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L);
   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
     return;
 
@@ -2445,9 +2445,9 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
   // Handle only tests for equality for the moment, and only stride 1.
   if (Cond->getPredicate() != CmpInst::ICMP_EQ)
     return;
-  SCEVHandle IV = SE->getSCEV(Cond->getOperand(0));
+  const SCEV* IV = SE->getSCEV(Cond->getOperand(0));
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
-  SCEVHandle One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
+  const SCEV* One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
   if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One)
     return;
   // If the RHS of the comparison is defined inside the loop, the rewrite
@@ -2563,7 +2563,7 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
   // strides deterministic - not dependent on map order.
   for (unsigned Stride = 0, e = IU->StrideOrder.size();
        Stride != e; ++Stride) {
-    std::map<SCEVHandle, IVUsersOfOneStride *>::iterator SI =
+    std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI =
       IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
     assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
     // FIXME: Generalize to non-affine IV's.
-- 
2.11.0
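
Illustrative footnote, not part of the patch: after this change, SCEV expressions are uniqued and owned by the ScalarEvolution analysis, so client passes hold plain const SCEV* pointers returned by the factory methods instead of reference-counted SCEVHandle temporaries. Below is a minimal sketch of the post-patch usage pattern; the helper name computeTripCount and its enclosing function are hypothetical, but the ScalarEvolution calls mirror ones used in the hunks above (getBackedgeTakenCount, getIntegerSCEV, getAddExpr, isa<SCEVCouldNotCompute>).

  // Sketch only -- assumes the post-patch (r73906) ScalarEvolution interface.
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  using namespace llvm;

  // Hypothetical helper: return the trip count of L as a SCEV, or null if
  // ScalarEvolution cannot compute it. The locals are bare const SCEV*
  // pointers; there is no handle to copy or release, because the expressions
  // are owned and uniqued by the ScalarEvolution instance.
  static const SCEV *computeTripCount(Loop *L, ScalarEvolution &SE) {
    const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
    if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
      return 0;
    // Trip count = backedge-taken count + 1, built through the usual folders.
    const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());
    return SE.getAddExpr(BackedgeTakenCount, One);
  }

Containers keyed by SCEVs change the same way, as the IVUsers and LoopStrengthReduce hunks show: std::map<SCEVHandle, T> becomes std::map<const SCEV*, T>, and SmallVector/SmallSet of SCEVHandle become the corresponding containers of const SCEV*.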