From d3a5adc5ba41464aadb5e046e29127b849f163fc Mon Sep 17 00:00:00 2001
From: "Duncan P. N. Exon Smith"
Date: Sat, 10 Oct 2015 00:53:03 +0000
Subject: [PATCH] Analysis: Remove implicit ilist iterator conversions

Remove implicit ilist iterator conversions from LLVMAnalysis.

I came across something really scary in `llvm::isKnownNotFullPoison()`
which relied on `Instruction::getNextNode()` being completely broken
(not surprising, but scary nevertheless).  This function is documented
(and coded) to return `nullptr` when it gets to the sentinel, but with
an `ilist_half_node` as a sentinel, the sentinel check looks into some
other memory and we don't recognize we've hit the end.

Rooting out these scary cases is the reason I'm removing the implicit
conversions before doing anything else with `ilist`; I'm not at all
surprised that clients rely on badness.

I found another scary case -- this time, not relying on badness, just
bad (but I guess getting lucky so far) -- in
`ObjectSizeOffsetEvaluator::compute_()`.  Here, we save out the
insertion point, do some things, and then restore it.  Previously, we
let the iterator auto-convert to `Instruction*`, and then set it back
using the `Instruction*` version:

    Instruction *PrevInsertPoint = Builder.GetInsertPoint();

    /* Logic that may change insert point */

    if (PrevInsertPoint)
      Builder.SetInsertPoint(PrevInsertPoint);

The check for `PrevInsertPoint` doesn't protect correctly against bad
accesses.  If the insertion point has been set to the end of a basic
block (i.e., `SetInsertPoint(SomeBB)`), then `GetInsertPoint()` returns
an iterator pointing at the list sentinel.  The version of
`SetInsertPoint()` that's getting called will then call
`PrevInsertPoint->getParent()`, which explodes horribly.  The only
reason this hasn't blown up is that it's fairly unlikely the builder is
adding to the end of the block; usually, we're adding instructions
somewhere before the terminator.
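The fix switches to the RAII helper the builder already provides:
`BuilderTy::InsertPointGuard` (see the MemoryBuiltins.cpp hunk below)
saves the insert block and insert iterator together and restores both
when it goes out of scope, so an end-of-block insertion point survives
the round trip without ever being dereferenced.  A minimal sketch of
the idiom, using a plain `IRBuilder<>`; the helper name
`emitTemporarilyAt()` is illustrative, not code from this patch:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    static void emitTemporarilyAt(IRBuilder<> &Builder, Instruction *I) {
      // Saves GetInsertBlock() and GetInsertPoint() on construction.
      IRBuilder<>::InsertPointGuard Guard(Builder);
      if (I)
        Builder.SetInsertPoint(I); // logic that may change the insert point
      // ... emit whatever is needed at the temporary position ...
    } // Guard's destructor restores the saved block/iterator pair here,
      // even when the saved iterator was end(), with no getParent() call.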
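More generally, the mechanical change throughout this diff is the same:
hold a real iterator (from `getIterator()` or `begin()`/`end()`) and
convert back to a pointer explicitly with `&*I` only where a pointer is
genuinely needed, instead of leaning on the implicit conversions.  A
sketch of the iteration idiom; `countFrom()` is a hypothetical example,
not code from this patch:

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    static unsigned countFrom(const Instruction *Start) {
      const BasicBlock *BB = Start->getParent();
      unsigned N = 0;
      // Compare against BB->end() rather than chasing getNextNode() into
      // a half-node sentinel; the end check on an iterator is always
      // well-defined.
      for (BasicBlock::const_iterator I = Start->getIterator(), E = BB->end();
           I != E; ++I)
        ++N; // use &*I wherever an Instruction * is actually required
      return N;
    }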
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@249925 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Analysis/AliasAnalysis.cpp                |  6 +--
 lib/Analysis/AliasAnalysisEvaluator.cpp       |  6 +--
 lib/Analysis/AliasSetTracker.cpp              |  8 ++--
 lib/Analysis/BasicAliasAnalysis.cpp           |  2 +-
 lib/Analysis/BlockFrequencyInfo.cpp           |  2 +-
 lib/Analysis/BranchProbabilityInfo.cpp        |  9 ++---
 lib/Analysis/CFG.cpp                          |  3 +-
 lib/Analysis/CodeMetrics.cpp                  |  2 +-
 lib/Analysis/CostModel.cpp                    |  2 +-
 lib/Analysis/DivergenceAnalysis.cpp           |  4 +-
 lib/Analysis/IVUsers.cpp                      |  2 +-
 lib/Analysis/InlineCost.cpp                   | 15 ++++---
 lib/Analysis/Lint.cpp                         |  8 ++--
 lib/Analysis/Loads.cpp                        |  5 ++-
 lib/Analysis/LoopAccessAnalysis.cpp           |  2 +-
 lib/Analysis/MemoryBuiltins.cpp               |  7 +---
 lib/Analysis/MemoryDependenceAnalysis.cpp     | 20 +++++-----
 lib/Analysis/ObjCARCInstKind.cpp              |  4 +-
 lib/Analysis/ScalarEvolutionExpander.cpp      | 56 ++++++++++++++-------------
 lib/Analysis/ScalarEvolutionNormalization.cpp |  2 +-
 lib/Analysis/SparsePropagation.cpp            | 14 +++----
 lib/Analysis/ValueTracking.cpp                | 19 ++++-----
 22 files changed, 99 insertions(+), 99 deletions(-)

diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 26ab74bbb4e..4cf8dcfbbe7 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -351,12 +351,12 @@ bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                           const ModRefInfo Mode) {
   assert(I1.getParent() == I2.getParent() &&
          "Instructions not in same basic block!");
-  BasicBlock::const_iterator I = &I1;
-  BasicBlock::const_iterator E = &I2;
+  BasicBlock::const_iterator I = I1.getIterator();
+  BasicBlock::const_iterator E = I2.getIterator();
   ++E;  // Convert from inclusive to exclusive range.
 
   for (; I != E; ++I) // Check every instruction in range
-    if (getModRefInfo(I, Loc) & Mode)
+    if (getModRefInfo(&*I, Loc) & Mode)
       return true;
   return false;
 }
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index 20b123c2e16..b1dca1d9375 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -149,9 +149,9 @@ bool AAEval::runOnFunction(Function &F) {
   SetVector<Value *> Loads;
   SetVector<Value *> Stores;
 
-  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
-    if (I->getType()->isPointerTy())    // Add all pointer arguments.
-      Pointers.insert(I);
+  for (auto &I : F.args())
+    if (I.getType()->isPointerTy()) // Add all pointer arguments.
+      Pointers.insert(&I);
 
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
     if (I->getType()->isPointerTy()) // Add all pointer instructions.
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index 6cc2a030982..3094049b3cc 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -221,7 +221,7 @@ AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
     if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, AAInfo, AA)) continue;
 
     if (!FoundSet) {      // If this is the first alias set ptr can go into.
-      FoundSet = Cur;     // Remember it.
+      FoundSet = &*Cur;   // Remember it.
     } else {              // Otherwise, we must merge the sets.
       FoundSet->mergeSetIn(*Cur, *this);     // Merge in contents.
     }
@@ -255,7 +255,7 @@ AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
     if (Cur->Forward || !Cur->aliasesUnknownInst(Inst, AA))
       continue;
     if (!FoundSet)            // If this is the first alias set ptr can go into.
-      FoundSet = Cur;         // Remember it.
+      FoundSet = &*Cur;       // Remember it.
     else if (!Cur->Forward)   // Otherwise, we must merge the sets.
       FoundSet->mergeSetIn(*Cur, *this);     // Merge in contents.
   }
@@ -372,8 +372,8 @@ bool AliasSetTracker::add(Instruction *I) {
 }
 
 void AliasSetTracker::add(BasicBlock &BB) {
-  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
-    add(I);
+  for (auto &I : BB)
+    add(&I);
 }
 
 void AliasSetTracker::add(const AliasSetTracker &AST) {
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index ce6427e6f14..dc24a85c97a 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1418,7 +1418,7 @@ bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
   // the Values cannot come from different iterations of a potential cycle the
   // phi nodes could be involved in.
   for (auto *P : VisitedPhiBBs)
-    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
+    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
       return false;
 
   return true;
diff --git a/lib/Analysis/BlockFrequencyInfo.cpp b/lib/Analysis/BlockFrequencyInfo.cpp
index edd5ad9d5b7..ac4ee8f11e0 100644
--- a/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/lib/Analysis/BlockFrequencyInfo.cpp
@@ -55,7 +55,7 @@ struct GraphTraits<BlockFrequencyInfo *> {
   typedef Function::const_iterator nodes_iterator;
 
   static inline const NodeType *getEntryNode(const BlockFrequencyInfo *G) {
-    return G->getFunction()->begin();
+    return &G->getFunction()->front();
   }
   static ChildIteratorType child_begin(const NodeType *N) {
     return succ_begin(N);
diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp
index b813dca9369..9ab357b62cf 100644
--- a/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/lib/Analysis/BranchProbabilityInfo.cpp
@@ -514,11 +514,10 @@ void BranchProbabilityInfo::print(raw_ostream &OS) const {
   // We print the probabilities from the last function the analysis ran over,
   // or the function it is currently running over.
   assert(LastF && "Cannot print prior to running over a function");
-  for (Function::const_iterator BI = LastF->begin(), BE = LastF->end();
-       BI != BE; ++BI) {
-    for (succ_const_iterator SI = succ_begin(BI), SE = succ_end(BI);
-         SI != SE; ++SI) {
-      printEdgeProbability(OS << "  ", BI, *SI);
+  for (const auto &BI : *LastF) {
+    for (succ_const_iterator SI = succ_begin(&BI), SE = succ_end(&BI); SI != SE;
+         ++SI) {
+      printEdgeProbability(OS << "  ", &BI, *SI);
     }
   }
 }
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index e15109bd270..041fcb6313b 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -203,7 +203,8 @@ bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
       return true;
 
     // Linear scan, start at 'A', see whether we hit 'B' or the end first.
-    for (BasicBlock::const_iterator I = A, E = BB->end(); I != E; ++I) {
+    for (BasicBlock::const_iterator I = A->getIterator(), E = BB->end(); I != E;
+         ++I) {
       if (&*I == B)
         return true;
     }
diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp
index 157a4bdd11c..90fddc539dc 100644
--- a/lib/Analysis/CodeMetrics.cpp
+++ b/lib/Analysis/CodeMetrics.cpp
@@ -116,7 +116,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
   for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
        II != E; ++II) {
     // Skip ephemeral values.
-    if (EphValues.count(II))
+    if (EphValues.count(&*II))
       continue;
 
     // Special handling for calls.
diff --git a/lib/Analysis/CostModel.cpp b/lib/Analysis/CostModel.cpp
index da790d74524..604cc2af746 100644
--- a/lib/Analysis/CostModel.cpp
+++ b/lib/Analysis/CostModel.cpp
@@ -523,7 +523,7 @@ void CostModelAnalysis::print(raw_ostream &OS, const Module*) const {
 
   for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
     for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
-      Instruction *Inst = it;
+      Instruction *Inst = &*it;
       unsigned Cost = getInstructionCost(Inst);
       if (Cost != (unsigned)-1)
         OS << "Cost Model: Found an estimated cost of " << Cost;
diff --git a/lib/Analysis/DivergenceAnalysis.cpp b/lib/Analysis/DivergenceAnalysis.cpp
index c24f38a9c61..93a288858ba 100644
--- a/lib/Analysis/DivergenceAnalysis.cpp
+++ b/lib/Analysis/DivergenceAnalysis.cpp
@@ -147,8 +147,8 @@ void DivergencePropagator::exploreSyncDependency(TerminatorInst *TI) {
   for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
     // A PHINode is uniform if it returns the same value no matter which path is
     // taken.
-    if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(I).second)
-      Worklist.push_back(I);
+    if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(&*I).second)
+      Worklist.push_back(&*I);
   }
 
   // Propagation rule 2: if a value defined in a loop is used outside, the user
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index a1c6b72f9f0..e0c5d8fa5f5 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -276,7 +276,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
   // them by stride. Start by finding all of the PHI nodes in the header for
   // this loop. If they are induction variables, inspect their uses.
   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
-    (void)AddUsersIfInteresting(I);
+    (void)AddUsersIfInteresting(&*I);
 
   return false;
 }
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 539cbe147e7..8169ce48246 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -960,7 +960,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
       continue;
 
     // Skip ephemeral values.
-    if (EphValues.count(I))
+    if (EphValues.count(&*I))
       continue;
 
     ++NumInstructions;
@@ -992,7 +992,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
     // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
     // cost should count against inlining.
-    if (Base::visit(I))
+    if (Base::visit(&*I))
       ++NumInstructionsSimplified;
     else
       Cost += InlineConstants::InstrCost;
@@ -1172,15 +1172,15 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
        FAI != FAE; ++FAI, ++CAI) {
     assert(CAI != CS.arg_end());
     if (Constant *C = dyn_cast<Constant>(CAI))
-      SimplifiedValues[FAI] = C;
+      SimplifiedValues[&*FAI] = C;
 
     Value *PtrArg = *CAI;
     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
-      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());
+      ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
 
       // We can SROA any pointer arguments derived from alloca instructions.
       if (isa<AllocaInst>(PtrArg)) {
-        SROAArgValues[FAI] = PtrArg;
+        SROAArgValues[&*FAI] = PtrArg;
         SROAArgCosts[PtrArg] = 0;
       }
     }
@@ -1423,9 +1423,8 @@ bool InlineCostAnalysis::isInlineViable(Function &F) {
     if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
       return false;
 
-    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
-         ++II) {
-      CallSite CS(II);
+    for (auto &II : *BI) {
+      CallSite CS(&II);
       if (!CS)
         continue;
 
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index b63049c2591..2dfb09c95ad 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -234,7 +234,7 @@ void Lint::visitCallSite(CallSite CS) {
     for (; AI != AE; ++AI) {
       Value *Actual = *AI;
       if (PI != PE) {
-        Argument *Formal = PI++;
+        Argument *Formal = &*PI++;
         Assert(Formal->getType() == Actual->getType(),
                "Undefined behavior: Call argument type mismatches "
                "callee parameter type",
@@ -602,8 +602,8 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
 
 void Lint::visitUnreachableInst(UnreachableInst &I) {
   // This isn't undefined behavior, it's merely suspicious.
-  Assert(&I == I.getParent()->begin() ||
-         std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+  Assert(&I == &I.getParent()->front() ||
+         std::prev(I.getIterator())->mayHaveSideEffects(),
          "Unusual: unreachable immediately preceded by instruction without "
         "side effects",
         &I);
@@ -635,7 +635,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
   // TODO: Look through vector insert/extract/shuffle.
   V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
   if (LoadInst *L = dyn_cast<LoadInst>(V)) {
-    BasicBlock::iterator BBI = L;
+    BasicBlock::iterator BBI = L->getIterator();
     BasicBlock *BB = L->getParent();
     SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
     for (;;) {
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index a544ac0c504..4b2fa3c6505 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -118,7 +118,8 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
   // from/to. If so, the previous load or store would have already trapped,
   // so there is no harm doing an extra load (also, CSE will later eliminate
   // the load entirely).
-  BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
+  BasicBlock::iterator BBI = ScanFrom->getIterator(),
+                       E = ScanFrom->getParent()->begin();
 
   // We can at least always strip pointer casts even though we can't use the
   // base here.
@@ -211,7 +212,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
   while (ScanFrom != ScanBB->begin()) {
     // We must ignore debug info directives when counting (otherwise they
     // would affect codegen).
-    Instruction *Inst = --ScanFrom;
+    Instruction *Inst = &*--ScanFrom;
     if (isa<DbgInfoIntrinsic>(Inst))
       continue;
 
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index 14c3c57e4c9..c2ccb2db209 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1397,7 +1397,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
       if (it->mayWriteToMemory()) {
         StoreInst *St = dyn_cast<StoreInst>(it);
         if (!St) {
-          emitAnalysis(LoopAccessReport(it) <<
+          emitAnalysis(LoopAccessReport(&*it) <<
                        "instruction cannot be vectorized");
           CanVecMem = false;
           return;
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 8ddac8ffb97..7f15e13a6bb 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -621,7 +621,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
 
   // always generate code immediately before the instruction being
   // processed, so that the generated code dominates the same BBs
-  Instruction *PrevInsertPoint = Builder.GetInsertPoint();
+  BuilderTy::InsertPointGuard Guard(Builder);
   if (Instruction *I = dyn_cast<Instruction>(V))
     Builder.SetInsertPoint(I);
 
@@ -650,9 +650,6 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
     Result = unknown();
   }
 
-  if (PrevInsertPoint)
-    Builder.SetInsertPoint(PrevInsertPoint);
-
   // Don't reuse CacheIt since it may be invalid at this point.
   CacheMap[V] = Result;
   return Result;
@@ -742,7 +739,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
 
   // compute offset/size for each PHI incoming pointer
   for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
-    Builder.SetInsertPoint(PHI.getIncomingBlock(i)->getFirstInsertionPt());
+    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
     SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
 
     if (!bothKnown(EdgeData)) {
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index ff4d55e9fd4..3e80bfe1fdf 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -216,7 +216,7 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
     if (!Limit)
       return MemDepResult::getUnknown();
 
-    Instruction *Inst = --ScanIt;
+    Instruction *Inst = &*--ScanIt;
 
     // If this inst is a memory op, get the pointer it accessed
     MemoryLocation Loc;
@@ -502,7 +502,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
 
   // Walk backwards through the basic block, looking for dependencies.
   while (ScanIt != BB->begin()) {
-    Instruction *Inst = --ScanIt;
+    Instruction *Inst = &*--ScanIt;
 
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
       // Debug intrinsics don't (and can't) cause dependencies.
@@ -767,13 +767,13 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
      isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
 
-    LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
-                                          QueryParent, QueryInst);
+    LocalCache = getPointerDependencyFrom(
+        MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
   } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
     CallSite QueryCS(QueryInst);
     bool isReadOnly = AA->onlyReadsMemory(QueryCS);
-    LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
-                                           QueryParent);
+    LocalCache = getCallSiteDependencyFrom(
+        QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
   } else
     // Non-memory instruction.
     LocalCache = MemDepResult::getUnknown();
 
@@ -896,7 +896,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     BasicBlock::iterator ScanPos = DirtyBB->end();
     if (ExistingResult) {
       if (Instruction *Inst = ExistingResult->getResult().getInst()) {
-        ScanPos = Inst;
+        ScanPos = Inst->getIterator();
         // We're removing QueryInst's use of Inst.
         RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                              QueryCS.getInstruction());
@@ -1035,11 +1035,11 @@ MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
     assert(ExistingResult->getResult().getInst()->getParent() == BB &&
            "Instruction invalidated?");
     ++NumCacheDirtyNonLocalPtr;
-    ScanPos = ExistingResult->getResult().getInst();
+    ScanPos = ExistingResult->getResult().getInst()->getIterator();
 
     // Eliminating the dirty entry from 'Cache', so update the reverse info.
     ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
-    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
+    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
   } else {
     ++NumUncacheNonLocalPtr;
   }
@@ -1590,7 +1590,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
   // the entire block to get to this point.
   MemDepResult NewDirtyVal;
   if (!RemInst->isTerminator())
-    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
+    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
 
   ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
   if (ReverseDepIt != ReverseLocalDeps.end()) {
diff --git a/lib/Analysis/ObjCARCInstKind.cpp b/lib/Analysis/ObjCARCInstKind.cpp
index ec606da9485..133b63513c8 100644
--- a/lib/Analysis/ObjCARCInstKind.cpp
+++ b/lib/Analysis/ObjCARCInstKind.cpp
@@ -93,7 +93,7 @@ ARCInstKind llvm::objcarc::GetFunctionClass(const Function *F) {
         .Default(ARCInstKind::CallOrUser);
 
   // One argument.
-  const Argument *A0 = AI++;
+  const Argument *A0 = &*AI++;
   if (AI == AE)
     // Argument is a pointer.
    if (PointerType *PTy = dyn_cast<PointerType>(A0->getType())) {
@@ -131,7 +131,7 @@ ARCInstKind llvm::objcarc::GetFunctionClass(const Function *F) {
   }
 
   // Two arguments, first is i8**.
-  const Argument *A1 = AI++;
+  const Argument *A1 = &*AI++;
   if (AI == AE)
     if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
       if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index ed7386b26f8..86c2f50ac29 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -63,7 +63,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
       // Create a new cast, and leave the old cast in place in case
      // it is being used as an insert point. Clear its operand
      // so that it doesn't hold anything live.
-      Ret = CastInst::Create(Op, V, Ty, "", IP);
+      Ret = CastInst::Create(Op, V, Ty, "", &*IP);
       Ret->takeName(CI);
       CI->replaceAllUsesWith(Ret);
       CI->setOperand(0, UndefValue::get(V->getType()));
@@ -75,12 +75,12 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
 
   // Create a new cast.
   if (!Ret)
-    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
+    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);
 
   // We assert at the end of the function since IP might point to an
   // instruction with different dominance properties than a cast
   // (an invoke for example) and not dominate BIP (but the cast does).
-  assert(SE.DT.dominates(Ret, BIP));
+  assert(SE.DT.dominates(Ret, &*BIP));
 
   rememberInstruction(Ret);
   return Ret;
@@ -143,7 +143,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
 
   // Cast the instruction immediately after the instruction.
   Instruction *I = cast<Instruction>(V);
-  BasicBlock::iterator IP = I; ++IP;
+  BasicBlock::iterator IP = ++I->getIterator();
   if (InvokeInst *II = dyn_cast<InvokeInst>(I))
     IP = II->getNormalDest()->begin();
   if (CatchPadInst *CPI = dyn_cast<CatchPadInst>(I))
@@ -176,7 +176,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
       ScanLimit++;
       if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
           IP->getOperand(1) == RHS)
-        return IP;
+        return &*IP;
       if (IP == BlockBegin) break;
     }
   }
@@ -192,7 +192,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
     if (!Preheader) break;
 
     // Ok, move up a level.
-    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+    Builder.SetInsertPoint(Preheader->getTerminator());
   }
 
   // If we haven't found this binop, insert it.
@@ -485,7 +485,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
           Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
 
   assert(!isa<Instruction>(V) ||
-         SE.DT.dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+         SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
 
   // Expand the operands for a plain byte offset.
   Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
@@ -510,7 +510,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
         ScanLimit++;
         if (IP->getOpcode() == Instruction::GetElementPtr &&
             IP->getOperand(0) == V && IP->getOperand(1) == Idx)
-          return IP;
+          return &*IP;
         if (IP == BlockBegin) break;
       }
     }
@@ -525,7 +525,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
      if (!Preheader) break;
 
      // Ok, move up a level.
-      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+      Builder.SetInsertPoint(Preheader->getTerminator());
     }
 
     // Emit a GEP.
@@ -556,7 +556,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     if (!Preheader) break;
 
     // Ok, move up a level.
-    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+    Builder.SetInsertPoint(Preheader->getTerminator());
   }
 
   // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
@@ -1168,8 +1168,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
   PostIncLoops.clear();
 
   // Expand code for the start value.
-  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
-                                L->getHeader()->begin());
+  Value *StartV =
+      expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());
 
   // StartV must be hoisted into L's preheader to dominate the new phi.
   assert(!isa<Instruction>(StartV) ||
@@ -1186,7 +1186,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
   if (useSubtract)
     Step = SE.getNegativeSCEV(Step);
   // Expand the step somewhere that dominates the loop header.
-  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
 
   // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
   // we actually do emit an addition.  It does not apply if we emit a
@@ -1302,7 +1302,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
   // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
   // or dominated by IVIncInsertPos.
   if (isa<Instruction>(Result) &&
-      !SE.DT.dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) {
+      !SE.DT.dominates(cast<Instruction>(Result),
+                       &*Builder.GetInsertPoint())) {
     // The induction variable's postinc expansion does not dominate this use.
     // IVUsers tries to prevent this case, so it is rare. However, it can
     // happen when an IVUser outside the loop is not dominated by the latch
@@ -1320,7 +1321,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
     {
       // Expand the step somewhere that dominates the loop header.
       BuilderType::InsertPointGuard Guard(Builder);
-      StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+      StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
     }
     Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
   }
@@ -1400,7 +1401,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
              isa<LandingPadInst>(NewInsertPt))
         ++NewInsertPt;
       V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
-                        NewInsertPt);
+                        &*NewInsertPt);
       return V;
     }
 
@@ -1441,7 +1442,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
     BasicBlock *Header = L->getHeader();
     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
-                                  Header->begin());
+                                  &Header->front());
     rememberInstruction(CanonicalIV);
 
     SmallSet<BasicBlock *, 4> PredSeen;
@@ -1586,7 +1587,8 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
 
 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                    Instruction *IP) {
-  Builder.SetInsertPoint(IP->getParent(), IP);
+  assert(IP);
+  Builder.SetInsertPoint(IP);
   return expandCodeFor(SH, Ty);
 }
 
@@ -1604,7 +1606,7 @@ Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
 Value *SCEVExpander::expand(const SCEV *S) {
   // Compute an insertion point for this SCEV object. Hoist the instructions
   // as far out in the loop nest as possible.
-  Instruction *InsertPt = Builder.GetInsertPoint();
+  Instruction *InsertPt = &*Builder.GetInsertPoint();
   for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
        L = L->getParentLoop())
     if (SE.isLoopInvariant(S, L)) {
@@ -1615,18 +1617,18 @@ Value *SCEVExpander::expand(const SCEV *S) {
         // LSR sets the insertion point for AddRec start/step values to the
         // block start to simplify value reuse, even though it's an invalid
         // position. SCEVExpander must correct for this in all cases.
-        InsertPt = L->getHeader()->getFirstInsertionPt();
+        InsertPt = &*L->getHeader()->getFirstInsertionPt();
       }
     } else {
       // If the SCEV is computable at this level, insert it into the header
       // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
-        InsertPt = L->getHeader()->getFirstInsertionPt();
+        InsertPt = &*L->getHeader()->getFirstInsertionPt();
       while (InsertPt != Builder.GetInsertPoint() &&
              (isInsertedInstruction(InsertPt) ||
               isa<DbgInfoIntrinsic>(InsertPt))) {
-        InsertPt = std::next(BasicBlock::iterator(InsertPt));
+        InsertPt = &*std::next(InsertPt->getIterator());
       }
       break;
     }
@@ -1638,7 +1640,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
     return I->second;
 
   BuilderType::InsertPointGuard Guard(Builder);
-  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
+  Builder.SetInsertPoint(InsertPt);
 
   // Expand the expression into instructions.
   Value *V = visit(S);
@@ -1676,8 +1678,8 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
 
   // Emit code for it.
   BuilderType::InsertPointGuard Guard(Builder);
-  PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
-                                           L->getHeader()->begin()));
+  PHINode *V =
+      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
 
   return V;
 }
@@ -1783,7 +1785,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
       if (OrigInc->getType() != IsomorphicInc->getType()) {
         Instruction *IP = nullptr;
         if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
-          IP = PN->getParent()->getFirstInsertionPt();
+          IP = &*PN->getParent()->getFirstInsertionPt();
         else
           IP = OrigInc->getNextNode();
 
@@ -1801,7 +1803,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
       ++NumElim;
       Value *NewIV = OrigPhiRef;
       if (OrigPhiRef->getType() != Phi->getType()) {
-        IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
+        IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
         Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
         NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
       }
diff --git a/lib/Analysis/ScalarEvolutionNormalization.cpp b/lib/Analysis/ScalarEvolutionNormalization.cpp
index b238fe43cc6..b7fd5d50617 100644
--- a/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -109,7 +109,7 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
       SmallVector<const SCEV *, 8> Operands;
       const Loop *L = AR->getLoop();
       // The addrec conceptually uses its operands at loop entry.
-      Instruction *LUser = L->getHeader()->begin();
+      Instruction *LUser = &L->getHeader()->front();
       // Transform each operand.
       for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
            I != E; ++I) {
diff --git a/lib/Analysis/SparsePropagation.cpp b/lib/Analysis/SparsePropagation.cpp
index edd82f5fe29..f5a927b8052 100644
--- a/lib/Analysis/SparsePropagation.cpp
+++ b/lib/Analysis/SparsePropagation.cpp
@@ -328,17 +328,17 @@ void SparseSolver::Solve(Function &F) {
 
 void SparseSolver::Print(Function &F, raw_ostream &OS) const {
   OS << "\nFUNCTION: " << F.getName() << "\n";
-  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
-    if (!BBExecutable.count(BB))
+  for (auto &BB : F) {
+    if (!BBExecutable.count(&BB))
       OS << "INFEASIBLE: ";
     OS << "\t";
-    if (BB->hasName())
-      OS << BB->getName() << ":\n";
+    if (BB.hasName())
+      OS << BB.getName() << ":\n";
     else
       OS << "; anon bb\n";
-    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
-      LatticeFunc->PrintValue(getLatticeState(I), OS);
-      OS << *I << "\n";
+    for (auto &I : BB) {
+      LatticeFunc->PrintValue(getLatticeState(&I), OS);
+      OS << I << "\n";
     }
 
     OS << "\n";
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 8b0850d105e..2a93e0d64d6 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -455,7 +455,7 @@ static bool isValidAssumeForContext(Value *V, const Query &Q) {
     for (BasicBlock::const_iterator I =
            std::next(BasicBlock::const_iterator(Q.CxtI)),
                                     IE(Inv); I != IE; ++I)
-      if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
        return false;
 
     return !isEphemeralValueOf(Inv, Q.CxtI);
@@ -472,14 +472,14 @@ static bool isValidAssumeForContext(Value *V, const Query &Q) {
    // of the block); the common case is that the assume will come first.
    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
-      if (I == Q.CxtI)
+      if (&*I == Q.CxtI)
        return true;
 
    // The context must come first...
    for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(Q.CxtI)),
                                    IE(Inv); I != IE; ++I)
-      if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
       return false;

    return !isEphemeralValueOf(Inv, Q.CxtI);
@@ -3635,16 +3635,17 @@ bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
  SmallSet<const Value *, 16> YieldsPoison;
  YieldsPoison.insert(PoisonI);

-  for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
-       I = I->getNextNode()) {
-    if (I != PoisonI) {
-      const Value *NotPoison = getGuaranteedNonFullPoisonOp(I);
+  for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
+       I != E; ++I) {
+    if (&*I != PoisonI) {
+      const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
      if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
        return true;
-      if (!isGuaranteedToTransferExecutionToSuccessor(I)) return false;
+      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+        return false;
    }
    // Mark poison that propagates from I through uses of I.
-    if (YieldsPoison.count(I)) {
+    if (YieldsPoison.count(&*I)) {
      for (const User *User : I->users()) {
        const Instruction *UserI = cast<Instruction>(User);
        if (UserI->getParent() == BB && propagatesFullPoison(UserI))
-- 
2.11.0