//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
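///
/// For example (illustrative IR), a retain/release pair on the same pointer
/// with nothing in between that can touch its reference count is one of the
/// redundant patterns this pass removes:
///
///   %0 = tail call i8* @objc_retain(i8* %x)
///   ; ... code that provably does not decrement the count of %x ...
///   tail call void @objc_release(i8* %x)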
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//

#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
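///
/// For example (illustrative IR), if the argument is a bitcast such as
///
///   %1 = bitcast i8* %0 to i8*
///
/// and %1 has a single use, the search continues through the cast to %0.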
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but the uses
  // are all trivial, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");

STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");

namespace {

  /// Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount = 0;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount = 0;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() = default;

    using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
    using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }
    bool hasTopDownPtrs() const {
      return !PerPtrTopDown.empty();
    }

    using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
    using const_bottom_up_ptr_iterator =
        decltype(PerPtrBottomUp)::const_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }
    bool hasBottomUpPtrs() const {
      return !PerPtrBottomUp.empty();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
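    ///
    /// For example, in a diamond CFG (entry A branching to B and C, which both
    /// branch to exit D), A has TopDownPathCount 1 and BottomUpPathCount 2, so
    /// 1 * 2 = 2 unique entry-to-exit paths pass through A, while B and C each
    /// have 1 * 1 = 1.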
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set or if
      // all the lower bits of Product are set.
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }

    // Specialized CFG utilities.
    using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when, by the
  // addition above, TopDownPathCount becomes OverflowOccurredValue even though
  // "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when, by the
  // addition above, BottomUpPathCount becomes OverflowOccurredValue even though
  // "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << "    TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  OS << "    BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

  /// The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// A cache of references to runtime entry point constants.
    ARCRuntimeEntryPoints EP;

    /// A cache of MDKinds that can be passed into other functions to propagate
    /// MDKind identifiers.
    ARCMDKindCache MDKindCache;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   ARCInstKind &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                  BlotMapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
               BlotMapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   BlotMapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

    bool
    PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                             BlotMapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases, Module *M,
                             Instruction *Retain,
                             SmallVectorImpl<Instruction *> &DeadInsts,
                             RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                             Value *Arg, bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              BlotMapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases, Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

#ifndef NDEBUG
    void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

    void getAnalysisUsage(AnalysisUsage &AU) const override;
    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    void releaseMemory() override;

  public:
    static char ID;

    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };

} // end anonymous namespace

char ObjCARCOpt::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
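///
/// For example (illustrative IR), after inlining brings the two halves of the
/// return-value handshake together,
///
///   %2 = call i8* @objc_autoreleaseReturnValue(i8* %1)
///   %3 = call i8* @objc_retainAutoreleasedReturnValue(i8* %1)
///
/// the two calls cancel out and both can be erased.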
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV->getIterator(),
                       Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do
      --I;
    while (I != Begin && IsNoopInstruction(&*I));
    if (GetBasicARCInstKind(&*I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(&*I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(&*I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
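///
/// For example (illustrative IR), if the autoreleased value never reaches a
/// ret instruction,
///
///   %1 = call i8* @objc_autoreleaseReturnValue(i8* %0)
///   call void @use(i8* %1)
///
/// the call can be downgraded to a plain @objc_autorelease.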
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
} // end anonymous namespace

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isFuncletEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
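    // (For example, @objc_loadWeak(i8** null) is UB; the transformation below
    // makes the UB explicit with a store of undef to a null address and
    // replaces the call's result with undef.)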
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                             MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
              "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
              << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }

    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                   << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, and the call
    // is not a release that doesn't have the clang.imprecise_release tag, the
    // call could be pushed up to just those paths with non-null incoming
    // values. For now, don't bother splitting critical edges for this.
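    //
    // For example (illustrative IR), given
    //
    //   %p = phi i8* [ %x, %nonnull_bb ], [ null, %null_bb ]
    //   call void @objc_release(i8* %p), !clang.imprecise_release !0
    //
    // the release can be cloned into %nonnull_bb and the original erased,
    // since releasing null is a no-op.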
    if (Class == ARCInstKind::Release &&
        !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
      continue;

    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                     .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::ClaimRV:
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              CallInst *Clone = cast<CallInst>(CloneCallInstForBB(
                  *CInst, *InsertPos->getParent(), BlockColors));
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                              "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that this
    // code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the bottom-up pointer is in an S_None state, clear the sequence
      // progress, since a finished sequence in the bottom up state suggests
      // a mismatch between retains and releases. This is true for all three
      // cases that we are handling here: S_Retain, S_Use, and S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch (S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state foregoing the fact that we are known
      // safe, stop code motion. This is because whether or not it is safe to
      // remove RR pairs via KnownSafe is an orthogonal concept to whether we
      // are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n"
               << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "    Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}

bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These can not be uses of the pointers we are tracking.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n"
               << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    DEBUG(dbgs() << "    Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);
  }

  DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
               << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}

static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
    succ_iterator SE(TI, false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
        SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there may also be blocks which we treat
  // as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (BasicBlock &ExitBB : F) {
    BBState &MyStates = BBStates[&ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
    Visited.insert(&ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}

// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {
  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder))
    BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(PostOrder))
    TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);

  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}

bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    Instruction *Retain,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
    SmallVector<Instruction *, 4> NewReleases;
    for (Instruction *NewRetain : NewRetains) {
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
              NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
              NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (Instruction *NewRelease : NewReleases) {
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    if (NewRetains.empty()) break;
  }

  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewDelta = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
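    // (Informally: NewDelta accumulates, with sign, the entry-to-exit path
    // counts of the chosen insertion points; a nonzero sum would mean some
    // path sees a different number of retains than releases.)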
    if (NewDelta != 0)
      return false;
  }

  // At this point, we are not going to remove any RR pairs, but we still are
  // able to move RR pairs. If one of our pointers is afflicted with
  // CFGHazards, we cannot perform such code motion so exit early.
  const bool WillPerformCodeMotion =
      !RetainsToMove.ReverseInsertPts.empty() ||
      !ReleasesToMove.ReverseInsertPts.empty();
  if (CFGHazardAfflicted && WillPerformCodeMotion)
    return false;

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // anything.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}
/// Identify pairings between the retains and releases, and delete and/or move
/// them.
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
            dyn_cast<GlobalVariable>(
              GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;
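
    // (For example, a pointer loaded from a constant global, such as a
    // compiler-emitted constant string object, may be retained and released
    // but is never deallocated, which is why such pairs are safe to delete
    // regardless of intervening operations.)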

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    RRInfo RetainsToMove, ReleasesToMove;

    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, Retain, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
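///
/// For example (an illustrative sketch, not from a test case), a repeated
/// load of the same weak pointer with no intervening writes or clobbering
/// calls
/// \code
///    %1 = call i8* @objc_loadWeak(i8** %p)
///    %2 = call i8* @objc_loadWeak(i8** %p)
/// \endcode
/// can forward %1 to the users of %2 and delete the second load.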
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.,
        // replace this load's value with the stored value.
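        // For instance (illustrative IR): after
        //    call i8* @objc_storeWeak(i8** %p, i8* %v)
        // a later objc_loadWeak(i8** %p) that must-aliases %p can be
        // replaced by %v, the stored value.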
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
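  // For example (an illustrative sketch):
  //    %w = alloca i8*
  //    %0 = call i8* @objc_initWeak(i8** %w, i8* %v)
  //    ...
  //    call void @objc_destroyWeak(i8** %w)
  // If %w is used only by the weak entry points shown, the whole group can
  // be deleted outright.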
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases which
/// can be eliminated.
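///
/// For example (an illustrative sketch), a pair such as
/// \code
///    %0 = call i8* @objc_retain(i8* %p)
///    ; ... no decrements or interesting uses of %p ...
///    call void @objc_release(i8* %p)
/// \endcode
/// can be deleted when the flow analysis proves the pairing is balanced
/// and safe.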
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
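///
/// For example (illustrative):
/// \code
///    %call = call i8* @something(...)
///    %0 = call i8* @objc_retain(i8* %call)
/// \endcode
/// Here the retain's argument is the return value of the preceding call,
/// with nothing in between that can change its reference count.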
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
                             SmallPtrSetImpl<Instruction *> &DepInsts,
                             SmallPtrSetImpl<const BasicBlock *> &Visited,
                             ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                   DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return false;

  auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return false;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call;
}

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
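///
/// For instance (illustrative):
/// \code
///    %1 = call i8* @objc_retain(i8* %p)
///    %2 = call i8* @objc_autorelease(i8* %1)
/// \endcode
/// Here the retain immediately precedes the autorelease with nothing in
/// between that can change the ref count of %p.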
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  SmallPtrSetImpl<Instruction *> &DepInsts,
                                  SmallPtrSetImpl<const BasicBlock *> &Visited,
                                  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
                   BB, Autorelease, DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
                                       SmallPtrSetImpl<const BasicBlock *> &V,
                                       ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
                   BB, Ret, DepInsts, V, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///    %call = call i8* @something(...)
///    %2 = call i8* @objc_retain(i8* %call)
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
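///
/// After the transformation, the returned value comes straight from the
/// call (an illustrative sketch of the result):
/// \code
///    %call = call i8* @something(...)
///    ret i8* %call
/// \endcode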
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (BasicBlock &BB: F) {
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
    if (!Ret)
      continue;

    DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease = FindPredecessorAutoreleaseWithSafePath(
        Arg, &BB, Ret, DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Autorelease)
      continue;

    CallInst *Retain = FindPredecessorRetainWithSafePath(
        Arg, Autorelease->getParent(), Autorelease, DependingInstructions,
        Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                          DependingInstructions,
                                                          Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!HasSafePathToCall)
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
                 << *Autorelease << "\n");
    EraseInstruction(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  Statistic &NumRetains =
      AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  Statistic &NumReleases =
      AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Intuitively, objc_retain and others are nocapture; however, in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);

  return false;
}

bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
        "\n");

  PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);

  // Optimizations for weak pointers.
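  // UsedInThisFunction is a bit mask with one bit per ARCInstKind; each test
  // below fires only if at least one of the relevant entry points was seen
  // by OptimizeIndividualCalls above.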
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {
  PA.clear();
}