// An ObjC-Identified object can't alias a load if it is never locally stored.
if (AIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(B))
+ return isStoredObjCPointer(A);
if (BIsIdentified) {
- // If both pointers have provenance, they can be directly compared.
- if (A != B)
- return false;
- } else {
- if (isa<LoadInst>(B))
- return isStoredObjCPointer(A);
+ // Check for an obvious escape.
+ if (isa<LoadInst>(A))
+ return isStoredObjCPointer(B);
+ // Both pointers are identified and escapes aren't an evident problem.
+ return false;
}
- } else {
- if (BIsIdentified && isa<LoadInst>(A))
+ } else if (BIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(A))
return isStoredObjCPointer(B);
}
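
For illustration only (not part of the patch), a minimal IR sketch of the case analysis above, with hypothetical names: %obj is ObjC-identified because it is a call return value, so a pointer produced by a load cannot be %obj as long as %obj is never stored to memory; once %obj escapes through a store, isStoredObjCPointer() must conservatively allow a later load to yield it.

declare i8* @make_object()
@gptr = external global i8*

define void @alias_sketch() {
entry:
  %obj = call i8* @make_object()   ; identified: a call return value
  %other = load i8** @gptr         ; cannot be %obj while %obj is never stored
  store i8* %obj, i8** @gptr       ; %obj escapes here
  %maybe = load i8** @gptr         ; may now yield %obj
  ret void
}
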
/// PtrState - This class summarizes several per-pointer runtime properties
/// which are propagated through the flow graph.
class PtrState {
- /// NestCount - The known minimum level of retain+release nesting.
- unsigned NestCount;
-
/// KnownPositiveRefCount - True if the reference count is known to
/// be incremented.
bool KnownPositiveRefCount;
/// TODO: Encapsulate this better.
RRInfo RRI;
- PtrState() : NestCount(0), KnownPositiveRefCount(false), Partial(false),
+ PtrState() : KnownPositiveRefCount(false), Partial(false),
Seq(S_None) {}
  void SetKnownPositiveRefCount() {
    KnownPositiveRefCount = true;
  }
- void IncrementNestCount() {
- if (NestCount != UINT_MAX) ++NestCount;
- }
-
- void DecrementNestCount() {
- if (NestCount != 0) --NestCount;
- }
-
- bool IsKnownNested() const {
- return NestCount > 0;
- }
-
void SetSeq(Sequence NewSeq) {
Seq = NewSeq;
}
void PtrState::Merge(const PtrState &Other, bool TopDown) {
Seq = MergeSeqs(Seq, Other.Seq, TopDown);
KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;
- NestCount = std::min(NestCount, Other.NestCount);
// We can't merge a plain objc_retain with an objc_retainBlock.
if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock)
    Seq = S_None;
}
+/// IsPotentialUse - Test whether the given value is possibly a
+/// reference-counted pointer, including tests which utilize AliasAnalysis.
+static bool IsPotentialUse(const Value *Op, AliasAnalysis &AA) {
+ // First make the rudimentary check.
+ if (!IsPotentialUse(Op))
+ return false;
+
+ // Objects in constant memory are not reference-counted.
+ if (AA.pointsToConstantMemory(Op))
+ return false;
+
+ // Pointers in constant memory are not pointing to reference-counted objects.
+ if (const LoadInst *LI = dyn_cast<LoadInst>(Op))
+ if (AA.pointsToConstantMemory(LI->getPointerOperand()))
+ return false;
+
+ // Otherwise assume the worst.
+ return true;
+}
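
As a hedged illustration (hypothetical names, not from the patch): with the AliasAnalysis-aware overload, a pointer loaded from a constant global is no longer treated as a potential reference-counted use, because constant memory cannot hold a pointer to a reference-counted object.

@const_slot = external constant i8*

define void @potential_use_sketch() {
entry:
  ; The pointer operand @const_slot is constant memory, so the new
  ; IsPotentialUse(%p, AA) returns false even though the basic
  ; one-argument IsPotentialUse(%p) check would accept %p.
  %p = load i8** @const_slot
  ret void
}
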
+
/// CanAlterRefCount - Test whether the given instruction can result in a
/// reference count modification (positive or negative) for the pointer's
/// object.
for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I) {
const Value *Op = *I;
- if (IsPotentialUse(Op) && PA.related(Ptr, Op))
+ if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
return true;
}
return false;
// Comparing a pointer with null, or any other constant, isn't really a use,
// because we don't care what the pointer points to, or about the values
// of any other dynamic reference-counted pointers.
- if (!IsPotentialUse(ICI->getOperand(1)))
+ if (!IsPotentialUse(ICI->getOperand(1), *PA.getAA()))
return false;
} else if (ImmutableCallSite CS = static_cast<const Value *>(Inst)) {
// For calls, just check the arguments (and not the callee operand).
for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(),
OE = CS.arg_end(); OI != OE; ++OI) {
const Value *Op = *OI;
- if (IsPotentialUse(Op) && PA.related(Ptr, Op))
+ if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
return true;
}
return false;
const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand());
// If we can't tell what the underlying object was, assume there is a
// dependence.
- return IsPotentialUse(Op) && PA.related(Op, Ptr);
+ return IsPotentialUse(Op, *PA.getAA()) && PA.related(Op, Ptr);
}
// Check each operand for a match.
for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end();
OI != OE; ++OI) {
const Value *Op = *OI;
- if (IsPotentialUse(Op) && PA.related(Ptr, Op))
+ if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
return true;
}
return false;
MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release);
S.RRI.ReleaseMetadata = ReleaseMetadata;
- S.RRI.KnownSafe = S.IsKnownNested() || S.IsKnownIncremented();
+ S.RRI.KnownSafe = S.IsKnownIncremented();
S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
S.RRI.Calls.insert(Inst);
- S.IncrementNestCount();
+ S.SetKnownPositiveRefCount();
break;
}
case IC_RetainBlock:
PtrState &S = MyStates.getPtrBottomUpState(Arg);
S.SetKnownPositiveRefCount();
- S.DecrementNestCount();
switch (S.GetSeq()) {
case S_Stop:
S.ResetSequenceProgress(S_Retain);
S.RRI.IsRetainBlock = Class == IC_RetainBlock;
- // Don't check S.IsKnownIncremented() here because it's not sufficient.
- S.RRI.KnownSafe = S.IsKnownNested();
+ S.RRI.KnownSafe = S.IsKnownIncremented();
S.RRI.Calls.insert(Inst);
}
- S.IncrementNestCount();
+ S.SetKnownPositiveRefCount();
      // A retain can be a potential use; proceed to the generic checking
// code below.
Arg = GetObjCArg(Inst);
PtrState &S = MyStates.getPtrTopDownState(Arg);
- S.DecrementNestCount();
+ S.ClearRefCount();
switch (S.GetSeq()) {
case S_Retain:
declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
declare void @use(i8*)
declare void @objc_release(i8*)
+declare i8* @def()
+declare void @__crasher_block_invoke(i8* nocapture)
+declare i8* @objc_retainBlock(i8*)
+declare void @__crasher_block_invoke1(i8* nocapture)
!0 = metadata !{}
ret void
}
-; Delete a nested retain+release pair.
+; TODO: Delete a nested retain+release pair.
+; The optimizer currently can't do this, because it isn't sophisticated enough
+; in reasoning about nesting.
; CHECK: define void @test6(
; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
; CHECK: }
define void @test6() nounwind {
entry:
ret void
}
-; Delete a nested retain+release pair.
+; TODO: Delete a nested retain+release pair.
+; The optimizer currently can't do this, because it isn't sophisticated enough
+; in reasoning about nesting.
; CHECK: define void @test7(
; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
; CHECK: }
define void @test7() nounwind {
entry:
ret void
}
-; Like test9, but without a split backedge. This we can optimize.
+; Like test9, but without a split backedge. TODO: optimize this.
; CHECK: define void @test9b(
; CHECK: call i8* @objc_retain
; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
; CHECK: }
define void @test9b() nounwind {
entry:
ret void
}
-; Like test10, but without a split backedge. This we can optimize.
+; Like test10, but without a split backedge. TODO: optimize this.
; CHECK: define void @test10b(
; CHECK: call i8* @objc_retain
; CHECK: call i8* @objc_retain
-; CHECK-NOT: @objc_retain
+; CHECK: @objc_retain
; CHECK: }
define void @test10b() nounwind {
entry:
call void @objc_release(i8* %0) nounwind, !clang.imprecise_release !0
ret void
}
+
+; Pointers to strong pointers can obscure provenance relationships. Be conservative
+; in the face of escaping pointers. rdar://12150909.
+
+%struct.__block_d = type { i64, i64 }
+
+@_NSConcreteStackBlock = external global i8*
+@__block_d_tmp = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
+@__block_d_tmp5 = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
+
+; CHECK: define void @test11(
+; CHECK: tail call i8* @objc_retain(i8* %call) nounwind
+; CHECK: tail call i8* @objc_retain(i8* %call) nounwind
+; CHECK: call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+; CHECK: }
+define void @test11() {
+entry:
+ %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
+ %block9 = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
+ %call = call i8* @def(), !clang.arc.no_objc_arc_exceptions !0
+ %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
+ store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
+ store i32 1107296256, i32* %block.flags, align 8
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
+ store i32 0, i32* %block.reserved, align 4
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
+ store i8* bitcast (void (i8*)* @__crasher_block_invoke to i8*), i8** %block.invoke, align 8
+ %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
+ store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp to %struct.__block_d*), %struct.__block_d** %block.d, align 8
+ %foo2 = tail call i8* @objc_retain(i8* %call) nounwind
+ store i8* %foo2, i8** %foo, align 8
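+  ; The retained pointer is stored into the block's captured slot here; this
+  ; escape is why the optimizer must be conservative about the retains and
+  ; releases of %call in this function.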
+ %foo4 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block to i8*
+ %foo5 = call i8* @objc_retainBlock(i8* %foo4) nounwind
+ call void @use(i8* %foo5), !clang.arc.no_objc_arc_exceptions !0
+ call void @objc_release(i8* %foo5) nounwind
+ %strongdestroy = load i8** %foo, align 8
+ call void @objc_release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
+ %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
+ %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
+ store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa11, align 8
+ %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
+ store i32 1107296256, i32* %block.flags12, align 8
+ %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
+ store i32 0, i32* %block.reserved13, align 4
+ %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
+ store i8* bitcast (void (i8*)* @__crasher_block_invoke1 to i8*), i8** %block.invoke14, align 8
+ %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
+ store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp5 to %struct.__block_d*), %struct.__block_d** %block.d15, align 8
+ %foo18 = call i8* @objc_retain(i8* %call) nounwind
+ store i8* %call, i8** %foo10, align 8
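+  ; The pointer escapes again through the second block's captured slot.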
+ %foo20 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9 to i8*
+ %foo21 = call i8* @objc_retainBlock(i8* %foo20) nounwind
+ call void @use(i8* %foo21), !clang.arc.no_objc_arc_exceptions !0
+ call void @objc_release(i8* %foo21) nounwind
+ %strongdestroy25 = load i8** %foo10, align 8
+ call void @objc_release(i8* %strongdestroy25) nounwind, !clang.imprecise_release !0
+ call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+ ret void
+}