From: Hiroshi Yamauchi
Date: Sat, 19 Mar 2016 00:17:52 +0000 (-0700)
Subject: Fix a CC 145-alloc-tracking-stress deadlock.
X-Git-Tag: android-x86-7.1-r1~312^2~5^2~29^2
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=6f0c6cdae6962532c4699df9dd5af6289b86d1c6;p=android-x86%2Fart.git

Fix a CC 145-alloc-tracking-stress deadlock.

When the allocation tracking gets disabled, there may be threads
blocking on the system weak access for recording allocations, but when
GC reenables the system weak access, it fails to wake up those blocked
threads (which causes a deadlock) because the broadcast call is guarded
by Heap::IsAllocTrackingEnabled(), which is false at this point.

Broadcast in Heap::BroadcastForNewAllocationRecords() regardless of
Heap::IsAllocTrackingEnabled(), which is safe.

Also apply a similar fix for the non-CC case.

Bug: 27467554
Change-Id: I74cf88bceb306589ce11a19a688be223e099e88a
---

diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 73190455c..e3714bbde 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -288,6 +288,12 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, m
     records->new_record_condition_.WaitHoldingLocks(self);
   }
 
+  if (!heap->IsAllocTrackingEnabled()) {
+    // Return if the allocation tracking has been disabled while waiting for system weak access
+    // above.
+    return;
+  }
+
   DCHECK_LE(records->Size(), records->alloc_record_max_);
 
   // Get stack trace.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f4fccee03..4ff0c6bfb 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3960,31 +3960,31 @@ void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
 
 void Heap::AllowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->AllowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->AllowNewAllocationRecords();
   }
 }
 
 void Heap::DisallowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->DisallowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->DisallowNewAllocationRecords();
   }
 }
 
 void Heap::BroadcastForNewAllocationRecords() const {
   CHECK(kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->BroadcastForNewAllocationRecords();
-    }
+  // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
+  // be set to false while some threads are waiting for system weak access in
+  // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->BroadcastForNewAllocationRecords();
   }
 }
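
For illustration only, a minimal standalone C++ sketch of the wake-up pattern this fix depends on: the broadcaster signals the condition unconditionally, and the waiter rechecks the tracking flag after it wakes, bailing out if tracking was disabled while it slept. Names and globals here are simplified stand-ins, not ART's actual classes.

// Simplified model of the waiter/broadcaster interaction fixed above.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex g_mutex;
std::condition_variable g_cond;
bool g_weak_access_enabled = false;    // Stand-in for "system weak access allowed".
bool g_alloc_tracking_enabled = true;  // Stand-in for Heap::IsAllocTrackingEnabled().

// Waiter side, analogous to AllocRecordObjectMap::RecordAllocation().
void RecordAllocation() {
  std::unique_lock<std::mutex> lock(g_mutex);
  // Block until weak access is re-enabled (e.g. the GC phase finishes).
  g_cond.wait(lock, [] { return g_weak_access_enabled; });
  // Recheck after waking: tracking may have been disabled while we waited.
  if (!g_alloc_tracking_enabled) {
    return;
  }
  std::cout << "recorded allocation\n";
}

// Broadcaster side, analogous to Heap::BroadcastForNewAllocationRecords().
// It broadcasts even though tracking may already be disabled, so threads
// parked in RecordAllocation() still wake up and return instead of deadlocking.
void BroadcastForNewAllocationRecords() {
  std::lock_guard<std::mutex> lock(g_mutex);
  g_weak_access_enabled = true;
  g_cond.notify_all();
}

int main() {
  std::thread waiter(RecordAllocation);
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  {
    // Disable tracking while the waiter is blocked.
    std::lock_guard<std::mutex> lock(g_mutex);
    g_alloc_tracking_enabled = false;
  }
  // Without this unconditional broadcast, the waiter would never wake up.
  BroadcastForNewAllocationRecords();
  waiter.join();
  return 0;
}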