From d6527cf8e824d9057f32755f2ff4bdcf46c7095b Mon Sep 17 00:00:00 2001 From: Mathieu Chartier Date: Fri, 10 Oct 2014 12:45:50 -0700 Subject: [PATCH] Hold mutator lock in DdmSendHeapSegments for all spaces Previously we were releasing the mutator lock in DdmSendHeapSegments and only reacquiring it for RosAlloc spaces. This was causing problems since the HeapChunkCallback accesses object fields through mirror. Bug: 17950534 Change-Id: I6bd71f43e84eae8585993da656bf1577095607a9 --- runtime/debugger.cc | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 7fb199c0e..248964abc 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -4250,11 +4250,7 @@ void Dbg::DdmSendHeapSegments(bool native) { Thread* self = Thread::Current(); - // To allow the Walk/InspectAll() below to exclusively-lock the - // mutator lock, temporarily release the shared access to the - // mutator lock here by transitioning to the suspended state. Locks::mutator_lock_->AssertSharedHeld(self); - self->TransitionFromRunnableToSuspended(kSuspended); // Send a series of heap segment chunks. HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native); @@ -4268,32 +4264,39 @@ void Dbg::DdmSendHeapSegments(bool native) { gc::Heap* heap = Runtime::Current()->GetHeap(); for (const auto& space : heap->GetContinuousSpaces()) { if (space->IsDlMallocSpace()) { + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an // allocation then the first sizeof(size_t) may belong to it. 
context.SetChunkOverhead(sizeof(size_t)); space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); } else if (space->IsRosAllocSpace()) { context.SetChunkOverhead(0); - space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); + // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since + // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock. + self->TransitionFromRunnableToSuspended(kSuspended); + ThreadList* tl = Runtime::Current()->GetThreadList(); + tl->SuspendAll(); + { + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); + space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); + } + tl->ResumeAll(); + self->TransitionFromSuspendedToRunnable(); } else if (space->IsBumpPointerSpace()) { + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); context.SetChunkOverhead(0); - ReaderMutexLock mu(self, *Locks::mutator_lock_); - WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_); space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context); } else { UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space; } context.ResetStartOfNextChunk(); } + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); // Walk the large objects, these are not in the AllocSpace. context.SetChunkOverhead(0); heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); } - // Shared-lock the mutator lock back. - self->TransitionFromSuspendedToRunnable(); - Locks::mutator_lock_->AssertSharedHeld(self); - // Finally, send a heap end chunk. Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id); } -- 2.11.0