Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+ Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
+ DCHECK(deoptimization_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(intern_table_lock_ != nullptr);
+ DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
+ DCHECK(profiler_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
+ kJniLoadLibraryLock,
kThreadListLock,
kBreakpointInvokeLock,
+ kAllocTrackerLock,
kDeoptimizationLock,
kTraceLock,
kProfilerLock,
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+ // Guards debugger recent allocation records.
+ static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
+
+ // Guards updates to instrumentation to ensure mutual exclusion of
+ // events like deoptimization requests.
+ // TODO: improve name, perhaps instrumentation_update_lock_.
+ static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
- static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+ // Guards maintaining loading library data structures.
+ static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
// Guards breakpoints.
- static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(trace_lock_);
+ static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
// Guards lists of classes within the class linker.
static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
return kDefaultNumAllocRecords;
}
- void Dbg::SetAllocTrackingEnabled(bool enabled) {
- if (enabled) {
+ void Dbg::SetAllocTrackingEnabled(bool enable) {
+ Thread* self = Thread::Current();
+ if (enable) {
{
- MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
- if (recent_allocation_records_ == nullptr) {
- alloc_record_max_ = GetAllocTrackerMax();
- LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
- << kMaxAllocRecordStackDepth << " frames, taking "
- << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
- alloc_record_head_ = alloc_record_count_ = 0;
- recent_allocation_records_ = new AllocRecord[alloc_record_max_];
- CHECK(recent_allocation_records_ != nullptr);
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ != NULL) {
+ if (recent_allocation_records_ != nullptr) {
+ return; // Already enabled, bail.
}
- CHECK(recent_allocation_records_ != NULL);
+ alloc_record_max_ = GetAllocTrackerMax();
+ LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
+ << kMaxAllocRecordStackDepth << " frames, taking "
+ << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
+ DCHECK_EQ(alloc_record_head_, 0U);
+ DCHECK_EQ(alloc_record_count_, 0U);
+ recent_allocation_records_ = new AllocRecord[alloc_record_max_];
+ CHECK(recent_allocation_records_ != nullptr);
}
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
} else {
- Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
{
- MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ ScopedObjectAccess soa(self); // For type_cache_.Clear();
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ == NULL) {
+ if (recent_allocation_records_ == nullptr) {
+ return; // Already disabled, bail.
+ }
LOG(INFO) << "Disabling alloc tracker";
delete[] recent_allocation_records_;
- recent_allocation_records_ = NULL;
+ recent_allocation_records_ = nullptr;
+ alloc_record_head_ = 0;
+ alloc_record_count_ = 0;
type_cache_.Clear();
}
+ // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
+ Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
}
}
void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
Thread* self = Thread::Current();
- CHECK(self != NULL);
+ CHECK(self != nullptr);
- MutexLock mu(self, *alloc_tracker_lock_);
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ == NULL) {
+ if (recent_allocation_records_ == nullptr) {
+ // In the process of shutting down recording, bail.
return;
}
void Dbg::DumpRecentAllocations() {
ScopedObjectAccess soa(Thread::Current());
- MutexLock mu(soa.Self(), *alloc_tracker_lock_);
+ MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
- if (recent_allocation_records_ == NULL) {
+ if (recent_allocation_records_ == nullptr) {
LOG(INFO) << "Not recording tracked allocations";
return;
}