From 4d25df3f76f864b7629ac8c0046d46997f293d8d Mon Sep 17 00:00:00 2001 From: Sebastien Hertz Date: Fri, 21 Mar 2014 17:44:46 +0100 Subject: [PATCH] Refactor deoptimization support in debugger This CL prepares breakpoint support for inlined methods where we'll have to deoptimize everything. We move deoptimization-related information to Dbg class only (deoptimization request queue, full deoptimization event count and deoptimization lock). We replace MethodInstrumentationRequest by DeoptimizationRequest. This is used to know which kind of deoptimization is required for a particular event. It also simplifies lock ordering a bit during event setup: we no longer need to hold the deoptimization lock while holding the breakpoint lock. Moreover, the deoptimization lock should be held only after the event list lock. Bug: 12187616 Change-Id: Iff13f004adaeb25e5d609238bacce0b9720510e6 --- runtime/base/mutex.cc | 4 - runtime/base/mutex.h | 5 +- runtime/debugger.cc | 185 ++++++++++++++++++++++++++++----------------- runtime/debugger.h | 64 ++++++++++++---- runtime/instrumentation.h | 4 +- runtime/jdwp/jdwp.h | 4 +- runtime/jdwp/jdwp_event.cc | 41 +++++----- runtime/jdwp/jdwp_main.cc | 1 - 8 files changed, 186 insertions(+), 122 deletions(-) diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index fdf5763c3..52a167269 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -31,7 +31,6 @@ namespace art { Mutex* Locks::abort_lock_ = nullptr; Mutex* Locks::breakpoint_lock_ = nullptr; -Mutex* Locks::deoptimization_lock_ = nullptr; ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr; ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr; Mutex* Locks::logging_lock_ = nullptr; @@ -812,7 +811,6 @@ void Locks::Init() { // Already initialized.
DCHECK(abort_lock_ != nullptr); DCHECK(breakpoint_lock_ != nullptr); - DCHECK(deoptimization_lock_ != nullptr); DCHECK(classlinker_classes_lock_ != nullptr); DCHECK(heap_bitmap_lock_ != nullptr); DCHECK(logging_lock_ != nullptr); @@ -829,8 +827,6 @@ void Locks::Init() { DCHECK(breakpoint_lock_ == nullptr); breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock); - DCHECK(deoptimization_lock_ == nullptr); - deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock); DCHECK(classlinker_classes_lock_ == nullptr); classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock", kClassLinkerClassesLock); diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 55ec1c383..4b881f694 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -537,11 +537,8 @@ class Locks { // Guards breakpoints. static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_); - // Guards deoptimization requests. - static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_); - // Guards trace requests. - static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_); + static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_); // Guards profile objects. static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_); diff --git a/runtime/debugger.cc b/runtime/debugger.cc index c18d5c6e0..62019fa02 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -200,17 +200,9 @@ size_t Dbg::alloc_record_head_ = 0; size_t Dbg::alloc_record_count_ = 0; // Deoptimization support. -struct MethodInstrumentationRequest { - bool deoptimize; - - // Method for selective deoptimization. NULL means full deoptimization. - mirror::ArtMethod* method; - - MethodInstrumentationRequest(bool deoptimize, mirror::ArtMethod* method) - : deoptimize(deoptimize), method(method) {} -}; -// TODO we need to visit associated methods as roots. 
-static std::vector<MethodInstrumentationRequest> gDeoptimizationRequests GUARDED_BY(Locks::deoptimization_lock_); +Mutex* Dbg::deoptimization_lock_ = nullptr; +std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_; +size_t Dbg::full_deoptimization_event_count_ = 0; // Breakpoints. static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_); @@ -238,6 +230,12 @@ void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t t } } +void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) { + if (method != nullptr) { + callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger); + } +} + static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc) LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -502,6 +500,7 @@ void Dbg::StartJdwp() { gRegistry = new ObjectRegistry; alloc_tracker_lock_ = new Mutex("AllocTracker lock"); + deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock); // Init JDWP if the debugger is enabled. This may connect out to a // debugger, passively listen for a debugger, or block waiting for a // debugger. @@ -524,9 +523,17 @@ void Dbg::StartJdwp() { } void Dbg::VisitRoots(RootCallback* callback, void* arg) { - MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); - for (Breakpoint& bp : gBreakpoints) { - bp.VisitRoots(callback, arg); + { + MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); + for (Breakpoint& bp : gBreakpoints) { + bp.VisitRoots(callback, arg); + } + } + if (deoptimization_lock_ != nullptr) { // only true if the debugger is started.
+ MutexLock mu(Thread::Current(), *deoptimization_lock_); + for (DeoptimizationRequest& req : deoptimization_requests_) { + req.VisitRoots(callback, arg); + } } } @@ -539,6 +546,8 @@ void Dbg::StopJdwp() { gRegistry = nullptr; delete alloc_tracker_lock_; alloc_tracker_lock_ = nullptr; + delete deoptimization_lock_; + deoptimization_lock_ = nullptr; } void Dbg::GcDidFinish() { @@ -605,8 +614,9 @@ void Dbg::GoActive() { } { - MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); - CHECK_EQ(gDeoptimizationRequests.size(), 0U); + MutexLock mu(Thread::Current(), *deoptimization_lock_); + CHECK_EQ(deoptimization_requests_.size(), 0U); + CHECK_EQ(full_deoptimization_event_count_, 0U); } Runtime* runtime = Runtime::Current(); @@ -646,8 +656,9 @@ void Dbg::Disconnected() { // Since we're going to disable deoptimization, we clear the deoptimization requests queue. // This prevents us from having any pending deoptimization request when the debugger attaches // to us again while no event has been requested yet. - MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); - gDeoptimizationRequests.clear(); + MutexLock mu(Thread::Current(), *deoptimization_lock_); + deoptimization_requests_.clear(); + full_deoptimization_event_count_ = 0U; } runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, instrumentation::Instrumentation::kMethodEntered | @@ -2546,44 +2557,86 @@ void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object, } } -static void ProcessDeoptimizationRequests() - LOCKS_EXCLUDED(Locks::deoptimization_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { - Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); - MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); +// Process request while all mutator threads are suspended. 
+void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) { instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); - for (const MethodInstrumentationRequest& request : gDeoptimizationRequests) { - mirror::ArtMethod* const method = request.method; - if (method != nullptr) { - // Selective deoptimization. - if (request.deoptimize) { - VLOG(jdwp) << "Deoptimize method " << PrettyMethod(method); - instrumentation->Deoptimize(method); - } else { - VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(method); - instrumentation->Undeoptimize(method); + switch (request.kind) { + case DeoptimizationRequest::kNothing: + LOG(WARNING) << "Ignoring empty deoptimization request."; + break; + case DeoptimizationRequest::kFullDeoptimization: + VLOG(jdwp) << "Deoptimize the world"; + instrumentation->DeoptimizeEverything(); + break; + case DeoptimizationRequest::kFullUndeoptimization: + VLOG(jdwp) << "Undeoptimize the world"; + instrumentation->UndeoptimizeEverything(); + break; + case DeoptimizationRequest::kSelectiveDeoptimization: + VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method); + instrumentation->Deoptimize(request.method); + break; + case DeoptimizationRequest::kSelectiveUndeoptimization: + VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method); + instrumentation->Undeoptimize(request.method); + break; + default: + LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind; + break; + } +} + +void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) { + if (req.kind == DeoptimizationRequest::kNothing) { + // Nothing to do. 
+ return; + } + MutexLock mu(Thread::Current(), *deoptimization_lock_); + switch (req.kind) { + case DeoptimizationRequest::kFullDeoptimization: { + DCHECK(req.method == nullptr); + if (full_deoptimization_event_count_ == 0) { + VLOG(jdwp) << "Request full deoptimization"; + deoptimization_requests_.push_back(req); } - } else { - // Full deoptimization. - if (request.deoptimize) { - VLOG(jdwp) << "Deoptimize the world"; - instrumentation->DeoptimizeEverything(); - } else { - VLOG(jdwp) << "Undeoptimize the world"; - instrumentation->UndeoptimizeEverything(); + ++full_deoptimization_event_count_; + break; + } + case DeoptimizationRequest::kFullUndeoptimization: { + DCHECK(req.method == nullptr); + DCHECK_GT(full_deoptimization_event_count_, 0U); + --full_deoptimization_event_count_; + if (full_deoptimization_event_count_ == 0) { + VLOG(jdwp) << "Request full undeoptimization"; + deoptimization_requests_.push_back(req); } + break; + } + case DeoptimizationRequest::kSelectiveDeoptimization: { + DCHECK(req.method != nullptr); + VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(req.method); + deoptimization_requests_.push_back(req); + break; + } + case DeoptimizationRequest::kSelectiveUndeoptimization: { + DCHECK(req.method != nullptr); + VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(req.method); + deoptimization_requests_.push_back(req); + break; + } + default: { + LOG(FATAL) << "Unknown deoptimization request kind " << req.kind; + break; } } - gDeoptimizationRequests.clear(); } -// Process deoptimization requests after suspending all mutator threads. void Dbg::ManageDeoptimization() { Thread* const self = Thread::Current(); { // Avoid suspend/resume if there is no pending request. 
- MutexLock mu(self, *Locks::deoptimization_lock_); - if (gDeoptimizationRequests.empty()) { + MutexLock mu(self, *deoptimization_lock_); + if (deoptimization_requests_.empty()) { return; } } @@ -2593,27 +2646,21 @@ void Dbg::ManageDeoptimization() { Runtime* const runtime = Runtime::Current(); runtime->GetThreadList()->SuspendAll(); const ThreadState old_state = self->SetStateUnsafe(kRunnable); - ProcessDeoptimizationRequests(); + { + MutexLock mu(self, *deoptimization_lock_); + for (const DeoptimizationRequest& request : deoptimization_requests_) { + ProcessDeoptimizationRequest(request); + } + deoptimization_requests_.clear(); + } CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); runtime->GetThreadList()->ResumeAll(); self->TransitionFromSuspendedToRunnable(); } -// Enable full deoptimization. -void Dbg::EnableFullDeoptimization() { - MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); - VLOG(jdwp) << "Request full deoptimization"; - gDeoptimizationRequests.push_back(MethodInstrumentationRequest(true, nullptr)); -} - -// Disable full deoptimization. -void Dbg::DisableFullDeoptimization() { - MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); - VLOG(jdwp) << "Request full undeoptimization"; - gDeoptimizationRequests.push_back(MethodInstrumentationRequest(false, nullptr)); -} - -void Dbg::WatchLocation(const JDWP::JdwpLocation* location) { +void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { + // TODO we don't need to deoptimize a method if it's not compiled since it already runs with the + // interpreter. 
bool need_deoptimization = true; mirror::ArtMethod* m = FromMethodId(location->method_id); { @@ -2630,18 +2677,17 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location) { } gBreakpoints.push_back(Breakpoint(m, location->dex_pc)); - VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " << gBreakpoints[gBreakpoints.size() - 1]; + VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " + << gBreakpoints[gBreakpoints.size() - 1]; } if (need_deoptimization) { - // Request its deoptimization. This will be done after updating the JDWP event list. - MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); - gDeoptimizationRequests.push_back(MethodInstrumentationRequest(true, m)); - VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(m); + req->kind = DeoptimizationRequest::kSelectiveDeoptimization; + req->method = m; } } -void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) { +void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { bool can_undeoptimize = true; mirror::ArtMethod* m = FromMethodId(location->method_id); DCHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); @@ -2666,9 +2712,8 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) { if (can_undeoptimize) { // Request its undeoptimization. This will be done after updating the JDWP event list. 
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); - gDeoptimizationRequests.push_back(MethodInstrumentationRequest(false, m)); - VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(m); + req->kind = DeoptimizationRequest::kSelectiveUndeoptimization; + req->method = m; } } diff --git a/runtime/debugger.h b/runtime/debugger.h index 5fbdb37d2..23c9c6a1a 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -25,6 +25,7 @@ #include #include +#include #include "jdwp/jdwp.h" #include "jni.h" @@ -121,6 +122,25 @@ struct SingleStepControl { DISALLOW_COPY_AND_ASSIGN(SingleStepControl); }; +struct DeoptimizationRequest { + enum Kind { + kNothing, // no action. + kFullDeoptimization, // deoptimize everything. + kFullUndeoptimization, // undeoptimize everything. + kSelectiveDeoptimization, // deoptimize one method. + kSelectiveUndeoptimization // undeoptimize one method. + }; + + DeoptimizationRequest() : kind(kNothing), method(nullptr) {} + + void VisitRoots(RootCallback* callback, void* arg); + + Kind kind; + + // Method for selective deoptimization. + mirror::ArtMethod* method; +}; + class Dbg { public: static bool ParseJdwpOptions(const std::string& options); @@ -144,8 +164,8 @@ class Dbg { */ static void Connected(); static void GoActive() - LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_); - static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_); + LOCKS_EXCLUDED(Locks::breakpoint_lock_, deoptimization_lock_, Locks::mutator_lock_); + static void Disconnected() LOCKS_EXCLUDED(deoptimization_lock_, Locks::mutator_lock_); static void Disposed(); // Returns true if we're actually debugging with a real debugger, false if it's @@ -407,26 +427,23 @@ class Dbg { LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Full Deoptimization control. Only used for method entry/exit and single-stepping. 
- static void EnableFullDeoptimization() - LOCKS_EXCLUDED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void DisableFullDeoptimization() - LOCKS_EXCLUDED(Locks::deoptimization_lock_) + // Records deoptimization request in the queue. + static void RequestDeoptimization(const DeoptimizationRequest& req) + LOCKS_EXCLUDED(deoptimization_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Manage deoptimization after updating JDWP events list. This must be done while all mutator - // threads are suspended. + // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each + // request and finally resumes all threads. static void ManageDeoptimization() - LOCKS_EXCLUDED(Locks::deoptimization_lock_) + LOCKS_EXCLUDED(deoptimization_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Breakpoints. - static void WatchLocation(const JDWP::JdwpLocation* pLoc) - LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_) + static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req) + LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void UnwatchLocation(const JDWP::JdwpLocation* pLoc) - LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_) + static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req) + LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Single-stepping. 
@@ -521,6 +538,9 @@ class Dbg { static void PostThreadStartOrStop(Thread*, uint32_t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request) + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + static Mutex* alloc_tracker_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(alloc_tracker_lock_); @@ -528,6 +548,20 @@ class Dbg { static size_t alloc_record_head_ GUARDED_BY(alloc_tracker_lock_); static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_); + // Guards deoptimization requests. + static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_); + + // Deoptimization requests to be processed each time the event list is updated. This is used when + // registering and unregistering events so we do not deoptimize while holding the event list + // lock. + static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_); + + // Count the number of events requiring full deoptimization. When the counter is > 0, everything + // is deoptimized, otherwise everything is undeoptimized. + // Note: we fully deoptimize on the first event only (when the counter is set to 1). We fully + // undeoptimize when the last event is unregistered (when the counter is set to 0).
+ static size_t full_deoptimization_event_count_ GUARDED_BY(deoptimization_lock_); + DISALLOW_COPY_AND_ASSIGN(Dbg); }; diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h index d2aa8d254..cf7271b3f 100644 --- a/runtime/instrumentation.h +++ b/runtime/instrumentation.h @@ -118,7 +118,7 @@ class Instrumentation { void EnableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(deoptimized_methods_lock_); void DisableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(deoptimized_methods_lock_); + LOCKS_EXCLUDED(deoptimized_methods_lock_); bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Executes everything with interpreter. @@ -142,7 +142,7 @@ class Instrumentation { // (except a class initializer) set to the resolution trampoline will be updated only once its // declaring class is initialized. void Undeoptimize(mirror::ArtMethod* method) - LOCKS_EXCLUDED(Locks::thread_list_lock_) + LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDeoptimized(mirror::ArtMethod* method) const LOCKS_EXCLUDED(deoptimized_methods_lock_); diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h index 4c17c96d2..66ebb96d4 100644 --- a/runtime/jdwp/jdwp.h +++ b/runtime/jdwp/jdwp.h @@ -335,12 +335,10 @@ struct JdwpState { AtomicInteger event_serial_; // Linked list of events requested by the debugger (breakpoints, class prep, etc). - Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_); JdwpEvent* event_list_ GUARDED_BY(event_list_lock_); size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_. - size_t full_deoptimization_requests_ GUARDED_BY(event_list_lock_); // Number of events requiring - // full deoptimization. 
// Used to synchronize suspension of the event thread (to avoid receiving "resume" // events before the thread has finished suspending itself). diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc index 427350ed4..9b3ea2e6c 100644 --- a/runtime/jdwp/jdwp_event.cc +++ b/runtime/jdwp/jdwp_event.cc @@ -163,11 +163,12 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) { * If one or more "break"-type mods are used, register them with * the interpreter. */ + DeoptimizationRequest req; for (int i = 0; i < pEvent->modCount; i++) { const JdwpEventMod* pMod = &pEvent->mods[i]; if (pMod->modKind == MK_LOCATION_ONLY) { /* should only be for Breakpoint, Step, and Exception */ - Dbg::WatchLocation(&pMod->locationOnly.loc); + Dbg::WatchLocation(&pMod->locationOnly.loc, &req); } else if (pMod->modKind == MK_STEP) { /* should only be for EK_SINGLE_STEP; should only be one */ JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size); @@ -181,6 +182,11 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) { dumpEvent(pEvent); /* TODO - need for field watches */ } } + if (NeedsFullDeoptimization(pEvent->eventKind)) { + CHECK_EQ(req.kind, DeoptimizationRequest::kNothing); + CHECK(req.method == nullptr); + req.kind = DeoptimizationRequest::kFullDeoptimization; + } { /* @@ -193,19 +199,11 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) { } event_list_ = pEvent; ++event_list_size_; - - /** - * Do we need to enable full deoptimization ? - */ - if (NeedsFullDeoptimization(pEvent->eventKind)) { - if (full_deoptimization_requests_ == 0) { - // This is the first event that needs full deoptimization: enable it. - Dbg::EnableFullDeoptimization(); - } - ++full_deoptimization_requests_; - } } + // TODO we can do a better job here since we should process only one request: the one we just
+ Dbg::RequestDeoptimization(req); Dbg::ManageDeoptimization(); return ERR_NONE; @@ -238,31 +236,28 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) { /* * Unhook us from the interpreter, if necessary. */ + DeoptimizationRequest req; for (int i = 0; i < pEvent->modCount; i++) { JdwpEventMod* pMod = &pEvent->mods[i]; if (pMod->modKind == MK_LOCATION_ONLY) { /* should only be for Breakpoint, Step, and Exception */ - Dbg::UnwatchLocation(&pMod->locationOnly.loc); + Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req); } if (pMod->modKind == MK_STEP) { /* should only be for EK_SINGLE_STEP; should only be one */ Dbg::UnconfigureStep(pMod->step.threadId); } } + if (NeedsFullDeoptimization(pEvent->eventKind)) { + CHECK_EQ(req.kind, DeoptimizationRequest::kNothing); + CHECK(req.method == nullptr); + req.kind = DeoptimizationRequest::kFullUndeoptimization; + } --event_list_size_; CHECK(event_list_size_ != 0 || event_list_ == NULL); - /** - * Can we disable full deoptimization ? - */ - if (NeedsFullDeoptimization(pEvent->eventKind)) { - --full_deoptimization_requests_; - if (full_deoptimization_requests_ == 0) { - // We no longer need full deoptimization. - Dbg::DisableFullDeoptimization(); - } - } + Dbg::RequestDeoptimization(req); } /* diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc index 77c963fe7..5fc0228f3 100644 --- a/runtime/jdwp/jdwp_main.cc +++ b/runtime/jdwp/jdwp_main.cc @@ -215,7 +215,6 @@ JdwpState::JdwpState(const JdwpOptions* options) event_list_lock_("JDWP event list lock", kJdwpEventListLock), event_list_(NULL), event_list_size_(0), - full_deoptimization_requests_(0), event_thread_lock_("JDWP event thread lock"), event_thread_cond_("JDWP event thread condition variable", event_thread_lock_), event_thread_id_(0), -- 2.11.0