
Add missing debugger root visiting.
author     Mathieu Chartier <mathieuc@google.com>
           Tue, 25 Mar 2014 16:29:43 +0000 (09:29 -0700)
committer  Mathieu Chartier <mathieuc@google.com>
           Tue, 25 Mar 2014 20:56:58 +0000 (13:56 -0700)
Bug: 13634574
Change-Id: I2a76f6c43f1d0ad1922f06deb40a71ff651129fd

runtime/debugger.cc
runtime/debugger.h
runtime/gc/accounting/remembered_set.cc
runtime/gc/collector/semi_space-inl.h
runtime/gc/collector/semi_space.cc
runtime/gc/collector/semi_space.h
runtime/gc/heap.cc
runtime/instrumentation.cc
runtime/instrumentation.h
runtime/runtime.cc
runtime/thread.cc

diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 43e8bb9..c18d5c6 100644
@@ -104,6 +104,12 @@ struct Breakpoint {
   mirror::ArtMethod* method;
   uint32_t dex_pc;
   Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {}
+
+  void VisitRoots(RootCallback* callback, void* arg) {
+    if (method != nullptr) {
+      callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
+    }
+  }
 };
 
 static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
@@ -209,6 +215,29 @@ static std::vector<MethodInstrumentationRequest> gDeoptimizationRequests GUARDED
 // Breakpoints.
 static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
 
+void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
+                                RootType root_type) {
+  if (receiver != nullptr) {
+    callback(&receiver, arg, tid, root_type);
+  }
+  if (thread != nullptr) {
+    callback(&thread, arg, tid, root_type);
+  }
+  if (klass != nullptr) {
+    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
+  }
+  if (method != nullptr) {
+    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
+  }
+}
+
+void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
+                                   RootType root_type) {
+  if (method != nullptr) {
+    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
+  }
+}
+
 static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
     LOCKS_EXCLUDED(Locks::breakpoint_lock_)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -494,6 +523,13 @@ void Dbg::StartJdwp() {
   }
 }
 
+void Dbg::VisitRoots(RootCallback* callback, void* arg) {
+  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+  for (Breakpoint& bp : gBreakpoints) {
+    bp.VisitRoots(callback, arg);
+  }
+}
+
 void Dbg::StopJdwp() {
   // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
   Disposed();
@@ -3861,7 +3897,7 @@ void Dbg::DumpRecentAllocations() {
   }
 }
 
-void Dbg::UpdateObjectPointers(IsMarkedCallback* visitor, void* arg) {
+void Dbg::UpdateObjectPointers(IsMarkedCallback* callback, void* arg) {
   if (recent_allocation_records_ != nullptr) {
     MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
     size_t i = HeadIndex();
@@ -3869,12 +3905,12 @@ void Dbg::UpdateObjectPointers(IsMarkedCallback* visitor, void* arg) {
     while (count--) {
       AllocRecord* record = &recent_allocation_records_[i];
       DCHECK(record != nullptr);
-      record->UpdateObjectPointers(visitor, arg);
+      record->UpdateObjectPointers(callback, arg);
       i = (i + 1) & (alloc_record_max_ - 1);
     }
   }
   if (gRegistry != nullptr) {
-    gRegistry->UpdateObjectPointers(visitor, arg);
+    gRegistry->UpdateObjectPointers(callback, arg);
   }
 }
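
The debugger.cc changes above let the debugger report the ArtMethod* pointers held by its breakpoints (and, in the hunks that follow, by DebugInvokeReq and SingleStepControl) as GC roots, so a moving collector can relocate those methods without leaving stale pointers behind. For readers unfamiliar with the convention, here is a minimal self-contained sketch of the root-callback pattern; it is not ART code and every name in it is invented:

#include <cstdint>

struct ToyObject {};

// Toy analogue of RootCallback: the callback may rewrite *root to the object's new location.
using ToyRootCallback = void(ToyObject** root, void* arg, uint32_t thread_id, int root_type);

struct ToyBreakpoint {
  ToyObject* method = nullptr;

  void VisitRoots(ToyRootCallback* callback, void* arg) {
    if (method != nullptr) {
      // Pass the address of the field so the callback can update it in place.
      callback(&method, arg, /*thread_id=*/0, /*root_type=*/0);
    }
  }
};

// A callback that "relocates" the object and fixes up the root it was handed.
static void ToyMovingCallback(ToyObject** root, void* /*arg*/, uint32_t /*tid*/, int /*type*/) {
  static ToyObject relocated;
  *root = &relocated;  // stand-in for writing back the forwarded (to-space) address
}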
 
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 6569cc4..5fbdb37 100644
@@ -81,6 +81,9 @@ struct DebugInvokeReq {
   Mutex lock DEFAULT_MUTEX_ACQUIRED_AFTER;
   ConditionVariable cond GUARDED_BY(lock);
 
+  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
 };
@@ -111,6 +114,9 @@ struct SingleStepControl {
   // single-step depth.
   int stack_depth;
 
+  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
 };
@@ -459,6 +465,9 @@ class Dbg {
   static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  static void VisitRoots(RootCallback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   /*
    * Recent allocation tracking support.
    */
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index afa5054..56f7caa 100644
@@ -68,6 +68,7 @@ class RememberedSetReferenceVisitor {
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(obj != nullptr);
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
       *contains_reference_to_target_space_ = true;
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 3b8f7c3..d60298b 100644
 #ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
 #define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
 
+#include "semi_space.h"
+
+#include "gc/accounting/heap_bitmap.h"
+#include "mirror/object-inl.h"
+
 namespace art {
 namespace gc {
 namespace collector {
@@ -30,6 +35,60 @@ inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object
   return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
 }
 
+// Used to mark and copy objects. Any newly-marked object that is in the from-space gets moved to
+// the to-space and has its forwarding address updated. Objects which have been newly marked are
+// pushed on the mark stack.
+template<bool kPoisonReferences>
+inline void SemiSpace::MarkObject(
+    mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) {
+  mirror::Object* obj = obj_ptr->AsMirrorPtr();
+  if (obj == nullptr) {
+    return;
+  }
+  if (kUseBrooksPointer) {
+    // Verify all the objects have the correct forward pointer installed.
+    obj->AssertSelfBrooksPointer();
+  }
+  if (!immune_region_.ContainsObject(obj)) {
+    if (from_space_->HasAddress(obj)) {
+      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+      // If the object has already been moved, return the new forward address.
+      if (forward_address == nullptr) {
+        forward_address = MarkNonForwardedObject(obj);
+        DCHECK(forward_address != nullptr);
+        // Make sure to only update the forwarding address AFTER you copy the object so that the
+        // monitor word doesn't get stomped over.
+        obj->SetLockWord(LockWord::FromForwardingAddress(
+            reinterpret_cast<size_t>(forward_address)));
+        // Push the object onto the mark stack for later processing.
+        MarkStackPush(forward_address);
+      }
+      obj_ptr->Assign(forward_address);
+    } else {
+      accounting::SpaceBitmap* object_bitmap =
+          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+      if (LIKELY(object_bitmap != nullptr)) {
+        if (generational_) {
+          // If a bump pointer space only collection, we should not
+          // reach here as we don't/won't mark the objects in the
+          // non-moving space (except for the promoted objects.)  Note
+          // the non-moving space is added to the immune space.
+          DCHECK(whole_heap_collection_);
+        }
+        if (!object_bitmap->Set(obj)) {
+          // This object was not previously marked.
+          MarkStackPush(obj);
+        }
+      } else {
+        CHECK(!to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
+        if (MarkLargeObject(obj)) {
+          MarkStackPush(obj);
+        }
+      }
+    }
+  }
+}
+
 }  // namespace collector
 }  // namespace gc
 }  // namespace art
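
The MarkObject template moved into this header performs the usual semi-space step: the first visit to a from-space object copies it into the to-space, records the forwarding address in the old object's lock word, and pushes the copy on the mark stack; later visits simply read the forwarding address back. A rough, self-contained illustration of that forwarding idea, not ART code and with invented names:

#include <deque>
#include <vector>

struct ToyObj {
  ToyObj* forwarding = nullptr;  // stand-in for a lock word holding the forwarding address
  int payload = 0;
};

struct ToyCollector {
  std::deque<ToyObj> to_space;     // deque: existing elements keep their addresses as it grows
  std::vector<ToyObj*> mark_stack;

  ToyObj* Mark(ToyObj* obj) {
    if (obj == nullptr) {
      return nullptr;
    }
    if (obj->forwarding == nullptr) {         // first visit: not yet moved
      to_space.push_back(*obj);               // copy the object first...
      obj->forwarding = &to_space.back();     // ...then install the forwarding address
      mark_stack.push_back(obj->forwarding);  // queue the copy so its references get processed
    }
    return obj->forwarding;                   // subsequent visits just return the new address
  }
};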
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 5faa3a1..cd9e217 100644
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "semi_space.h"
+#include "semi_space-inl.h"
 
 #include <functional>
 #include <numeric>
@@ -50,7 +50,7 @@
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
 #include "runtime.h"
-#include "semi_space-inl.h"
+#include "stack.h"
 #include "thread-inl.h"
 #include "thread_list.h"
 #include "verifier/method_verifier.h"
@@ -264,6 +264,7 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
     if (from_space_->HasAddress(ref)) {
       Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
+      LOG(FATAL) << ref << " found in from space";
     }
   }
  private:
@@ -574,64 +575,12 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
   return forward_address;
 }
 
-// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
-// the to-space and have their forward address updated. Objects which have been newly marked are
-// pushed on the mark stack.
-void SemiSpace::MarkObject(mirror::HeapReference<Object>* obj_ptr) {
-  Object* obj = obj_ptr->AsMirrorPtr();
-  if (obj == nullptr) {
-    return;
-  }
-  if (kUseBrooksPointer) {
-    // Verify all the objects have the correct forward pointer installed.
-    obj->AssertSelfBrooksPointer();
-  }
-  if (!immune_region_.ContainsObject(obj)) {
-    if (from_space_->HasAddress(obj)) {
-      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
-      // If the object has already been moved, return the new forward address.
-      if (forward_address == nullptr) {
-        forward_address = MarkNonForwardedObject(obj);
-        DCHECK(forward_address != nullptr);
-        // Make sure to only update the forwarding address AFTER you copy the object so that the
-        // monitor word doesn't get stomped over.
-        obj->SetLockWord(LockWord::FromForwardingAddress(
-            reinterpret_cast<size_t>(forward_address)));
-        // Push the object onto the mark stack for later processing.
-        MarkStackPush(forward_address);
-      }
-      obj_ptr->Assign(forward_address);
-    } else {
-      accounting::SpaceBitmap* object_bitmap =
-          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
-      if (LIKELY(object_bitmap != nullptr)) {
-        if (generational_) {
-          // If a bump pointer space only collection, we should not
-          // reach here as we don't/won't mark the objects in the
-          // non-moving space (except for the promoted objects.)  Note
-          // the non-moving space is added to the immune space.
-          DCHECK(whole_heap_collection_);
-        }
-        if (!object_bitmap->Set(obj)) {
-          // This object was not previously marked.
-          MarkStackPush(obj);
-        }
-      } else {
-        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
-        if (MarkLargeObject(obj)) {
-          MarkStackPush(obj);
-        }
-      }
-    }
-  }
-}
-
 void SemiSpace::ProcessMarkStackCallback(void* arg) {
   reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
 }
 
 mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
-  auto ref = mirror::HeapReference<mirror::Object>::FromMirrorPtr(root);
+  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
   reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
   return ref.AsMirrorPtr();
 }
@@ -643,7 +592,7 @@ void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*
 
 void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                  RootType /*root_type*/) {
-  auto ref = mirror::HeapReference<mirror::Object>::FromMirrorPtr(*root);
+  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
   reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
   if (*root != ref.AsMirrorPtr()) {
     *root = ref.AsMirrorPtr();
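
MarkObjectCallback and MarkRootCallback above now wrap the raw root in a StackReference, let MarkObject reassign it to the forwarded object, and write the root back only when it actually changed. This is the callee side of the root-callback pattern sketched earlier; again, the code below is not ART code and its names are invented:

struct ToyRootObj {};

template <typename T>
struct ToyStackReference {
  T* ptr;
  static ToyStackReference FromMirrorPtr(T* p) { return ToyStackReference{p}; }
  T* AsMirrorPtr() const { return ptr; }
  void Assign(T* p) { ptr = p; }
};

// Stand-in for the collector's mark step; identity here, but a real collector
// may return the to-space copy of the object instead.
static ToyRootObj* ToyMark(ToyRootObj* obj) { return obj; }

static void ToyMarkRootCallback(ToyRootObj** root) {
  auto ref = ToyStackReference<ToyRootObj>::FromMirrorPtr(*root);
  ref.Assign(ToyMark(ref.AsMirrorPtr()));
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();  // write back only when the object actually moved
  }
}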
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 523c2ab..52b53aa 100644
@@ -98,7 +98,8 @@ class SemiSpace : public GarbageCollector {
   void FindDefaultMarkBitmap();
 
   // Returns the new address of the object.
-  void MarkObject(mirror::HeapReference<mirror::Object>* obj_ptr)
+  template<bool kPoisonReferences>
+  void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6c3ae5e..a256b67 100644
@@ -805,7 +805,8 @@ void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
 // marked, put it on the appropriate list in the heap for later processing.
 void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                   IsMarkedCallback is_marked_callback, void* arg) {
-  DCHECK_EQ(klass, ref->GetClass());
+  // klass can be the class of the old object if the visitor already updated the class of ref.
+  DCHECK(klass->IsReferenceClass());
   mirror::Object* referent = ref->GetReferent();
   if (referent != nullptr) {
     mirror::Object* forward_address = is_marked_callback(referent, arg);
@@ -1306,7 +1307,7 @@ class ReferringObjectsFinder {
     o->VisitReferences<true>(*this);
   }
 
-  // For MarkSweep::VisitObjectReferences.
+  // For Object::VisitReferences.
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
@@ -1916,7 +1917,7 @@ class VerifyReferenceVisitor {
     this->operator()(ref, mirror::Reference::ReferentOffset(), false);
   }
 
-  void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     this->operator()(obj, obj->GetFieldObject<mirror::Object>(offset, false), offset);
   }
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 89a63ac..525e2b3 100644
@@ -57,6 +57,19 @@ static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
   return instrumentation->InstallStubsForClass(klass);
 }
 
+Instrumentation::Instrumentation()
+    : instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
+      interpreter_stubs_installed_(false),
+      interpret_only_(false), forced_interpret_only_(false),
+      have_method_entry_listeners_(false), have_method_exit_listeners_(false),
+      have_method_unwind_listeners_(false), have_dex_pc_listeners_(false),
+      have_exception_caught_listeners_(false),
+      deoptimized_methods_lock_("deoptimized methods lock"),
+      deoptimization_enabled_(false),
+      interpreter_handler_table_(kMainHandlerTable),
+      quick_alloc_entry_points_instrumentation_counter_(0) {
+}
+
 bool Instrumentation::InstallStubsForClass(mirror::Class* klass) {
   for (size_t i = 0, e = klass->NumDirectMethods(); i < e; i++) {
     InstallStubsForMethod(klass->GetDirectMethod(i));
@@ -445,7 +458,12 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
     entry_exit_stubs_installed_ = false;
     runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
     // Restore stack only if there is no method currently deoptimized.
-    if (deoptimized_methods_.empty()) {
+    bool empty;
+    {
+      ReaderMutexLock mu(self, deoptimized_methods_lock_);
+      empty = deoptimized_methods_.empty();  // Avoid lock violation.
+    }
+    if (empty) {
       instrumentation_stubs_installed_ = false;
       MutexLock mu(self, *Locks::thread_list_lock_);
       Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
@@ -542,7 +560,12 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
   CHECK(!method->IsProxyMethod());
   CHECK(!method->IsAbstract());
 
-  std::pair<std::set<mirror::ArtMethod*>::iterator, bool> pair = deoptimized_methods_.insert(method);
+  Thread* self = Thread::Current();
+  std::pair<std::set<mirror::ArtMethod*>::iterator, bool> pair;
+  {
+    WriterMutexLock mu(self, deoptimized_methods_lock_);
+    pair = deoptimized_methods_.insert(method);
+  }
   bool already_deoptimized = !pair.second;
   CHECK(!already_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
 
@@ -553,7 +576,7 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
     // Install instrumentation exit stub and instrumentation frames. We may already have installed
     // these previously so it will only cover the newly created frames.
     instrumentation_stubs_installed_ = true;
-    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+    MutexLock mu(self, *Locks::thread_list_lock_);
     Runtime::Current()->GetThreadList()->ForEach(InstrumentationInstallStack, this);
   }
 }
@@ -563,9 +586,16 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
   CHECK(!method->IsProxyMethod());
   CHECK(!method->IsAbstract());
 
-  auto it = deoptimized_methods_.find(method);
-  CHECK(it != deoptimized_methods_.end()) << "Method " << PrettyMethod(method) << " is not deoptimized";
-  deoptimized_methods_.erase(it);
+  Thread* self = Thread::Current();
+  bool empty;
+  {
+    WriterMutexLock mu(self, deoptimized_methods_lock_);
+    auto it = deoptimized_methods_.find(method);
+    CHECK(it != deoptimized_methods_.end()) << "Method " << PrettyMethod(method)
+        << " is not deoptimized";
+    deoptimized_methods_.erase(it);
+    empty = deoptimized_methods_.empty();
+  }
 
   // Restore code and possibly stack only if we did not deoptimize everything.
   if (!interpreter_stubs_installed_) {
@@ -583,8 +613,8 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
     }
 
     // If there is no deoptimized method left, we can restore the stack of each thread.
-    if (deoptimized_methods_.empty()) {
-      MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+    if (empty) {
+      MutexLock mu(self, *Locks::thread_list_lock_);
       Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
       instrumentation_stubs_installed_ = false;
     }
@@ -592,11 +622,13 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
 }
 
 bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) const {
+  ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
   DCHECK(method != nullptr);
-  return deoptimized_methods_.count(method);
+  return deoptimized_methods_.find(method) != deoptimized_methods_.end();
 }
 
 void Instrumentation::EnableDeoptimization() {
+  ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
   CHECK(deoptimized_methods_.empty());
   CHECK_EQ(deoptimization_enabled_, false);
   deoptimization_enabled_ = true;
@@ -609,11 +641,17 @@ void Instrumentation::DisableDeoptimization() {
     UndeoptimizeEverything();
   }
   // Undeoptimize selected methods.
-  while (!deoptimized_methods_.empty()) {
-    auto it_begin = deoptimized_methods_.begin();
-    Undeoptimize(*it_begin);
+  while (true) {
+    mirror::ArtMethod* method;
+    {
+      ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+      if (deoptimized_methods_.empty()) {
+        break;
+      }
+      method = *deoptimized_methods_.begin();
+    }
+    Undeoptimize(method);
   }
-  CHECK(deoptimized_methods_.empty());
   deoptimization_enabled_ = false;
 }
 
@@ -827,6 +865,20 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c
   }
 }
 
+void Instrumentation::VisitRoots(RootCallback* callback, void* arg) {
+  WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+  if (deoptimized_methods_.empty()) {
+    return;
+  }
+  std::set<mirror::ArtMethod*> new_deoptimized_methods;
+  for (mirror::ArtMethod* method : deoptimized_methods_) {
+    DCHECK(method != nullptr);
+    callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootVMInternal);
+    new_deoptimized_methods.insert(method);
+  }
+  deoptimized_methods_ = new_deoptimized_methods;
+}
+
 std::string InstrumentationStackFrame::Dump() const {
   std::ostringstream os;
   os << "Frame " << frame_id_ << " " << PrettyMethod(method_) << ":"
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e9356e0..d2aa8d2 100644
@@ -20,6 +20,7 @@
 #include "atomic.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "object_callbacks.h"
 
 #include <stdint.h>
 #include <set>
@@ -98,16 +99,7 @@ class Instrumentation {
     kExceptionCaught = 16
   };
 
-  Instrumentation() :
-      instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
-      interpreter_stubs_installed_(false),
-      interpret_only_(false), forced_interpret_only_(false),
-      have_method_entry_listeners_(false), have_method_exit_listeners_(false),
-      have_method_unwind_listeners_(false), have_dex_pc_listeners_(false),
-      have_exception_caught_listeners_(false),
-      deoptimization_enabled_(false),
-      interpreter_handler_table_(kMainHandlerTable),
-      quick_alloc_entry_points_instrumentation_counter_(0) {}
+  Instrumentation();
 
 // Add a listener to be notified of the masked together set of instrumentation events. This
 // suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
@@ -123,8 +115,10 @@ class Instrumentation {
       LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
 
   // Deoptimization.
-  void EnableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DisableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(deoptimized_methods_lock_);
+  void DisableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+    LOCKS_EXCLUDED(deoptimized_methods_lock_);
   bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Executes everything with interpreter.
@@ -141,7 +135,7 @@ class Instrumentation {
   // method (except a class initializer) set to the resolution trampoline will be deoptimized only
   // once its declaring class is initialized.
   void Deoptimize(mirror::ArtMethod* method)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method
@@ -151,7 +145,7 @@ class Instrumentation {
       LOCKS_EXCLUDED(Locks::thread_list_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsDeoptimized(mirror::ArtMethod* method) const;
+  bool IsDeoptimized(mirror::ArtMethod* method) const LOCKS_EXCLUDED(deoptimized_methods_lock_);
 
   // Enable method tracing by installing instrumentation entry/exit stubs.
   void EnableMethodTracing()
@@ -286,11 +280,15 @@ class Instrumentation {
   void InstallStubsForMethod(mirror::ArtMethod* method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(deoptimized_methods_lock_);
+
  private:
   // Does the job of installing or removing instrumentation code within methods.
   void ConfigureStubs(bool require_entry_exit_stubs, bool require_interpreter)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_,
+                     deoptimized_methods_lock_);
 
   void UpdateInterpreterHandlerTable() {
     interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
@@ -354,8 +352,8 @@ class Instrumentation {
 
   // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
   // only.
-  // TODO we need to visit these methods as roots.
-  std::set<mirror::ArtMethod*> deoptimized_methods_;
+  mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::set<mirror::ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
   bool deoptimization_enabled_;
 
   // Current interpreter handler table. This is updated each time the thread state flags are
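
The new deoptimized_methods_lock_ is a reader-writer lock: lookups such as IsDeoptimized take it shared, while Deoptimize, Undeoptimize, and VisitRoots take it exclusively, and the GUARDED_BY/LOCKS_EXCLUDED annotations (which typically expand to Clang thread-safety attributes) let the compiler check that discipline. A minimal sketch of the same reader/writer pattern using standard C++17 primitives rather than ART's Mutex classes; names are invented and this is not ART code:

#include <mutex>
#include <set>
#include <shared_mutex>

struct ToyArtMethod {};

class ToyInstrumentation {
 public:
  bool IsDeoptimized(ToyArtMethod* m) const {
    std::shared_lock<std::shared_mutex> lock(lock_);  // reader: shared access
    return methods_.find(m) != methods_.end();
  }

  void Deoptimize(ToyArtMethod* m) {
    std::unique_lock<std::shared_mutex> lock(lock_);  // writer: exclusive access
    methods_.insert(m);
  }

 private:
  mutable std::shared_mutex lock_;   // analogue of deoptimized_methods_lock_
  std::set<ToyArtMethod*> methods_;  // analogue of deoptimized_methods_
};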
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 21d79c3..a8da2f8 100644
@@ -839,7 +839,7 @@ void Runtime::DetachCurrentThread() {
 
 void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
   // Visit the classes held as static in mirror classes, these can be visited concurrently and only
-  // need to be visited once since they never change.
+  // need to be visited once per GC since they never change.
   mirror::ArtField::VisitRoots(callback, arg);
   mirror::ArtMethod::VisitRoots(callback, arg);
   mirror::Class::VisitRoots(callback, arg);
@@ -860,6 +860,7 @@ void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
 void Runtime::VisitConcurrentRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
   intern_table_->VisitRoots(callback, arg, flags);
   class_linker_->VisitRoots(callback, arg, flags);
+  Dbg::VisitRoots(callback, arg);
   if ((flags & kVisitRootFlagNewRoots) == 0) {
     // Guaranteed to have no new roots in the constant roots.
     VisitConstantRoots(callback, arg);
@@ -896,6 +897,7 @@ void Runtime::VisitNonThreadRoots(RootCallback* callback, void* arg) {
   if (preinitialization_transaction != nullptr) {
     preinitialization_transaction->VisitRoots(callback, arg);
   }
+  instrumentation_.VisitRoots(callback, arg);
 }
 
 void Runtime::VisitNonConcurrentRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8e14924..38e4204 100644
@@ -2025,6 +2025,12 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
   jni_env_->locals.VisitRoots(visitor, arg, thread_id, kRootJNILocal);
   jni_env_->monitors.VisitRoots(visitor, arg, thread_id, kRootJNIMonitor);
   SirtVisitRoots(visitor, arg, thread_id);
+  if (debug_invoke_req_ != nullptr) {
+    debug_invoke_req_->VisitRoots(visitor, arg, thread_id, kRootDebugger);
+  }
+  if (single_step_control_ != nullptr) {
+    single_step_control_->VisitRoots(visitor, arg, thread_id, kRootDebugger);
+  }
   // Visit roots on this thread's stack
   Context* context = GetLongJumpContext();
   RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);