Fix a race condition between DelayReferenceReferent and Reference.clear().
author    Hiroshi Yamauchi <yamauchi@google.com>
          Mon, 19 Dec 2016 19:44:47 +0000 (11:44 -0800)
committer Hiroshi Yamauchi <yamauchi@google.com>
          Mon, 19 Dec 2016 19:44:47 +0000 (11:44 -0800)
Rename IsMarkedHeapReference to IsNullOrMarkedHeapReference.

Move the null check from the caller of IsMarkedHeapReference into
IsNullOrMarkedHeapReference.

Make sure that the referent is only loaded once between the null
check and the IsMarked call.

Use a CAS in ConcurrentCopying::IsNullOrMarkedHeapReference when
called from DelayReferenceReferent.

Bug: 33389022
Test: test-art-host without and with CC.

Change-Id: I20edab4dac2a4bb02dbb72af0f09de77b55ac08e
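
To make the race and the fix concrete, here is a minimal standalone C++ sketch of the load-once plus weak-CAS pattern this change introduces. It is not ART code: Obj, Slot, and ToSpace() are illustrative stand-ins for a mirror object, a referent field, and the collector's IsMarked() lookup.

#include <atomic>

struct Obj;

// Stand-in for the collector's IsMarked(): returns the marked/forwarded
// object, or nullptr if unmarked. Trivial stub so the sketch compiles.
Obj* ToSpace(Obj* from_ref) { return from_ref; }

struct Slot {
  std::atomic<Obj*> ref{nullptr};  // The referent field, shared with mutator threads.
};

bool IsNullOrMarked(Slot* slot) {
  // Load the referent exactly once. Re-reading it between the null check and
  // the mark query is the window a racing Reference.clear() could exploit.
  Obj* from_ref = slot->ref.load(std::memory_order_relaxed);
  if (from_ref == nullptr) {
    return true;
  }
  Obj* to_ref = ToSpace(from_ref);
  if (to_ref == nullptr) {
    return false;  // Referent not marked (yet).
  }
  if (from_ref != to_ref) {
    Obj* expected = from_ref;
    // Install to_ref only if the field still holds from_ref, so a concurrent
    // Reference.clear() (which stores null) is never overwritten.
    while (!slot->ref.compare_exchange_weak(expected, to_ref,
                                            std::memory_order_relaxed)) {
      if (expected != from_ref) {
        break;  // Concurrently overwritten by a mutator; keep its value.
      }
      // Spurious CAS failure; expected still equals from_ref, so retry.
    }
  }
  return true;
}

With the old unconditional Assign() path, a GC thread that had already passed the null check could write a stale to-space pointer over the null just stored by Reference.clear(), effectively resurrecting a cleared referent; the single load plus CAS closes that window.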

13 files changed:
runtime/gc/collector/concurrent_copying.cc
runtime/gc/collector/concurrent_copying.h
runtime/gc/collector/garbage_collector.h
runtime/gc/collector/mark_compact.cc
runtime/gc/collector/mark_compact.h
runtime/gc/collector/mark_sweep.cc
runtime/gc/collector/mark_sweep.h
runtime/gc/collector/semi_space.cc
runtime/gc/collector/semi_space.h
runtime/gc/reference_processor.cc
runtime/gc/reference_queue.cc
runtime/mirror/object_reference-inl.h
runtime/mirror/object_reference.h

runtime/gc/collector/concurrent_copying.cc
index b889913..741d1da 100644
@@ -2385,16 +2385,29 @@ void ConcurrentCopying::FinishPhase() {
   }
 }
 
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+                                                    bool do_atomic_update) {
   mirror::Object* from_ref = field->AsMirrorPtr();
+  if (from_ref == nullptr) {
+    return true;
+  }
   mirror::Object* to_ref = IsMarked(from_ref);
   if (to_ref == nullptr) {
     return false;
   }
   if (from_ref != to_ref) {
-    QuasiAtomic::ThreadFenceRelease();
-    field->Assign(to_ref);
-    QuasiAtomic::ThreadFenceSequentiallyConsistent();
+    if (do_atomic_update) {
+      do {
+        if (field->AsMirrorPtr() != from_ref) {
+          // Concurrently overwritten by a mutator.
+          break;
+        }
+      } while (!field->CasWeakRelaxed(from_ref, to_ref));
+    } else {
+      QuasiAtomic::ThreadFenceRelease();
+      field->Assign(to_ref);
+      QuasiAtomic::ThreadFenceSequentiallyConsistent();
+    }
   }
   return true;
 }
runtime/gc/collector/concurrent_copying.h
index 5b8a557..844bb45 100644
@@ -183,7 +183,8 @@ class ConcurrentCopying : public GarbageCollector {
       REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
runtime/gc/collector/garbage_collector.h
index 5b51399..0177e2a 100644
@@ -187,7 +187,10 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
   // and will be used for reading system weaks while the GC is running.
   virtual mirror::Object* IsMarked(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+  // Returns true if the given heap reference is null or already marked. If already marked, also
+  // updates the reference (with a CAS if do_atomic_update is true). Otherwise, returns false.
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+                                           bool do_atomic_update)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   // Used by reference processor.
   virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
runtime/gc/collector/mark_compact.cc
index ddcb6c0..85e6783 100644
@@ -472,9 +472,15 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
   return mark_bitmap_->Test(object) ? object : nullptr;
 }
 
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+                                              // MarkCompact does the GC in a pause. No CAS needed.
+                                              bool do_atomic_update ATTRIBUTE_UNUSED) {
   // Side effect free since we call this before ever moving objects.
-  return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+  mirror::Object* obj = ref_ptr->AsMirrorPtr();
+  if (obj == nullptr) {
+    return true;
+  }
+  return IsMarked(obj) != nullptr;
 }
 
 void MarkCompact::SweepSystemWeaks() {
runtime/gc/collector/mark_compact.h
index 564f85b..6d52d5d 100644
@@ -175,7 +175,8 @@ class MarkCompact : public GarbageCollector {
   virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
       REQUIRES_SHARED(Locks::heap_bitmap_lock_)
       REQUIRES(Locks::mutator_lock_);
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES_SHARED(Locks::heap_bitmap_lock_)
       REQUIRES(Locks::mutator_lock_);
   void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
runtime/gc/collector/mark_sweep.cc
index 06ed029..f00da73 100644
@@ -390,8 +390,13 @@ inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
   }
 }
 
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
-  return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
+  mirror::Object* obj = ref->AsMirrorPtr();
+  if (obj == nullptr) {
+    return true;
+  }
+  return IsMarked(obj);
 }
 
 class MarkSweep::MarkObjectSlowPath {
runtime/gc/collector/mark_sweep.h
index 02cf462..a6e2d61 100644
@@ -188,7 +188,8 @@ class MarkSweep : public GarbageCollector {
   void VerifyIsLive(const mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
runtime/gc/collector/semi_space.cc
index a815b83..82505ce 100644
@@ -765,8 +765,13 @@ mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
   return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
 
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+                                            // SemiSpace does the GC in a pause. No CAS needed.
+                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
   mirror::Object* obj = object->AsMirrorPtr();
+  if (obj == nullptr) {
+    return true;
+  }
   mirror::Object* new_obj = IsMarked(obj);
   if (new_obj == nullptr) {
     return false;
runtime/gc/collector/semi_space.h
index 4cebcc3..52b5e5f 100644
@@ -166,7 +166,8 @@ class SemiSpace : public GarbageCollector {
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
runtime/gc/reference_processor.cc
index 081be96..c154836 100644
@@ -203,7 +203,9 @@ void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
   DCHECK(klass != nullptr);
   DCHECK(klass->IsTypeOfReferenceClass());
   mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
-  if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+  // do_atomic_update needs to be true because this happens outside of the reference processing
+  // phase.
+  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
     Thread* self = Thread::Current();
     // TODO: Remove these locks, and use atomic stacks for storing references?
     // We need to check that the references haven't already been enqueued since we can end up
runtime/gc/reference_queue.cc
index a0eb197..734caea 100644
@@ -129,8 +129,9 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
   while (!IsEmpty()) {
     ObjPtr<mirror::Reference> ref = DequeuePendingReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-    if (referent_addr->AsMirrorPtr() != nullptr &&
-        !collector->IsMarkedHeapReference(referent_addr)) {
+    // do_atomic_update is false because this happens during the reference processing phase where
+    // Reference.clear() would block.
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
       // Referent is white, clear it.
       if (Runtime::Current()->IsActiveTransaction()) {
         ref->ClearReferent<true>();
@@ -147,8 +148,9 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc
   while (!IsEmpty()) {
     ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-    if (referent_addr->AsMirrorPtr() != nullptr &&
-        !collector->IsMarkedHeapReference(referent_addr)) {
+    // do_atomic_update is false because this happens during the reference processing phase where
+    // Reference.clear() would block.
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
       ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
       // Move the updated referent to the zombie field.
       if (Runtime::Current()->IsActiveTransaction()) {
runtime/mirror/object_reference-inl.h
index e70b936..22fb83c 100644
@@ -34,6 +34,15 @@ HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorTyp
   return HeapReference<MirrorType>(ptr.Ptr());
 }
 
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+  HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+  Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+  return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+                                                      new_ref.reference_);
+}
+
 }  // namespace mirror
 }  // namespace art
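
As a companion sketch, here is the same weak relaxed CAS over a 32-bit reference slot expressed with std::atomic. CompressedRef and its layout are assumptions for illustration, not HeapReference's actual encoding; portable code would declare the field std::atomic<uint32_t> up front rather than reinterpret_cast a plain field the way the patch does through ART's Atomic<> wrapper.

#include <atomic>
#include <cstdint>

// Sketch only: a 32-bit reference slot loosely modeled on HeapReference.
struct CompressedRef {
  uint32_t reference_;

  static uint32_t Compress(void* ptr) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
  }

  // Weak relaxed CAS: succeeds only if the slot still holds expected_ptr, and
  // may fail spuriously, which is why the caller loops around it.
  bool CasWeakRelaxed(void* expected_ptr, void* new_ptr) {
    uint32_t expected = Compress(expected_ptr);
    auto* atomic_slot = reinterpret_cast<std::atomic<uint32_t>*>(&reference_);
    return atomic_slot->compare_exchange_weak(expected, Compress(new_ptr),
                                              std::memory_order_relaxed);
  }
};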
 
runtime/mirror/object_reference.h
index 71f34c6..a96a120 100644
@@ -94,6 +94,9 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr
   static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
       : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}