}
}
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) {
mirror::Object* from_ref = field->AsMirrorPtr();
+ if (from_ref == nullptr) {
+ return true;
+ }
mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
if (from_ref != to_ref) {
- QuasiAtomic::ThreadFenceRelease();
- field->Assign(to_ref);
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ if (do_atomic_update) {
+ do {
+ if (field->AsMirrorPtr() != from_ref) {
+ // Concurrently overwritten by a mutator.
+ break;
+ }
+ } while (!field->CasWeakRelaxed(from_ref, to_ref));
+ } else {
+ QuasiAtomic::ThreadFenceRelease();
+ field->Assign(to_ref);
+ QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ }
}
return true;
}
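
For context on the loop above: CasWeakRelaxed may fail spuriously, so it has to be retried, and re-reading the field on each pass lets the collector yield to a mutator that has already stored a different reference rather than clobbering it. A minimal sketch of the same pattern in standard C++, with std::atomic<uint32_t> standing in for the compressed heap reference word (names here are illustrative, not ART API):

```cpp
#include <atomic>
#include <cstdint>

// Forward `field` from `from_ref` to `to_ref` unless a mutator got there first.
// compare_exchange_weak may fail spuriously, hence the loop; `expected` is a
// fresh copy each pass so a genuine mutator write is detected, not overwritten.
bool ForwardIfUnchanged(std::atomic<uint32_t>& field,
                        uint32_t from_ref,
                        uint32_t to_ref) {
  for (;;) {
    if (field.load(std::memory_order_relaxed) != from_ref) {
      return false;  // Concurrently overwritten by a mutator; keep its value.
    }
    uint32_t expected = from_ref;
    if (field.compare_exchange_weak(expected, to_ref,
                                    std::memory_order_relaxed)) {
      return true;   // Field now points at the marked (to-space) object.
    }
    // CAS failed, possibly spuriously; retry with a fresh read.
  }
}
```
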
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ // Returns true if the given heap reference is null or is already marked. If it is already
+ // marked, also updates the reference (using a CAS if do_atomic_update is true). Otherwise,
+ // returns false.
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
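
The contract above has three outcomes. A self-contained toy model, with a plain pointer field and a mark flag standing in for ART's compressed references and mark bitmaps (all names are illustrative, not ART API):

```cpp
// Toy model of the IsNullOrMarkedHeapReference contract.
struct Object {
  bool marked = false;
};

// Stand-in for the collector's IsMarked: returns the address of a marked
// object (a moving collector would return the to-space copy), or nullptr
// if the object is unmarked (white).
Object* IsMarked(Object* obj) {
  return obj->marked ? obj : nullptr;
}

bool IsNullOrMarked(Object** field) {
  Object* from_ref = *field;
  if (from_ref == nullptr) {
    return true;    // Null: nothing to keep alive.
  }
  Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;   // White: caller may clear or enqueue the reference.
  }
  *field = to_ref;  // Marked: forward the field (non-atomic variant shown).
  return true;
}
```
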
return mark_bitmap_->Test(object) ? object : nullptr;
}
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+ // MarkCompact does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
// Side effect free since we call this before ever moving objects.
- return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+ mirror::Object* obj = ref_ptr->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
}
}
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
- return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ mirror::Object* obj = ref->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj) != nullptr;
}
class MarkSweep::MarkObjectSlowPath {
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ // SemiSpace does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
mirror::Object* obj = object->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
mirror::Object* new_obj = IsMarked(obj);
if (new_obj == nullptr) {
return false;
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+ // do_atomic_update needs to be true because this happens outside of the reference processing
+ // phase.
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
while (!IsEmpty()) {
ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
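
The two do_atomic_update settings encode a phase assumption: DelayReferenceReferent above can race with mutator writes to the referent field and must CAS, while these queue-draining loops run while mutators block in Reference.get()/clear(), so a plain fenced store suffices. A hypothetical condensation of that decision (Phase and ForwardReferent are illustrative, not ART API):

```cpp
#include <atomic>
#include <cstdint>

enum class Phase {
  kConcurrentMarking,    // Mutators run and may store to referent fields.
  kReferenceProcessing,  // Mutators block in Reference.get()/clear().
};

// Choose between an atomic and a plain update based on whether mutators
// can concurrently write the field.
void ForwardReferent(std::atomic<uint32_t>& referent,
                     uint32_t from_ref, uint32_t to_ref, Phase phase) {
  if (phase == Phase::kConcurrentMarking) {
    uint32_t expected = from_ref;
    // Lose the race gracefully if a mutator already overwrote the field.
    referent.compare_exchange_strong(expected, to_ref,
                                     std::memory_order_relaxed);
  } else {
    // No racing writers: a release store models the fence + Assign path.
    referent.store(to_ref, std::memory_order_release);
  }
}
```
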
return HeapReference<MirrorType>(ptr.Ptr());
}
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+ HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+ Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+ return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+ new_ref.reference_);
+}
+
} // namespace mirror
} // namespace art
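
The reinterpret_cast in CasWeakRelaxed assumes Atomic<uint32_t> is layout-compatible with the raw 32-bit reference_ word. A standalone sketch of the same idiom with std::atomic, including the size check guarding that assumption (the cast is common practice though not strictly blessed by the C++ standard; names are illustrative):

```cpp
#include <atomic>
#include <cstdint>

// Stand-in for the compressed 32-bit heap reference word.
struct CompressedRef {
  uint32_t reference_;
};

static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
              "the cast below requires the atomic to add no storage");

bool CasWordWeakRelaxed(CompressedRef* ref, uint32_t expected, uint32_t desired) {
  auto* word = reinterpret_cast<std::atomic<uint32_t>*>(&ref->reference_);
  // Weak CAS with relaxed ordering, mirroring CompareExchangeWeakRelaxed.
  return word->compare_exchange_weak(expected, desired,
                                     std::memory_order_relaxed);
}
```
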
static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}