From a1f20c3f8d0dabb9723acccf3ba760acf3ebe62d Mon Sep 17 00:00:00 2001
From: Mathieu Chartier
Date: Thu, 1 Jun 2017 11:26:50 -0700
Subject: [PATCH] Move to release CAS for updating object fields

Relaxed CAS is not sufficient to make sure that threads reading the
field will see the copied contents of objects.

Bug: 37187694
Bug: 62240510
Test: test-art-host

Change-Id: I1239817e2b63e0e43ac3ae3148b73487408b378b
---
 runtime/atomic.h                           |  7 +++++++
 runtime/class_table-inl.h                  |  4 ++--
 runtime/gc/collector/concurrent_copying.cc |  7 +++++--
 runtime/mirror/object-inl.h                | 30 ++++++++++++++++++++++++++++++
 runtime/mirror/object-readbarrier-inl.h    | 30 ++++++++++++++++++++++++++++++
 runtime/mirror/object.h                    | 15 +++++++++++++++
 runtime/read_barrier-inl.h                 |  4 ++--
 7 files changed, 91 insertions(+), 6 deletions(-)

diff --git a/runtime/atomic.h b/runtime/atomic.h
index 45c3165b1..25dd1a3a5 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -257,6 +257,13 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
     return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
   }
 
+  // Atomically replace the value with desired value if it matches the expected value. Prior writes
+  // to other memory locations become visible to the threads that do a consume or an acquire on the
+  // same location.
+  bool CompareExchangeStrongRelease(T expected_value, T desired_value) {
+    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
+  }
+
   // The same, except it may fail spuriously.
   bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
     return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index dfe894913..35fce4063 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -93,7 +93,7 @@ inline mirror::Class* ClassTable::TableSlot::Read() const {
   if (kReadBarrierOption != kWithoutReadBarrier && before_ptr != after_ptr) {
     // If another thread raced and updated the reference, do not store the read barrier updated
     // one.
-    data_.CompareExchangeStrongRelaxed(before, Encode(after_ptr, MaskHash(before)));
+    data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
   }
   return after_ptr.Ptr();
 }
@@ -108,7 +108,7 @@ inline void ClassTable::TableSlot::VisitRoot(const Visitor& visitor) const {
   if (before_ptr != after_ptr) {
     // If another thread raced and updated the reference, do not store the read barrier updated
     // one.
-    data_.CompareExchangeStrongRelaxed(before, Encode(after_ptr, MaskHash(before)));
+    data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
   }
 }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 849d42a9e..ab2146a6b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1975,8 +1975,11 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
       // It was updated by the mutator.
       break;
     }
-  } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
-      false, false, kVerifyNone>(offset, expected_ref, new_ref));
+    // Use release CAS to make sure threads reading the reference see contents of copied objects.
+  } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
+      offset,
+      expected_ref,
+      new_ref));
 }
 
 // Process some roots.
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index baed5f167..d3fc95ff2 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -899,6 +899,36 @@ inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
   return success;
 }
 
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakReleaseObjectWithoutWriteBarrier(
+    MemberOffset field_offset,
+    ObjPtr<Object> old_value,
+    ObjPtr<Object> new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  if (kVerifyFlags & kVerifyWrites) {
+    VerifyObject(new_value);
+  }
+  if (kVerifyFlags & kVerifyReads) {
+    VerifyObject(old_value);
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+  }
+  HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+  bool success = atomic_addr->CompareExchangeWeakRelease(old_ref.reference_,
+                                                         new_ref.reference_);
+  return success;
+}
+
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongReleaseObjectWithoutWriteBarrier(
+    MemberOffset field_offset,
+    ObjPtr<Object> old_value,
+    ObjPtr<Object> new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  if (kVerifyFlags & kVerifyWrites) {
+    VerifyObject(new_value);
+  }
+  if (kVerifyFlags & kVerifyReads) {
+    VerifyObject(old_value);
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+  }
+  HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+  bool success = atomic_addr->CompareExchangeStrongRelease(old_ref.reference_,
+                                                           new_ref.reference_);
+  return success;
+}
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 35a1b733e..9cf42522d 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -350,10 +350,25 @@ class MANAGED LOCKABLE Object {
   template<bool kTransactionActive, bool kCheckTransaction = true,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldWeakReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                    ObjPtr<Object> old_value,
+                                                    ObjPtr<Object> new_value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                       ObjPtr<Object> old_value,
                                                       ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldStrongReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                      ObjPtr<Object> old_value,
+                                                      ObjPtr<Object> new_value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index dbe7f5c95..6d6bf59ad 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -62,7 +62,7 @@ inline MirrorType* ReadBarrier::Barrier(
       // If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
       // updates before us, but it's OK.
       if (kAlwaysUpdateField && ref != old_ref) {
-        obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+        obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
             offset, old_ref, ref);
       }
     }
@@ -80,7 +80,7 @@ inline MirrorType* ReadBarrier::Barrier(
     ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
     // Update the field atomically. This may fail if mutator updates before us, but it's ok.
     if (ref != old_ref) {
-      obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+      obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
           offset, old_ref, ref);
     }
   }
-- 
2.11.0
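
For illustration only (not part of the patch, and not ART code; Payload, Copier,
Reader and field are made-up names): the change relies on the standard
publication pattern. The copying collector first writes the contents of the
to-space copy with plain stores, then publishes the forwarding reference with a
CAS. Only if that CAS has release semantics is a thread that loads the
reference with acquire (or consume/dependency) ordering guaranteed to observe
the copied contents; with a relaxed CAS the reader may legally see the new
pointer but stale contents. A minimal standalone sketch:

// publication_sketch.cc -- standalone illustration, not ART code.
// Build: g++ -std=c++11 -pthread publication_sketch.cc
#include <atomic>
#include <cassert>
#include <thread>

struct Payload {
  int data;
};

// Stands in for an object reference field that the GC updates concurrently.
std::atomic<Payload*> field{nullptr};

// "GC" side: copy the contents, then publish the copy with a release CAS.
void Copier(Payload* from, Payload* to) {
  to->data = from->data;  // plain store: copy the object contents
  Payload* expected = from;
  // Release ordering makes the store to to->data visible to any thread that
  // does an acquire load of `field` and observes `to`. With
  // std::memory_order_relaxed here, the reader below could see the new
  // pointer but not the copied contents.
  field.compare_exchange_strong(expected, to, std::memory_order_release);
}

// "Mutator" side: read the reference with acquire ordering.
void Reader(Payload* to) {
  Payload* ref = field.load(std::memory_order_acquire);
  if (ref == to) {
    assert(ref->data == 7);  // guaranteed: acquire load pairs with release CAS
  }
}

int main() {
  Payload from_space{7};
  Payload to_space{0};
  field.store(&from_space, std::memory_order_relaxed);
  std::thread gc(Copier, &from_space, &to_space);
  std::thread mutator(Reader, &to_space);
  gc.join();
  mutator.join();
  return 0;
}

Roughly speaking, the cost of the stronger ordering shows up only on weakly
ordered CPUs such as ARM, where the release CAS adds ordering (e.g. stlxr);
on x86 a relaxed and a release compare-exchange compile to the same
lock cmpxchg.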