From 4cd662e54440f76fc920cb2c67acab3bba8b33dd Mon Sep 17 00:00:00 2001
From: Hiroshi Yamauchi <yamauchi@google.com>
Date: Thu, 3 Apr 2014 16:28:10 -0700
Subject: [PATCH] Fix Object::Clone()'s pre-fence barrier.

Pass in a pre-fence barrier object that sets the array length, instead
of setting the length after returning from AllocObject().

Fix another potential bug caused by a wrong default pre-fence barrier
parameter value. Since the default value appears error-prone, remove it
and make the pre-fence visitor an explicit parameter.

Fix another potential moving GC bug caused by a missing SirtRef.

Bug: 13097759
Change-Id: I466aa0e50f9e1a5dbf20be5a195edee619c7514e
---
 runtime/class_linker.cc          |  5 +++--
 runtime/gc/allocator/rosalloc.cc |  2 ++
 runtime/gc/heap-inl.h            | 18 ++++++++++++------
 runtime/gc/heap.h                | 15 ++++++++-------
 runtime/mirror/class-inl.h       | 11 +++++++++--
 runtime/mirror/object.cc         | 28 +++++++++++++++++++++++-----
 runtime/sirt_ref-inl.h           | 13 +++++++++----
 runtime/sirt_ref.h               | 16 ++++++++++++++--
 8 files changed, 80 insertions(+), 28 deletions(-)
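Note on the pre-fence visitor pattern this change relies on: any field the GC
needs in order to size or scan a freshly allocated object (an array's length,
a class object's size, the contents of a clone) must be written before the
allocation is published, i.e. before the release fence and before the object
can be observed through the allocation stack. Passing that initialization in
as a functor lets the allocator run it on the raw storage ahead of the fence,
which is what the CopyObjectVisitor added below does for Clone(). The
stand-alone sketch that follows only illustrates the contract; FakeArray,
SetLengthVisitor, AllocArray and the fence placement are invented for the
illustration and are not ART APIs.

  #include <atomic>
  #include <cstddef>
  #include <cstdint>
  #include <cstring>
  #include <new>

  // Toy stand-in for an array object; only the length field matters here.
  struct FakeArray {
    int32_t length;
  };

  // Pre-fence visitor that stores the array length. The allocator invokes it
  // on the raw allocation before the release fence that publishes the object
  // to other threads (and to a concurrent collector).
  class SetLengthVisitor {
   public:
    explicit SetLengthVisitor(int32_t length) : length_(length) {}
    void operator()(FakeArray* obj, size_t usable_size) const {
      static_cast<void>(usable_size);
      obj->length = length_;
    }
   private:
    const int32_t length_;
  };

  // Simplified allocator: zero the storage, run the visitor, then publish.
  template <typename PreFenceVisitor>
  FakeArray* AllocArray(size_t byte_count, const PreFenceVisitor& visitor,
                        std::atomic<FakeArray*>* published_slot) {
    void* storage = ::operator new(byte_count);
    std::memset(storage, 0, byte_count);
    FakeArray* obj = new (storage) FakeArray();
    visitor(obj, byte_count);  // Runs before publication, so no reader can
                               // see the array with a stale length of zero.
    published_slot->store(obj, std::memory_order_release);
    return obj;
  }

  int main() {
    std::atomic<FakeArray*> slot{nullptr};
    SetLengthVisitor visitor(128);
    FakeArray* arr = AllocArray(sizeof(FakeArray), visitor, &slot);
    return arr->length == 128 ? 0 : 1;
  }

The SirtRef changes address the same publication problem from the other side:
CollectGarbageInternal() is a GC point, so a raw mirror::Object* held across
it can be stale if the collector moves the object. Wrapping the pointer in a
SirtRef (SirtRefNoVerify here, because the object is not on the allocation
stack yet and would fail verification) roots it for the GC, and re-reading it
through ref.get() afterwards yields the possibly relocated address.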
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6c5406ec1..78b7cc0c9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -202,7 +202,7 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
   // The GC can't handle an object with a null class since we can't get the size of this object.
   heap->IncrementDisableMovingGC(self);
   SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>(
-      heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass))));
+      heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor())));
   CHECK(java_lang_Class.get() != NULL);
   mirror::Class::SetClassClass(java_lang_Class.get());
   java_lang_Class->SetClass(java_lang_Class.get());
@@ -1180,7 +1180,8 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
   SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
   SirtRef<mirror::DexCache> dex_cache(
       self, down_cast<mirror::DexCache*>(
-          heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize())));
+          heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize(),
+                                  VoidFunctor())));
   if (dex_cache.get() == NULL) {
     return NULL;
   }
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f5f6f1668..920741f39 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1997,6 +1997,8 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
       CHECK_LE(obj_size, kLargeSizeThreshold)
           << "A run slot contains a large object " << Dump();
       CHECK_EQ(SizeToIndex(obj_size), idx)
+          << PrettyTypeOf(obj) << " "
+          << "obj_size=" << obj_size << ", idx=" << idx << " "
          << "A run slot contains an object with wrong size " << Dump();
     }
   }
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 25f20d621..a06f272b5 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -65,7 +65,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
       bool after_is_current_allocator = allocator == GetCurrentAllocator();
       if (is_current_allocator && !after_is_current_allocator) {
         // If the allocator changed, we need to restart the allocation.
-        return AllocObject<kInstrumented>(self, klass, byte_count);
+        return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
       }
       return nullptr;
     }
@@ -111,7 +111,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
     DCHECK(!Runtime::Current()->HasStatsEnabled());
   }
   if (AllocatorHasAllocationStack(allocator)) {
-    PushOnAllocationStack(self, obj);
+    PushOnAllocationStack(self, &obj);
   }
   if (kInstrumented) {
     if (Dbg::IsAllocTrackingEnabled()) {
@@ -135,28 +135,34 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
 // The size of a thread-local allocation stack in the number of references.
 static constexpr size_t kThreadLocalAllocationStackSize = 128;
 
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
   if (kUseThreadLocalAllocationStack) {
-    bool success = self->PushOnThreadLocalAllocationStack(obj);
+    bool success = self->PushOnThreadLocalAllocationStack(*obj);
     if (UNLIKELY(!success)) {
       // Slow path. Allocate a new thread-local allocation stack.
       mirror::Object** start_address;
       mirror::Object** end_address;
       while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
                                                 &end_address)) {
+        // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+        SirtRefNoVerify<mirror::Object> ref(self, *obj);
         CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+        *obj = ref.get();
       }
       self->SetThreadLocalAllocationStack(start_address, end_address);
       // Retry on the new thread-local allocation stack.
-      success = self->PushOnThreadLocalAllocationStack(obj);
+      success = self->PushOnThreadLocalAllocationStack(*obj);
       // Must succeed.
       CHECK(success);
     }
   } else {
     // This is safe to do since the GC will never free objects which are neither in the allocation
     // stack or the live bitmap.
-    while (!allocation_stack_->AtomicPushBack(obj)) {
+    while (!allocation_stack_->AtomicPushBack(*obj)) {
+      // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+      SirtRefNoVerify<mirror::Object> ref(self, *obj);
       CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+      *obj = ref.get();
     }
   }
 }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 587975755..ffb4e5918 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -158,28 +158,28 @@ class Heap {
   ~Heap();
 
   // Allocates and initializes storage for an object instance.
-  template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+  template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
-                              const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+                              const PreFenceVisitor& pre_fence_visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                          GetCurrentAllocator(),
                                                          pre_fence_visitor);
   }
 
-  template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+  template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
-                                        const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+                                        const PreFenceVisitor& pre_fence_visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                          GetCurrentNonMovingAllocator(),
                                                          pre_fence_visitor);
   }
 
-  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
+  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
       Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
-      const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+      const PreFenceVisitor& pre_fence_visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   AllocatorType GetCurrentAllocator() const {
@@ -691,7 +691,8 @@ class Heap {
   void SignalHeapTrimDaemon(Thread* self);
 
   // Push an object onto the allocation stack.
-  void PushOnAllocationStack(Thread* self, mirror::Object* obj);
+  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
   // sweep GC, false for other GC types.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 89d9241f5..025e62a5c 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -442,7 +442,14 @@ inline void Class::SetName(String* name) {
 }
 
 inline void Class::CheckObjectAlloc() {
-  DCHECK(!IsArrayClass()) << PrettyClass(this);
+  DCHECK(!IsArrayClass())
+      << PrettyClass(this)
+      << "An array shouldn't be allocated through this "
+      << "as it requires a pre-fence visitor that sets the array length.";
+  DCHECK(!IsClassClass())
+      << PrettyClass(this)
+      << "A class object shouldn't be allocated through this "
+      << "as it requires a pre-fence visitor that sets the class size.";
   DCHECK(IsInstantiable()) << PrettyClass(this);
   // TODO: decide whether we want this check. It currently fails during bootstrap.
   // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
@@ -454,7 +461,7 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
   CheckObjectAlloc();
   gc::Heap* heap = Runtime::Current()->GetHeap();
   return heap->AllocObjectWithAllocator<true, false>(self, this, this->object_size_,
-                                                     allocator_type);
+                                                     allocator_type, VoidFunctor());
 }
 
 inline Object* Class::AllocObject(Thread* self) {
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index f1485e529..d9155f530 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -66,6 +66,26 @@ static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* sr
   return dest;
 }
 
+// An allocation pre-fence visitor that copies the object.
+class CopyObjectVisitor {
+ public:
+  explicit CopyObjectVisitor(Thread* self, SirtRef<Object>* orig, size_t num_bytes)
+      : self_(self), orig_(orig), num_bytes_(num_bytes) {
+  }
+
+  void operator()(Object* obj, size_t usable_size) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    UNUSED(usable_size);
+    CopyObject(self_, obj, orig_->get(), num_bytes_);
+  }
+
+ private:
+  Thread* const self_;
+  SirtRef<Object>* const orig_;
+  const size_t num_bytes_;
+  DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
+};
+
 Object* Object::Clone(Thread* self) {
   CHECK(!IsClass()) << "Can't clone classes.";
   // Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
@@ -74,13 +94,11 @@ Object* Object::Clone(Thread* self) {
   size_t num_bytes = SizeOf();
   SirtRef<Object> this_object(self, this);
   Object* copy;
+  CopyObjectVisitor visitor(self, &this_object, num_bytes);
   if (heap->IsMovableObject(this)) {
-    copy = heap->AllocObject<true>(self, GetClass(), num_bytes);
+    copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
   } else {
-    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes);
-  }
-  if (LIKELY(copy != nullptr)) {
-    return CopyObject(self, copy, this_object.get(), num_bytes);
+    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
   }
   return copy;
 }
diff --git a/runtime/sirt_ref-inl.h b/runtime/sirt_ref-inl.h
index 7f2d847fa..7de624aad 100644
--- a/runtime/sirt_ref-inl.h
+++ b/runtime/sirt_ref-inl.h
@@ -23,8 +23,11 @@
 
 namespace art {
 
-template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object) : self_(self), sirt_(object) {
-  VerifyObject(object);
+template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object, bool should_verify)
+  : self_(self), sirt_(object) {
+  if (should_verify) {
+    VerifyObject(object);
+  }
   self_->PushSirt(&sirt_);
 }
 
@@ -33,8 +36,10 @@ template<class T> inline SirtRef<T>::~SirtRef() {
   DCHECK_EQ(top_sirt, &sirt_);
 }
 
-template<class T> inline T* SirtRef<T>::reset(T* object) {
-  VerifyObject(object);
+template<class T> inline T* SirtRef<T>::reset(T* object, bool should_verify) {
+  if (should_verify) {
+    VerifyObject(object);
+  }
   T* old_ref = get();
   sirt_.SetReference(0, object);
   return old_ref;
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index 2226e17f5..cf23891ec 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -28,7 +28,7 @@ namespace art {
 template<class T>
 class SirtRef {
  public:
-  SirtRef(Thread* self, T* object);
+  SirtRef(Thread* self, T* object, bool should_verify = true);
   ~SirtRef();
 
   T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -42,7 +42,8 @@ class SirtRef {
   }
 
   // Returns the old reference.
-  T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  T* reset(T* object = nullptr, bool should_verify = true)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   Thread* const self_;
@@ -51,6 +52,17 @@ class SirtRef {
   DISALLOW_COPY_AND_ASSIGN(SirtRef);
 };
 
+// A version of SirtRef which disables the object verification.
+template<class T>
+class SirtRefNoVerify : public SirtRef<T> {
+ public:
+  SirtRefNoVerify(Thread* self, T* object) : SirtRef<T>(self, object, false) {}
+  // Returns the old reference.
+  T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return SirtRef<T>::reset(object, false);
+  }
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_SIRT_REF_H_
-- 
2.11.0