From: Andreas Gampe
Date: Wed, 27 May 2015 04:34:09 +0000 (-0700)
Subject: ART: Fix VerifyObject runtime verification
X-Git-Tag: android-x86-7.1-r1~889^2~1163^2
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=3b45ef277e4a5f7e0314d5df7ef82e480156ba75;p=android-x86%2Fart.git

ART: Fix VerifyObject runtime verification

Update some bit-rotted code to work again. Most tests now work,
for some the verification overhead results in a timeout.

Change-Id: Ieab4f2de474a05e915e24abc93da3c2eeed996eb
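The change hinges on ART's VerifyObjectFlags template parameters: object accessors run runtime verification on what they touch unless the call site passes kVerifyNone, which the image writer and patchoat paths need because the objects they poke live outside the managed heap. Below is a minimal standalone sketch of that pattern, not ART code; every name in it (SketchObject, SketchVerifyFlags, InSketchHeap, ...) is made up for illustration.

#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative flag, not the real VerifyObjectFlags enum.
enum SketchVerifyFlags : uint32_t { kSketchVerifyNone = 0, kSketchVerifyThis = 1 };

static bool InSketchHeap(const void* p);

struct SketchObject {
  uint32_t monitor = 0;

  template <SketchVerifyFlags kVerifyFlags = kSketchVerifyThis>
  uint32_t GetLockWord() const {
    if (kVerifyFlags != kSketchVerifyNone) {
      // The flag is a compile-time constant, so this check vanishes for call
      // sites that pass kSketchVerifyNone.
      assert(InSketchHeap(this) && "object outside the managed heap");
    }
    return monitor;
  }
};

// Pretend managed heap: only objects inside this array pass "verification".
static SketchObject g_sketch_heap[4];
static bool InSketchHeap(const void* p) {
  return p >= static_cast<const void*>(g_sketch_heap) &&
         p < static_cast<const void*>(g_sketch_heap + 4);
}

int main() {
  SketchObject* in_heap = &g_sketch_heap[0];
  SketchObject image_copy;  // lives outside the heap, like an image-writer copy

  std::cout << in_heap->GetLockWord() << "\n";                       // verified
  std::cout << image_copy.GetLockWord<kSketchVerifyNone>() << "\n";  // check skipped
  return 0;
}

Because the flag is resolved at compile time, paths that opt out pay nothing for the disabled check, which is why the debug-only verification can stay on by default everywhere else.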
---

diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index bf32febab..02a258818 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -972,8 +972,8 @@ void ImageWriter::CopyAndFixupObjects() {
   // Fix up the object previously had hash codes.
   for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
     Object* obj = hash_pair.first;
-    DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0U);
-    obj->SetLockWord(LockWord::FromHashCode(hash_pair.second, 0U), false);
+    DCHECK_EQ(obj->GetLockWord<kVerifyNone>(false).ReadBarrierState(), 0U);
+    obj->SetLockWord<kVerifyNone>(LockWord::FromHashCode(hash_pair.second, 0U), false);
   }
   saved_hashes_.clear();
 }
@@ -1008,11 +1008,11 @@ bool ImageWriter::CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::
   const size_t num_elements = arr->GetLength();
   if (target_ptr_size_ == 4u) {
     // Will get fixed up by fixup object.
-    dst->SetClass(down_cast<mirror::Class*>(
+    dst->SetClass<kVerifyNone>(down_cast<mirror::Class*>(
         GetImageAddress(mirror::IntArray::GetArrayClass())));
   } else {
     DCHECK_EQ(target_ptr_size_, 8u);
-    dst->SetClass(down_cast<mirror::Class*>(
+    dst->SetClass<kVerifyNone>(down_cast<mirror::Class*>(
         GetImageAddress(mirror::LongArray::GetArrayClass())));
   }
   mirror::Array* dest_array = down_cast<mirror::Array*>(dst);
@@ -1027,15 +1027,15 @@ bool ImageWriter::CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::
       fixup_location = image_begin_ + it2->second;
     }
     if (target_ptr_size_ == 4u) {
-      down_cast<mirror::IntArray*>(dest_array)->SetWithoutChecks<false>(
+      down_cast<mirror::IntArray*>(dest_array)->SetWithoutChecks<false, true, kVerifyNone>(
           i, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(fixup_location)));
     } else {
       DCHECK_EQ(target_ptr_size_, 8u);
-      down_cast<mirror::LongArray*>(dest_array)->SetWithoutChecks<false>(
+      down_cast<mirror::LongArray*>(dest_array)->SetWithoutChecks<false, true, kVerifyNone>(
           i, reinterpret_cast<uint64_t>(fixup_location));
     }
   }
-  dst->SetLockWord(LockWord::Default(), false);
+  dst->SetLockWord<kVerifyNone>(LockWord::Default(), false);
   return true;
 }

diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index ef84a1717..8db1d2398 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -563,21 +563,21 @@ void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
   uintptr_t quick = reinterpret_cast<uintptr_t>(
       object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size));
   if (quick != 0) {
-    copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(quick + delta_),
-                                                    pointer_size);
+    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+        reinterpret_cast<void*>(quick + delta_), pointer_size);
   }

   uintptr_t interpreter = reinterpret_cast<uintptr_t>(
       object->GetEntryPointFromInterpreterPtrSize(pointer_size));
   if (interpreter != 0) {
-    copy->SetEntryPointFromInterpreterPtrSize(
+    copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
         reinterpret_cast<EntryPointFromInterpreter*>(interpreter + delta_), pointer_size);
   }

   uintptr_t native_method = reinterpret_cast<uintptr_t>(
       object->GetEntryPointFromJniPtrSize(pointer_size));
   if (native_method != 0) {
-    copy->SetEntryPointFromJniPtrSize(reinterpret_cast<void*>(native_method + delta_),
-                                      pointer_size);
+    copy->SetEntryPointFromJniPtrSize<kVerifyNone>(
+        reinterpret_cast<void*>(native_method + delta_), pointer_size);
   }
 }

diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 8cd6ca677..3bcaf93e6 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -38,22 +38,24 @@ class ArtMethod;

 class ScopedQuickEntrypointChecks {
  public:
-  explicit ScopedQuickEntrypointChecks(Thread *self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : self_(self) {
-    if (kIsDebugBuild) {
+  explicit ScopedQuickEntrypointChecks(Thread *self,
+                                       bool entry_check = kIsDebugBuild,
+                                       bool exit_check = kIsDebugBuild)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
+    if (entry_check) {
       TestsOnEntry();
     }
   }

-  explicit ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : self_(kIsDebugBuild ? Thread::Current() : nullptr) {
+  ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
     if (kIsDebugBuild) {
       TestsOnEntry();
     }
   }

   ~ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if (kIsDebugBuild) {
+    if (exit_check_) {
       TestsOnExit();
     }
   }
@@ -70,6 +72,7 @@ class ScopedQuickEntrypointChecks {
   }

   Thread* const self_;
+  bool exit_check_;
 };

 static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, Runtime::CalleeSaveType type) {

diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index eb1b1056a..2bb73efa2 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -29,7 +29,9 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
                                                              Thread* self,
                                                              uintptr_t lr)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  ScopedQuickEntrypointChecks sqec(self);
+  // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
+  // that part.
+  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   const void* result;
   if (instrumentation->IsDeoptimized(method)) {
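The instrumentation entrypoint above and the resolution trampoline below both keep the entry-time check but disable the exit-time check, because by the time the scope ends the stack no longer matches what the verifier expects. A standalone sketch of that RAII shape follows; the names (ScopedChecksSketch, kIsDebugBuildSketch, ResolutionTrampolineSketch) are illustrative, not the real ScopedQuickEntrypointChecks.

#include <iostream>

constexpr bool kIsDebugBuildSketch = true;  // stand-in for ART's kIsDebugBuild

class ScopedChecksSketch {
 public:
  explicit ScopedChecksSketch(bool entry_check = kIsDebugBuildSketch,
                              bool exit_check = kIsDebugBuildSketch)
      : exit_check_(exit_check) {
    if (entry_check) {
      std::cout << "verify stack/thread on entry\n";
    }
  }

  ~ScopedChecksSketch() {
    // The destructor consults the flag captured at construction time, so a
    // call site that is about to rewrite the stack can opt out of this check.
    if (exit_check_) {
      std::cout << "verify stack/thread on exit\n";
    }
  }

 private:
  const bool exit_check_;
};

void ResolutionTrampolineSketch() {
  // Mirrors the new call sites: entry check as usual, no exit check because
  // the frame layout will no longer match when this function returns.
  ScopedChecksSketch checks(kIsDebugBuildSketch, /*exit_check=*/false);
  // ... resolve the callee and stash it in the frame ...
}

int main() {
  ResolutionTrampolineSketch();
  return 0;
}

Capturing exit_check in the constructor keeps the decision at the call site, so only the two entrypoints that actually disturb the stack give up the exit verification.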
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 838427fcb..227c5b041 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -823,7 +823,10 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                     Thread* self,
                                                     StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  ScopedQuickEntrypointChecks sqec(self);
+  // The resolution trampoline stashes the resolved method into the callee-save frame to transport
+  // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
+  // does not have the same stack layout as the callee-save method).
+  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
   // Start new JNI local reference state
   JNIEnvExt* env = self->GetJniEnv();
   ScopedObjectAccessUnchecked soa(env);

diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 8b3418d6b..9696dcd75 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -237,7 +237,7 @@ inline void PrimitiveArray<T>::Set(int32_t i, T value) {
 }

 template<typename T>
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline void PrimitiveArray<T>::SetWithoutChecks(int32_t i, T value) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
@@ -245,7 +245,7 @@ inline void PrimitiveArray<T>::SetWithoutChecks(int32_t i, T value) {
   if (kTransactionActive) {
     Runtime::Current()->RecordWriteArray(this, i, GetWithoutChecks(i));
   }
-  DCHECK(CheckIsValidIndex(i));
+  DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
   GetData()[i] = value;
 }
 // Backward copy where elements are of aligned appropriately for T. Count is in T sized units.

diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 832ad68dc..167f824d1 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -131,7 +131,9 @@ class MANAGED PrimitiveArray : public Array {

   // TODO fix thread safety analysis broken by the use of template. This should be
   // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
-  template<bool kTransactionActive, bool kCheckTransaction = true>
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetWithoutChecks(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;

   /*

diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 39d0f5664..7760ea2cf 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -59,19 +59,23 @@ inline void Object::SetClass(Class* new_klass) {
       OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass);
 }

+template<VerifyObjectFlags kVerifyFlags>
 inline LockWord Object::GetLockWord(bool as_volatile) {
   if (as_volatile) {
-    return LockWord(GetField32Volatile(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+    return LockWord(GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
   }
-  return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+  return LockWord(GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
 }

+template<VerifyObjectFlags kVerifyFlags>
 inline void Object::SetLockWord(LockWord new_val, bool as_volatile) {
   // Force use of non-transactional mode and do not check.
   if (as_volatile) {
-    SetField32Volatile<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
+    SetField32Volatile<false, false, kVerifyFlags>(
+        OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
   } else {
-    SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
+    SetField32<false, false, kVerifyFlags>(
+        OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
   }
 }

diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 5afe99f3f..2c0e62677 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -125,7 +125,9 @@ class MANAGED LOCKABLE Object {

   // As_volatile can be false if the mutators are suspended. This is an optimization since it
   // avoids the barriers.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
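The new template parameters default to kDefaultVerifyFlags, so existing GetLockWord/SetLockWord and SetWithoutChecks call sites compile unchanged and keep verifying; only the image writer and patchoat paths opt out explicitly. A tiny sketch of how the defaulted parameter keeps old call sites source-compatible (illustrative names only, not ART declarations):

#include <cstdint>
#include <iostream>

// Illustrative flag values, not the real VerifyObjectFlags enum.
enum DemoVerifyFlags : uint32_t { kDemoVerifyNone = 0, kDemoDefaultVerifyFlags = 1 };

struct DemoObject {
  // Mirrors the shape of the new declarations: a defaulted verify-flag
  // template parameter in front of the unchanged argument list.
  template <DemoVerifyFlags kVerifyFlags = kDemoDefaultVerifyFlags>
  uint32_t GetLockWord(bool as_volatile) const {
    (void)as_volatile;
    return kVerifyFlags;  // report which mode this call site selected
  }
};

int main() {
  DemoObject o;
  // Pre-existing callers need no edit and still get the verifying default:
  std::cout << o.GetLockWord(false) << "\n";                   // prints 1
  // Image-writer/patchoat style callers opt out explicitly:
  std::cout << o.GetLockWord<kDemoVerifyNone>(false) << "\n";  // prints 0
  return 0;
}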