From: Nicolas Geoffray
Date: Fri, 6 Nov 2015 14:18:27 +0000 (+0000)
Subject: Fix interaction between JIT and instrumentation.
X-Git-Tag: android-x86-7.1-r1~852^2~71^2
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=a5891e81a2fb833307cf7c7e7267070dc0223dc8;p=android-x86%2Fart.git

Fix interaction between JIT and instrumentation.

- The JIT needs to go through the instrumentation to update entry points.
- The instrumentation needs to know if a method got JITted to know if it
  needs to deoptimize.

bug:25438583
Change-Id: I4b186a1da9f4a3fb329efd052a774d5502a902a1
---

diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 5f4f47292..2125c9a26 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -177,7 +177,8 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
   }
 
   // Don't compile the method if we are supposed to be deoptimized.
-  if (runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+  instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
     return false;
   }
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 2a8cf9965..dbb546da2 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -456,6 +456,16 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
   return method_header;
 }
 
+bool ArtMethod::HasAnyCompiledCode() {
+  // Check whether the JIT has compiled it.
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr && jit->GetCodeCache()->ContainsMethod(this)) {
+    return true;
+  }
+
+  // Check whether we have AOT code.
+  return Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(this) != nullptr;
+}
 void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
   memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce9f2025c..201b3e64d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -454,6 +454,9 @@ class ArtMethod FINAL {
   const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Returns whether the method has any compiled code, JIT or AOT.
+  bool HasAnyCompiledCode() SHARED_REQUIRES(Locks::mutator_lock_);
+
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The class we are a part of.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e523fbb10..a25d0033c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3284,9 +3284,9 @@ static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
     return DeoptimizationRequest::kFullDeoptimization;
   } else {
     // We don't need to deoptimize if the method has not been compiled.
-    ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-    const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
+    const bool is_compiled = m->HasAnyCompiledCode();
     if (is_compiled) {
+      ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
       // If the method may be called through its direct code pointer (without loading
       // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
       if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
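
The debugger change above only works because ArtMethod::HasAnyCompiledCode() consults both
code sources: a method that was JIT-compiled but has no AOT (oat) code previously looked
"not compiled" to the debugger, so no deoptimization was requested for it. The following
standalone sketch mirrors that two-source check; Method, FakeJitCodeCache and FakeOatTable
are hypothetical stand-ins for illustration, not ART's real classes.

#include <unordered_map>
#include <unordered_set>

struct Method {};  // stand-in for ArtMethod

class FakeJitCodeCache {  // stand-in for jit::JitCodeCache
 public:
  void Add(const Method* m) { methods_.insert(m); }
  bool ContainsMethod(const Method* m) const { return methods_.count(m) != 0; }

 private:
  std::unordered_set<const Method*> methods_;
};

class FakeOatTable {  // stand-in for the ClassLinker's AOT (oat) code lookup
 public:
  void Add(const Method* m, const void* code) { code_[m] = code; }
  const void* GetQuickCodeFor(const Method* m) const {
    auto it = code_.find(m);
    return it == code_.end() ? nullptr : it->second;
  }

 private:
  std::unordered_map<const Method*, const void*> code_;
};

// Mirrors the shape of ArtMethod::HasAnyCompiledCode(): JIT code first, then AOT code.
bool HasAnyCompiledCode(const Method* m, const FakeJitCodeCache* jit, const FakeOatTable& oat) {
  if (jit != nullptr && jit->ContainsMethod(m)) {
    return true;  // the JIT code cache holds code for this method
  }
  return oat.GetQuickCodeFor(m) != nullptr;  // fall back to AOT-compiled code
}

With only the second check, a breakpoint in a JIT-only method would not trigger the
selective deoptimization path in GetRequiredDeoptimizationKind().
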
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fbcba1b88..9dac5049c 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -117,6 +117,16 @@ bool JitCodeCache::ContainsPc(const void* ptr) const {
   return code_map_->Begin() <= ptr && ptr < code_map_->End();
 }
 
+bool JitCodeCache::ContainsMethod(ArtMethod* method) {
+  MutexLock mu(Thread::Current(), lock_);
+  for (auto& it : method_code_map_) {
+    if (it.second == method) {
+      return true;
+    }
+  }
+  return false;
+}
+
 class ScopedCodeCacheWrite {
  public:
   explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
@@ -276,26 +286,36 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
     __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                             reinterpret_cast<char*>(code_ptr + code_size));
+  }
+  // We need to update the entry point in the runnable state for the instrumentation.
+  {
+    MutexLock mu(self, lock_);
     method_code_map_.Put(code_ptr, method);
-    // We have checked there was no collection in progress earlier. If we
-    // were, setting the entry point of a method would be unsafe, as the collection
-    // could delete it.
-    DCHECK(!collection_in_progress_);
-    method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
-  }
-  VLOG(jit)
-      << "JIT added "
-      << PrettyMethod(method) << "@" << method
-      << " ccache_size=" << PrettySize(CodeCacheSize()) << ": "
-      << " dcache_size=" << PrettySize(DataCacheSize()) << ": "
-      << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
-      << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+        method, method_header->GetEntryPoint());
+    if (collection_in_progress_) {
+      // We need to update the live bitmap if there is a GC to ensure it sees this new
+      // code.
+      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+    }
+    VLOG(jit)
+        << "JIT added "
+        << PrettyMethod(method) << "@" << method
+        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
+        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
+        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
+        << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+  }
 
   return reinterpret_cast<uint8_t*>(method_header);
 }
 
 size_t JitCodeCache::CodeCacheSize() {
   MutexLock mu(Thread::Current(), lock_);
+  return CodeCacheSizeLocked();
+}
+
+size_t JitCodeCache::CodeCacheSizeLocked() {
   size_t bytes_allocated = 0;
   mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
   return bytes_allocated;
@@ -303,6 +323,10 @@ size_t JitCodeCache::CodeCacheSize() {
 
 size_t JitCodeCache::DataCacheSize() {
   MutexLock mu(Thread::Current(), lock_);
+  return DataCacheSizeLocked();
+}
+
+size_t JitCodeCache::DataCacheSizeLocked() {
   size_t bytes_allocated = 0;
   mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
   return bytes_allocated;
@@ -417,19 +441,25 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
   }
 
   size_t map_size = 0;
-  ScopedThreadSuspension sts(self, kSuspended);
+  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
 
-  // Walk over all compiled methods and set the entry points of these
-  // methods to interpreter.
+  // Wait for an existing collection, or let everyone know we are starting one.
   {
+    ScopedThreadSuspension sts(self, kSuspended);
     MutexLock mu(self, lock_);
     if (WaitForPotentialCollectionToComplete(self)) {
       return;
+    } else {
+      collection_in_progress_ = true;
     }
-    collection_in_progress_ = true;
+  }
+  // Walk over all compiled methods and set the entry points of these
+  // methods to interpreter.
+  {
+    MutexLock mu(self, lock_);
     map_size = method_code_map_.size();
     for (auto& it : method_code_map_) {
-      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+      instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
     }
     for (ProfilingInfo* info : profiling_infos_) {
       info->GetMethod()->SetProfilingInfo(nullptr);
@@ -440,16 +470,12 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
   {
     Barrier barrier(0);
     size_t threads_running_checkpoint = 0;
-    {
-      // Walking the stack requires the mutator lock.
-      // We only take the lock when running the checkpoint and not waiting so that
-      // when we go back to suspended, we can execute checkpoints that were requested
-      // concurrently, and then move to waiting for our own checkpoint to finish.
-      ScopedObjectAccess soa(self);
-      MarkCodeClosure closure(this, &barrier);
-      threads_running_checkpoint =
-          Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
-    }
+    MarkCodeClosure closure(this, &barrier);
+    threads_running_checkpoint =
+        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+    // Now that we have run our checkpoint, move to a suspended state and wait
+    // for other threads to run the checkpoint.
+    ScopedThreadSuspension sts(self, kSuspended);
     if (threads_running_checkpoint != 0) {
       barrier.Increment(self, threads_running_checkpoint);
     }
@@ -457,7 +483,6 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
 
   {
     MutexLock mu(self, lock_);
-    DCHECK_EQ(map_size, method_code_map_.size());
     // Free unused compiled code, and restore the entry point of used compiled code.
     {
       ScopedCodeCacheWrite scc(code_map_.get());
@@ -467,7 +492,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
         uintptr_t allocation = FromCodeToAllocation(code_ptr);
         const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
         if (GetLiveBitmap()->Test(allocation)) {
-          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+          instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
           ++it;
         } else {
           method->ClearCounter();
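
The CodeCacheSizeLocked()/DataCacheSizeLocked() split above follows a common pattern: the
public accessor takes lock_ and delegates to a variant that assumes the lock is already
held, so the VLOG inside CommitCodeInternal, which now runs under lock_, cannot
self-deadlock on a non-reentrant mutex. A minimal sketch of the pattern, using a
hypothetical CacheStats class and std::mutex rather than ART's Mutex:

#include <cstddef>
#include <mutex>

// Hypothetical, simplified class; not ART's JitCodeCache.
class CacheStats {
 public:
  // Safe to call from threads that do not hold lock_.
  std::size_t CodeCacheSize() {
    std::lock_guard<std::mutex> guard(lock_);
    return CodeCacheSizeLocked();
  }

  void CommitCode(std::size_t bytes) {
    std::lock_guard<std::mutex> guard(lock_);
    code_bytes_ += bytes;
    // Already under lock_: calling CodeCacheSize() here would deadlock on a
    // non-recursive mutex, so the *Locked variant is used instead.
    last_logged_size_ = CodeCacheSizeLocked();
  }

 private:
  // Caller must hold lock_.
  std::size_t CodeCacheSizeLocked() const { return code_bytes_; }

  std::mutex lock_;
  std::size_t code_bytes_ = 0;
  std::size_t last_logged_size_ = 0;
};
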
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index afff65788..131446c48 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -83,6 +83,9 @@ class JitCodeCache {
   // Return true if the code cache contains this pc.
   bool ContainsPc(const void* pc) const;
 
+  // Return true if the code cache contains this method.
+  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
+
   // Reserve a region of data of size at least "size". Returns null if there is no more room.
   uint8_t* ReserveData(Thread* self, size_t size)
       SHARED_REQUIRES(Locks::mutator_lock_)
@@ -163,6 +166,12 @@ class JitCodeCache {
   // Free in the mspace allocations taken by 'method'.
   void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
 
+  // Number of bytes allocated in the code cache.
+  size_t CodeCacheSizeLocked() REQUIRES(lock_);
+
+  // Number of bytes allocated in the data cache.
+  size_t DataCacheSizeLocked() REQUIRES(lock_);
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
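
The recurring substitution in this change, replacing direct calls to
method->SetEntryPointFromQuickCompiledCode(...) with instrumentation->UpdateMethodsCode(...),
exists so that the instrumentation decides whether a method may actually run compiled code.
A rough, self-contained sketch of that idea follows; the types are simplified stand-ins,
and unlike the real Instrumentation::UpdateMethodsCode(), which takes only the method and
the new code, the interpreter bridge is passed explicitly here for clarity.

#include <unordered_set>

using EntryPoint = const void*;

struct Method {
  EntryPoint entry_point = nullptr;  // what an invocation of the method will run
};

// Simplified stand-in for art::instrumentation::Instrumentation.
class FakeInstrumentation {
 public:
  void Deoptimize(const Method* m) { deoptimized_.insert(m); }
  void DeoptimizeEverything() { deopt_everything_ = true; }

  // Mirrors the intent of Instrumentation::UpdateMethodsCode(): install the new
  // (JIT or AOT) entry point only if the method is allowed to run compiled code;
  // otherwise keep the method routed to the interpreter bridge.
  void UpdateMethodsCode(Method* m, EntryPoint new_code, EntryPoint interpreter_bridge) {
    if (deopt_everything_ || deoptimized_.count(m) != 0) {
      m->entry_point = interpreter_bridge;
    } else {
      m->entry_point = new_code;
    }
  }

 private:
  bool deopt_everything_ = false;
  std::unordered_set<const Method*> deoptimized_;
};

If the JIT wrote the entry point directly, it could reinstall compiled code on a method the
debugger had just deoptimized, which is exactly the interaction this commit fixes.
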