From: Nicolas Geoffray
Date: Tue, 1 Mar 2016 13:17:58 +0000 (+0000)
Subject: Give the JIT its own arena pool to avoid lock contention.
X-Git-Tag: android-x86-7.1-r1~312^2~5^2~19^2~8^2
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=25e0456b6ea13eba290b63ea88b6b7120ed89413;p=android-x86%2Fart.git

Give the JIT its own arena pool to avoid lock contention.

Sharing it with the verifier and the class loader is not
ideal, especially at startup time.

bug:27398183
bug:23128949

Change-Id: I1b91663a13f6c5b33ad3b4be780d93eb7fe445b4
---
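Note (illustrative, not part of the applied change): before this patch, the JIT,
the verifier, and the class loader all allocated arenas from the single runtime
ArenaPool, so every AllocArena()/FreeArenaChain() call serialized on that pool's
one mutex. A minimal stand-alone sketch of that pattern, using simplified
stand-ins (Pool, Arena) rather than ART's real classes:

    #include <mutex>
    #include <vector>

    // Simplified stand-ins for ART's Arena / ArenaPool (illustrative only).
    struct Arena {};

    class Pool {
     public:
      Arena* AllocArena() {
        std::lock_guard<std::mutex> guard(lock_);  // one mutex per pool
        if (!free_arenas_.empty()) {
          Arena* arena = free_arenas_.back();
          free_arenas_.pop_back();
          return arena;
        }
        return new Arena();
      }
      void FreeArena(Arena* arena) {
        std::lock_guard<std::mutex> guard(lock_);
        free_arenas_.push_back(arena);
      }

     private:
      std::mutex lock_;
      std::vector<Arena*> free_arenas_;
    };

    // Before: one pool, so JIT + verifier + class loader queue on one lock.
    // After:  the JIT allocates from its own pool and never blocks the others.
    Pool shared_pool;  // verifier, class loader, ...
    Pool jit_pool;     // JIT compiler only

Giving the JIT its own pool (jit_arena_pool_, named "CompilerMetadata" in the
diff below) means JIT compilation threads only contend with each other, which
matters most at startup when verification and class loading are also arena-heavy.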
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 23601c39e..79a6d38fc 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -230,10 +230,10 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
   }
 
   // Trim maps to reduce memory usage.
-  // TODO: measure how much this increases compile time.
+  // TODO: move this to an idle phase.
   {
     TimingLogger::ScopedTiming t2("TrimMaps", &logger);
-    runtime->GetArenaPool()->TrimMaps();
+    runtime->GetJitArenaPool()->TrimMaps();
   }
 
   total_time_ += NanoTime() - start_time;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5a9f2583f..13d6d620f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -861,7 +861,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   const uint32_t access_flags = method->GetAccessFlags();
   const InvokeType invoke_type = method->GetInvokeType();
 
-  ArenaAllocator arena(Runtime::Current()->GetArenaPool());
+  ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
   CodeVectorAllocator code_allocator(&arena);
   std::unique_ptr<CodeGenerator> codegen;
   {
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index a4b38ea96..44af3f75b 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -183,10 +183,10 @@ MallocArena::~MallocArena() {
   free(reinterpret_cast<void*>(memory_));
 }
 
-MemMapArena::MemMapArena(size_t size, bool low_4gb) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
   std::string error_msg;
   map_.reset(MemMap::MapAnonymous(
-      "LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
+      name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
   CHECK(map_.get() != nullptr) << error_msg;
   memory_ = map_->Begin();
   size_ = map_->Size();
@@ -210,9 +210,12 @@ void Arena::Reset() {
   }
 }
 
-ArenaPool::ArenaPool(bool use_malloc, bool low_4gb)
-    : use_malloc_(use_malloc), lock_("Arena pool lock", kArenaPoolLock), free_arenas_(nullptr),
-      low_4gb_(low_4gb) {
+ArenaPool::ArenaPool(bool use_malloc, bool low_4gb, const char* name)
+    : use_malloc_(use_malloc),
+      lock_("Arena pool lock", kArenaPoolLock),
+      free_arenas_(nullptr),
+      low_4gb_(low_4gb),
+      name_(name) {
   if (low_4gb) {
     CHECK(!use_malloc) << "low4gb must use map implementation";
   }
@@ -250,7 +253,7 @@ Arena* ArenaPool::AllocArena(size_t size) {
   }
   if (ret == nullptr) {
     ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) :
-        new MemMapArena(size, low_4gb_);
+        new MemMapArena(size, low_4gb_, name_);
   }
   ret->Reset();
   return ret;
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 8a96571e9..728f89722 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -261,7 +261,7 @@ class MallocArena FINAL : public Arena {
 
 class MemMapArena FINAL : public Arena {
  public:
-  MemMapArena(size_t size, bool low_4gb);
+  MemMapArena(size_t size, bool low_4gb, const char* name);
   virtual ~MemMapArena();
   void Release() OVERRIDE;
 
@@ -271,7 +271,9 @@ class ArenaPool {
  public:
-  explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
+  ArenaPool(bool use_malloc = true,
+            bool low_4gb = false,
+            const char* name = "LinearAlloc");
   ~ArenaPool();
   Arena* AllocArena(size_t size) REQUIRES(!lock_);
   void FreeArenaChain(Arena* first) REQUIRES(!lock_);
@@ -287,6 +289,7 @@ class ArenaPool {
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   Arena* free_arenas_ GUARDED_BY(lock_);
   const bool low_4gb_;
+  const char* name_;
   DISALLOW_COPY_AND_ASSIGN(ArenaPool);
 };
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index eb5455a4c..47ef2143f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -316,6 +316,7 @@ Runtime::~Runtime() {
   linear_alloc_.reset();
   low_4gb_arena_pool_.reset();
   arena_pool_.reset();
+  jit_arena_pool_.reset();
   MemMap::Shutdown();
   ATRACE_END();
@@ -1019,10 +1020,13 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
   // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
   // can't be trimmed as easily.
   const bool use_malloc = IsAotCompiler();
-  arena_pool_.reset(new ArenaPool(use_malloc, false));
+  arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
+  jit_arena_pool_.reset(
+      new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
+
   if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
     // 4gb, no malloc. Explanation in header.
-    low_4gb_arena_pool_.reset(new ArenaPool(false, true));
+    low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
   }
   linear_alloc_.reset(CreateLinearAlloc());
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8aac4ce9b..83e77d237 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -561,6 +561,9 @@ class Runtime {
   ArenaPool* GetArenaPool() {
     return arena_pool_.get();
   }
+  ArenaPool* GetJitArenaPool() {
+    return jit_arena_pool_.get();
+  }
   const ArenaPool* GetArenaPool() const {
     return arena_pool_.get();
   }
@@ -669,6 +672,7 @@ class Runtime {
 
   gc::Heap* heap_;
 
+  std::unique_ptr<ArenaPool> jit_arena_pool_;
   std::unique_ptr<ArenaPool> arena_pool_;
   // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
   // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
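
Note on the TrimMaps() call kept in jit_compiler.cc: the JIT pool is created
with use_malloc = false because, as the runtime.cc comment says, only
MemMap-backed arenas can be trimmed. Returning unused pages to the kernel with
madvise(MADV_DONTNEED) keeps the mapping (and every pointer into it) valid
while reclaiming the physical pages. A minimal sketch of that trimming idea
(illustrative only; TrimTail is a hypothetical helper, not ART's
MemMapArena::Release):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdint>

    // Sketch: release the unused tail of a page-aligned anonymous mapping.
    // The mapping stays valid, so no pointers are invalidated; the kernel
    // reclaims the physical pages and faults in zeroed pages on the next
    // touch. malloc-backed arenas offer no equivalent, which is why only
    // the MemMap implementation supports trimming.
    void TrimTail(uint8_t* begin, size_t bytes_in_use, size_t capacity) {
      const uintptr_t page = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE));
      const uintptr_t first_unused =
          (reinterpret_cast<uintptr_t>(begin) + bytes_in_use + page - 1) & ~(page - 1);
      const uintptr_t end = reinterpret_cast<uintptr_t>(begin) + capacity;
      if (first_unused < end) {
        madvise(reinterpret_cast<void*>(first_unused), end - first_unused, MADV_DONTNEED);
      }
    }

The trade-off named in the commit: a dedicated MemMap pool makes JIT arena
allocation slightly slower than malloc, but its metadata can be trimmed back
to the OS between compilations.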