From: Mathieu Chartier
Date: Thu, 4 Jun 2015 20:42:14 +0000 (-0700)
Subject: Add optimization for LOW_4G allocator
X-Git-Tag: android-x86-7.1-r1~889^2~1078^2
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=7889a77e464b820745c9507ccd484f68e7b1c650;p=android-x86%2Fart.git

Add optimization for LOW_4G allocator

The optimization uses the maps_ field to skip over regions which we
know won't be large enough for the request. Reduces time to create
heap from 500-700ms to 100ms on N9.

(cherry picked from commit 1c8bef4a71612f10b89b102434f70de5a127cc74)

Bug: 20727525
Change-Id: I4fa92d58c2e135ba31a03ababff46669089bb542
---

diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index d8c1ec150..6b9b5d101 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -314,7 +314,31 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
   if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
+    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+      // Use maps_ as an optimization to skip over large maps.
+      // Find the first map which is address > ptr.
+      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
+      if (it != maps_->begin()) {
+        auto before_it = it;
+        --before_it;
+        // Start at the end of the map before the upper bound.
+        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+        CHECK_ALIGNED(ptr, kPageSize);
+      }
+      while (it != maps_->end()) {
+        // How much space do we have until the next map?
+        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+        // If the space may be sufficient, break out of the loop.
+        if (delta >= page_aligned_byte_count) {
+          break;
+        }
+        // Otherwise, skip to the end of the map.
+        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+        CHECK_ALIGNED(ptr, kPageSize);
+        ++it;
+      }
       if (4U * GB - ptr < page_aligned_byte_count) {  // Not enough memory until 4GB.
         if (first_run) {
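
The following is a minimal, standalone sketch of the gap-search idea in the hunk above, for readers who want to try the technique outside of ART. It is not the ART implementation: the names (FindGap, ReservationMap), the fixed 4 KB page size, and the addresses in main() are illustrative stand-ins, and a plain std::map of start/end addresses replaces the real bookkeeping of MemMap objects walked under mem_maps_lock_.

// Sketch only; simplified stand-ins for the ART data structures.
#include <cstdint>
#include <cstdio>
#include <algorithm>
#include <map>

namespace {

constexpr uint64_t kPageSize = 4096;                        // Illustrative page size.
constexpr uint64_t kLimit = 4ULL * 1024 * 1024 * 1024;      // 4 GB, as in LOW_4G.

// Hypothetical stand-in for maps_: reservation start address -> end address.
using ReservationMap = std::map<uint64_t, uint64_t>;

// Returns the first address >= hint with at least byte_count free bytes before
// the next reservation and below kLimit, or 0 if no such gap exists.
uint64_t FindGap(const ReservationMap& maps, uint64_t hint, uint64_t byte_count) {
  uint64_t ptr = hint;
  // Find the first reservation that starts strictly above ptr.
  auto it = maps.upper_bound(ptr);
  if (it != maps.begin()) {
    // If ptr lands inside the previous reservation, restart at its end.
    auto before_it = it;
    --before_it;
    ptr = std::max(ptr, before_it->second);
  }
  // Skip whole reservations whose preceding gap is too small.
  while (it != maps.end()) {
    uint64_t delta = it->first - ptr;
    if (delta >= byte_count) {
      return ptr;  // The gap before the next reservation is large enough.
    }
    ptr = it->second;  // Jump straight past this reservation.
    ++it;
  }
  // Past the last reservation: check the space up to the 4 GB limit.
  return (kLimit - ptr >= byte_count) ? ptr : 0;
}

}  // namespace

int main() {
  // Two existing reservations: [0x10000, 0x50000) and [0x52000, 0x80000).
  ReservationMap maps = {{0x10000, 0x50000}, {0x52000, 0x80000}};
  // Searching for 16 pages starting at 0x20000: the first reservation is
  // skipped in one step, the 8 KB gap at 0x50000 is rejected, and the
  // search settles on 0x80000.
  uint64_t result = FindGap(maps, 0x20000, 16 * kPageSize);
  std::printf("found gap at 0x%llx\n", static_cast<unsigned long long>(result));
  return 0;
}

The point mirrored from the diff is the two jumps: upper_bound() avoids scanning from the bottom of the address space, and advancing ptr straight to a reservation's end (BaseEnd() in the real code) avoids stepping page by page through memory that is already mapped, which is where the 500-700 ms went.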