From 5ae2c9335a0c4eeaf4c67a52d066f16aad1aa907 Mon Sep 17 00:00:00 2001
From: Mathieu Chartier
Date: Fri, 28 Mar 2014 16:22:20 -0700
Subject: [PATCH] Fix non-concurrent mark sweep ergonomics.

Previously we would continue to do sticky GC until the sticky GC did not
free enough memory for the allocation; this was excessive since it could
do one sticky GC per allocation. The new logic tries the next scheduled
GC type first, before trying all the GCs in the plan.

Before, memalloc benchmark (non-concurrent mark sweep):
Total time spent in GC: 11.212701s
Score: 7790

After:
Total time spent in GC: 9.422676s
Score: 6870

Change-Id: Iba75b70ea825ef3fd4b3e064d4f12c2fe5a3b176
---
 runtime/gc/heap.cc | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 90ee9552f..1a32a9a05 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1157,13 +1157,29 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
     ptr = TryToAllocate(self, allocator, alloc_size, bytes_allocated, usable_size);
   }
 
+  collector::GcType tried_type = next_gc_type_;
+  if (ptr == nullptr) {
+    const bool gc_ran =
+        CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+    if (was_default_allocator && allocator != GetCurrentAllocator()) {
+      *klass = sirt_klass.get();
+      return nullptr;
+    }
+    if (gc_ran) {
+      ptr = TryToAllocate(self, allocator, alloc_size, bytes_allocated, usable_size);
+    }
+  }
+
   // Loop through our different Gc types and try to Gc until we get enough free memory.
   for (collector::GcType gc_type : gc_plan_) {
     if (ptr != nullptr) {
       break;
     }
+    if (gc_type == tried_type) {
+      continue;
+    }
     // Attempt to run the collector, if we succeed, re-try the allocation.
-    bool gc_ran =
+    const bool gc_ran =
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
     if (was_default_allocator && allocator != GetCurrentAllocator()) {
       *klass = sirt_klass.get();
--
2.11.0
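
The sketch below is a minimal standalone illustration of the allocation fallback this patch introduces, not ART's actual Heap implementation: run only the next scheduled GC type first, and only if the allocation still fails walk the full GC plan, skipping the type already tried. Names such as GcType, Heap, CollectGarbage, and TryToAllocate here are simplified stand-ins for the real ART interfaces.

```cpp
#include <cstddef>
#include <vector>

enum class GcType { kNone, kSticky, kPartial, kFull };

class Heap {
 public:
  // Allocate, running collections on failure. Mirrors the control flow of the
  // patched AllocateInternalWithGc: one collection of the next scheduled type,
  // then the remaining types in the plan, retrying the allocation after each.
  void* AllocateWithGc(size_t size) {
    void* ptr = TryToAllocate(size);
    if (ptr == nullptr) {
      // New behavior: try only the next scheduled GC type first, instead of
      // repeatedly running sticky GC once per failed allocation.
      GcType tried_type = next_gc_type_;
      if (CollectGarbage(tried_type) != GcType::kNone) {
        ptr = TryToAllocate(size);
      }
      // Fall back to the full plan, skipping the type we already ran.
      for (GcType gc_type : gc_plan_) {
        if (ptr != nullptr) {
          break;
        }
        if (gc_type == tried_type) {
          continue;
        }
        if (CollectGarbage(gc_type) != GcType::kNone) {
          ptr = TryToAllocate(size);
        }
      }
    }
    return ptr;
  }

 private:
  // Stubs standing in for the real allocator and collector.
  void* TryToAllocate(size_t /*size*/) { return nullptr; }
  GcType CollectGarbage(GcType type) { return type; }

  GcType next_gc_type_ = GcType::kSticky;
  std::vector<GcType> gc_plan_{GcType::kSticky, GcType::kPartial, GcType::kFull};
};

int main() {
  Heap heap;
  void* p = heap.AllocateWithGc(64);
  return p == nullptr ? 0 : 1;
}
```

The point of the design, as the commit message describes, is that a failed allocation now costs at most one collection of the already-scheduled type before escalating through the plan, rather than one sticky GC per allocation.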