From 60985b7a56d4fa7170721808734093a3affc41c6 Mon Sep 17 00:00:00 2001
From: Hiroshi Yamauchi
Date: Wed, 24 Aug 2016 13:53:12 -0700
Subject: [PATCH] Background full compaction for CC.

Invoke a full compaction with the CC collector when an app goes to the
background, like the HSpace compaction for the CMS collector.

Bug: 31039431
Bug: 12687968
Test: test-art, Ritz EAAC, N9 libartd.so device boot with CC

Change-Id: I119aa26c1d3c167b12983fffcb16164929bf8f68
---
 cmdline/cmdline_types.h                    |  2 +-
 runtime/gc/collector/concurrent_copying.cc |  6 ++++--
 runtime/gc/collector_type.h                |  2 ++
 runtime/gc/heap.cc                         | 26 +++++++++++++++++++++++++-
 runtime/gc/heap.h                          |  9 +++++----
 5 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index b57383b96..b74e58804 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -581,7 +581,7 @@ struct BackgroundGcOption {
     : background_collector_type_(gc::kCollectorTypeNone) {

     if (kUseReadBarrier) {
-      background_collector_type_ = gc::kCollectorTypeCC;  // Disable background compaction for CC.
+      background_collector_type_ = gc::kCollectorTypeCCBackground;  // Background compaction for CC.
     }
   }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 42816a04f..285b111c4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -202,8 +202,10 @@ void ConcurrentCopying::InitializePhase() {
   immune_spaces_.Reset();
   bytes_moved_.StoreRelaxed(0);
   objects_moved_.StoreRelaxed(0);
-  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
-      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+  GcCause gc_cause = GetCurrentIteration()->GetGcCause();
+  if (gc_cause == kGcCauseExplicit ||
+      gc_cause == kGcCauseForNativeAlloc ||
+      gc_cause == kGcCauseCollectorTransition ||
       GetCurrentIteration()->GetClearSoftReferences()) {
     force_evacuate_all_ = true;
   } else {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 7899a7c31..b342cc7aa 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -40,6 +40,8 @@ enum CollectorType {
   kCollectorTypeHeapTrim,
   // A (mostly) concurrent copying collector.
   kCollectorTypeCC,
+  // The background compaction of the concurrent copying collector.
+  kCollectorTypeCCBackground,
   // Instrumentation critical section fake collector.
   kCollectorTypeInstrumentation,
   // Fake collector for adding or removing application image spaces.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 638c1d841..9e454ca10 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -731,6 +731,7 @@ void Heap::ChangeAllocator(AllocatorType allocator) {
 }

 void Heap::DisableMovingGc() {
+  CHECK(!kUseReadBarrier);
   if (IsMovingGc(foreground_collector_type_)) {
     foreground_collector_type_ = kCollectorTypeCMS;
   }
@@ -970,7 +971,8 @@ void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_p
       // Don't delay for debug builds since we may want to stress test the GC.
       // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
       // special handling which does a homogenous space compaction once but then doesn't transition
-      // the collector.
+      // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
+      // transition the collector.
       RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 : kCollectorTransitionWait);
     }
   }
@@ -1384,6 +1386,16 @@ void Heap::DoPendingCollectorTransition() {
     } else {
       VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
     }
+  } else if (desired_collector_type == kCollectorTypeCCBackground) {
+    DCHECK(kUseReadBarrier);
+    if (!CareAboutPauseTimes()) {
+      // Invoke CC full compaction.
+      CollectGarbageInternal(collector::kGcTypeFull,
+                             kGcCauseCollectorTransition,
+                             /*clear_soft_references*/false);
+    } else {
+      VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
+    }
   } else {
     TransitionCollector(desired_collector_type);
   }
@@ -1841,6 +1853,10 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
       break;
     }
     case kAllocatorTypeNonMoving: {
+      if (kUseReadBarrier) {
+        // DisableMovingGc() isn't compatible with CC.
+        break;
+      }
       // Try to transition the heap if the allocation failure was due to the space being full.
       if (!IsOutOfMemoryOnAllocation(allocator, alloc_size)) {
         // If we aren't out of memory then the OOM was probably from the non moving space being
@@ -2109,6 +2125,8 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
 }

 void Heap::TransitionCollector(CollectorType collector_type) {
+  // Collector transition must not happen with CC
+  CHECK(!kUseReadBarrier);
   if (collector_type == collector_type_) {
     return;
   }
@@ -3798,6 +3816,12 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
     return;
   }
+  if (collector_type_ == kCollectorTypeCC) {
+    // For CC, we invoke a full compaction when going to the background, but the collector type
+    // doesn't change.
+    DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
+  }
+  DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
   CollectorTransitionTask* added_task = nullptr;
   const uint64_t target_time = NanoTime() + delta_time;
   {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index be8ed40e8..b357b872f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -537,7 +537,7 @@ class Heap {
   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);

   // Do a pending collector transition.
-  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
+  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

   // Deflate monitors, ... and trim the spaces.
   void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -708,8 +708,6 @@ class Heap {
     if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
       // Assume no transition when a concurrent moving collector is used.
       DCHECK_EQ(collector_type_, foreground_collector_type_);
-      DCHECK_EQ(foreground_collector_type_, background_collector_type_)
-          << "Assume no transition such that collector_type_ won't change";
       return true;
     }
     return false;
@@ -828,6 +826,7 @@ class Heap {
            collector_type == kCollectorTypeSS ||
            collector_type == kCollectorTypeGSS ||
            collector_type == kCollectorTypeCC ||
+           collector_type == kCollectorTypeCCBackground ||
            collector_type == kCollectorTypeMC ||
            collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
@@ -997,7 +996,9 @@ class Heap {
   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
   // sweep GC, false for other GC types.
   bool IsGcConcurrent() const ALWAYS_INLINE {
-    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
+    return collector_type_ == kCollectorTypeCMS ||
+           collector_type_ == kCollectorTypeCC ||
+           collector_type_ == kCollectorTypeCCBackground;
   }

   // Trim the managed and native spaces by releasing unused memory back to the OS.
-- 
2.11.0
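
Note on the control flow this patch adds: going to the background makes
Heap::UpdateProcessState() request a "transition" to kCollectorTypeCCBackground;
the heap task thread later runs Heap::DoPendingCollectorTransition(), which
performs one full compacting GC (kGcCauseCollectorTransition forces
force_evacuate_all_ in ConcurrentCopying::InitializePhase()) while
collector_type_ stays kCollectorTypeCC. The standalone C++ sketch below models
that flow under simplifying assumptions; the class body, main(), and the
jank-perceptible flag are illustrative stand-ins, not ART code, though the
names mirror the ones in the patch.

// cc_background_sketch.cc -- simplified model of the background compaction
// flow added by this patch. Build with: g++ -std=c++11 cc_background_sketch.cc
#include <iostream>

enum CollectorType {
  kCollectorTypeCC,           // Foreground concurrent copying collector.
  kCollectorTypeCCBackground  // Trigger for one background full compaction.
};

enum GcCause { kGcCauseBackground, kGcCauseCollectorTransition };

class Heap {
 public:
  // Called when the app goes to the background; models
  // Heap::RequestCollectorTransition(). With CC, collector_type_ itself never
  // changes; the desired type only records that a full compaction should run.
  void RequestCollectorTransition(CollectorType desired) {
    desired_collector_type_ = desired;
  }

  // Models Heap::DoPendingCollectorTransition(): compact only when the
  // process state is not jank perceptible.
  void DoPendingCollectorTransition() {
    if (desired_collector_type_ == kCollectorTypeCCBackground) {
      if (!CareAboutPauseTimes()) {
        CollectGarbageInternal(kGcCauseCollectorTransition);
      } else {
        std::cout << "CC background compaction ignored (jank perceptible)\n";
      }
    }
  }

  void SetJankPerceptible(bool value) { jank_perceptible_ = value; }

 private:
  bool CareAboutPauseTimes() const { return jank_perceptible_; }

  void CollectGarbageInternal(GcCause cause) {
    // kGcCauseCollectorTransition is what sets force_evacuate_all_ in
    // ConcurrentCopying::InitializePhase(), making this a *full* compaction
    // rather than a partial evacuation.
    const bool force_evacuate_all = (cause == kGcCauseCollectorTransition);
    std::cout << "CC GC: force_evacuate_all=" << force_evacuate_all << "\n";
  }

  CollectorType desired_collector_type_ = kCollectorTypeCC;
  bool jank_perceptible_ = true;
};

int main() {
  Heap heap;
  heap.SetJankPerceptible(false);  // The app just went to the background.
  heap.RequestCollectorTransition(kCollectorTypeCCBackground);
  heap.DoPendingCollectorTransition();  // Prints "force_evacuate_all=1".
  return 0;
}

This also illustrates why the patch can delete the DCHECK that foreground and
background collector types match in IsGcConcurrent()'s caller: with CC the two
types now legitimately differ, but no real collector transition ever runs
(hence the new CHECK(!kUseReadBarrier) in TransitionCollector()).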