ResetCumulativeStatistics();
}
-bool GarbageCollector::HandleDirtyObjectsPhase() {
- DCHECK(IsConcurrent());
- return true;
+void GarbageCollector::HandleDirtyObjectsPhase() {
+ LOG(FATAL) << "Unreachable";
}
void GarbageCollector::RegisterPause(uint64_t nano_length) {
freed_objects_ = 0;
freed_large_objects_ = 0;
- InitializePhase();
-
- if (!IsConcurrent()) {
- // Pause is the entire length of the GC.
- uint64_t pause_start = NanoTime();
- ATRACE_BEGIN("Application threads suspended");
- // Mutator lock may be already exclusively held when we do garbage collections for changing the
- // current collector / allocator during process state updates.
- if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
- // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
- RevokeAllThreadLocalBuffers();
- MarkingPhase();
- ReclaimPhase();
- // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
- } else {
- thread_list->SuspendAll();
- GetHeap()->PreGcRosAllocVerification(&timings_);
- RevokeAllThreadLocalBuffers();
- MarkingPhase();
- ReclaimPhase();
- GetHeap()->PostGcRosAllocVerification(&timings_);
- thread_list->ResumeAll();
- }
- ATRACE_END();
- RegisterPause(NanoTime() - pause_start);
- } else {
- CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
- Thread* self = Thread::Current();
- {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- MarkingPhase();
+ CollectorType collector_type = GetCollectorType();
+ switch (collector_type) {
+ case kCollectorTypeMS: // Fall through.
+ case kCollectorTypeSS: // Fall through.
+ case kCollectorTypeGSS: {
+ InitializePhase();
+ // Pause is the entire length of the GC.
+ uint64_t pause_start = NanoTime();
+ ATRACE_BEGIN("Application threads suspended");
+      // Mutator lock may already be exclusively held when we do garbage collections for changing the
+ // current collector / allocator during process state updates.
+ if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
+ RevokeAllThreadLocalBuffers();
+ MarkingPhase();
+ ReclaimPhase();
+ // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
+ } else {
+ ATRACE_BEGIN("Suspending mutator threads");
+ thread_list->SuspendAll();
+ ATRACE_END();
+ GetHeap()->PreGcRosAllocVerification(&timings_);
+ RevokeAllThreadLocalBuffers();
+ MarkingPhase();
+ ReclaimPhase();
+ GetHeap()->PostGcRosAllocVerification(&timings_);
+ ATRACE_BEGIN("Resuming mutator threads");
+ thread_list->ResumeAll();
+ ATRACE_END();
+ }
+ ATRACE_END();
+ RegisterPause(NanoTime() - pause_start);
+ FinishPhase();
+ break;
}
- bool done = false;
- while (!done) {
+ case kCollectorTypeCMS: {
+ InitializePhase();
+ CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ MarkingPhase();
+ }
uint64_t pause_start = NanoTime();
ATRACE_BEGIN("Suspending mutator threads");
thread_list->SuspendAll();
ATRACE_END();
ATRACE_BEGIN("All mutator threads suspended");
GetHeap()->PreGcRosAllocVerification(&timings_);
- done = HandleDirtyObjectsPhase();
- if (done) {
- RevokeAllThreadLocalBuffers();
- }
+ HandleDirtyObjectsPhase();
+ RevokeAllThreadLocalBuffers();
GetHeap()->PostGcRosAllocVerification(&timings_);
ATRACE_END();
uint64_t pause_end = NanoTime();
thread_list->ResumeAll();
ATRACE_END();
RegisterPause(pause_end - pause_start);
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ ReclaimPhase();
+ }
+ FinishPhase();
+ break;
}
- {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- ReclaimPhase();
+ default: {
+ LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
+ break;
}
}
- FinishPhase();
+
uint64_t end_time = NanoTime();
duration_ns_ = end_time - start_time;
total_time_ns_ += GetDurationNs();
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
+#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc_type.h"
#include <stdint.h>
class GarbageCollector {
public:
- // Returns true iff the garbage collector is concurrent.
- virtual bool IsConcurrent() const = 0;
-
GarbageCollector(Heap* heap, const std::string& name);
virtual ~GarbageCollector() { }
virtual GcType GetGcType() const = 0;
+ virtual CollectorType GetCollectorType() const = 0;
+
// Run the garbage collector.
void Run(GcCause gc_cause, bool clear_soft_references);
// Mark all reachable objects, done concurrently.
virtual void MarkingPhase() = 0;
- // Only called for concurrent GCs. Gets called repeatedly until it succeeds.
- virtual bool HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Only called for concurrent GCs.
+ virtual void HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called with mutators running.
virtual void ReclaimPhase() = 0;
&MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}
-bool MarkSweep::HandleDirtyObjectsPhase() {
+void MarkSweep::HandleDirtyObjectsPhase() {
TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
// incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
// reference to a string that is about to be swept.
Runtime::Current()->DisallowNewSystemWeaks();
- return true;
-}
-
-bool MarkSweep::IsConcurrent() const {
- return is_concurrent_;
}
void MarkSweep::PreCleanCards() {
virtual void InitializePhase() OVERRIDE;
virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual bool HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void MarkReachableObjects()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- virtual bool IsConcurrent() const OVERRIDE;
+ bool IsConcurrent() const {
+ return is_concurrent_;
+ }
virtual GcType GetGcType() const OVERRIDE {
return kGcTypeFull;
}
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
+ }
+
// Initializes internal structures.
void Init();
~SemiSpace() {}
virtual void InitializePhase();
- virtual bool IsConcurrent() const {
- return false;
- }
virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual GcType GetGcType() const {
return kGcTypePartial;
}
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
+ }
// Sets which space we will be copying objects to.
void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
} else {
DCHECK(!Dbg::IsAllocTrackingEnabled());
}
- // concurrent_gc_ isn't known at compile time so we can optimize by not checking it for
+  // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
// the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
// optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
// the allocator_type should be constant propagated.
- if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
+ if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
}
- if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
+ if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
if (!kGrow) {
return true;
}
rosalloc_space_(nullptr),
dlmalloc_space_(nullptr),
main_space_(nullptr),
- concurrent_gc_(false),
collector_type_(kCollectorTypeNone),
post_zygote_collector_type_(post_zygote_collector_type),
background_collector_type_(background_collector_type),
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
- case kCollectorTypeSS:
- // Fall-through.
+ case kCollectorTypeSS: // Fall-through.
case kCollectorTypeGSS: {
- concurrent_gc_ = false;
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
ChangeAllocator(kAllocatorTypeTLAB);
break;
}
case kCollectorTypeMS: {
- concurrent_gc_ = false;
gc_plan_.push_back(collector::kGcTypeSticky);
gc_plan_.push_back(collector::kGcTypePartial);
gc_plan_.push_back(collector::kGcTypeFull);
break;
}
case kCollectorTypeCMS: {
- concurrent_gc_ = true;
gc_plan_.push_back(collector::kGcTypeSticky);
gc_plan_.push_back(collector::kGcTypePartial);
gc_plan_.push_back(collector::kGcTypeFull);
LOG(FATAL) << "Unimplemented";
}
}
- if (concurrent_gc_) {
+ if (IsGcConcurrent()) {
concurrent_start_bytes_ =
std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
} else {
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
current_allocator_ == kAllocatorTypeDlMalloc) {
for (const auto& cur_collector : garbage_collectors_) {
- if (cur_collector->IsConcurrent() == concurrent_gc_ &&
+ if (cur_collector->GetCollectorType() == collector_type_ &&
cur_collector->GetGcType() == gc_type) {
collector = cur_collector;
break;
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
CHECK(collector != nullptr)
- << "Could not find garbage collector with concurrent=" << concurrent_gc_
- << " and type=" << gc_type;
+ << "Could not find garbage collector with collector_type="
+ << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
if (!clear_soft_references) {
clear_soft_references = gc_type != collector::kGcTypeSticky; // TODO: GSS?
}
if (!ignore_max_footprint_) {
SetIdealFootprint(target_size);
- if (concurrent_gc_) {
+ if (IsGcConcurrent()) {
// Calculate when to perform the next ConcurrentGC.
// Calculate the estimated GC duration.
const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
// finalizers released native managed allocations.
UpdateMaxNativeFootprint();
} else if (!IsGCRequestPending()) {
- if (concurrent_gc_) {
+ if (IsGcConcurrent()) {
RequestConcurrentGC(self);
} else {
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
// Push an object onto the allocation stack.
void PushOnAllocationStack(Thread* self, mirror::Object* obj);
+  // Whether or not the current GC runs concurrently with the mutators. Currently true only for
+  // the concurrent mark sweep GC, false for all other collector types.
+ bool IsGcConcurrent() const ALWAYS_INLINE {
+ return collector_type_ == kCollectorTypeCMS;
+ }
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
// The mem-map which we will use for the non-moving space after the zygote is done forking:
UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;
- // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
- // sweep GC, false for other GC types.
- bool concurrent_gc_;
-
// The current collector type.
CollectorType collector_type_;
// Which collector we will switch to after zygote fork.