From 440e4ceb310349ee8eb569495bc04d3d7fbe71cb Mon Sep 17 00:00:00 2001 From: Mathieu Chartier Date: Mon, 31 Mar 2014 16:36:35 -0700 Subject: [PATCH] Add monitor deflation. We now deflate the monitors when we perform a heap trim. This causes a pause but it shouldn't matter since we should be in a state where we don't care about pauses. Memory savings are hard to measure. Fixed integer overflow bug in GetEstimatedLastIterationThroughput. Bug: 13733906 Change-Id: I4e0e68add02e7f43370b3a5ea763d6fe8a5b212c --- runtime/base/mutex.h | 1 + runtime/gc/collector/garbage_collector.cc | 2 +- runtime/gc/heap.cc | 38 +++++++++++++++++----------- runtime/monitor.cc | 42 +++++++++++++++++++++++-------- runtime/monitor.h | 8 +++--- 5 files changed, 62 insertions(+), 29 deletions(-) diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 4b881f694..b50c09856 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -76,6 +76,7 @@ enum LockLevel { kClassLinkerClassesLock, kBreakpointLock, kMonitorLock, + kMonitorListLock, kThreadListLock, kBreakpointInvokeLock, kDeoptimizationLock, diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc index 07951e0b2..82340f516 100644 --- a/runtime/gc/collector/garbage_collector.cc +++ b/runtime/gc/collector/garbage_collector.cc @@ -201,7 +201,7 @@ uint64_t GarbageCollector::GetEstimatedMeanThroughput() const { uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const { // Add 1ms to prevent possible division by 0. - return (freed_bytes_ * 1000) / (NsToMs(GetDurationNs()) + 1); + return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1); } } // namespace collector diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 915e54f9a..e3fa8340a 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -914,8 +914,16 @@ void Heap::DoPendingTransitionOrTrim() { // Transition the collector if the desired collector type is not the same as the current // collector type. 
TransitionCollector(desired_collector_type); - // Do a heap trim if it is needed. - Trim(); + if (!CareAboutPauseTimes()) { + // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care + // about pauses. + Runtime* runtime = Runtime::Current(); + runtime->GetThreadList()->SuspendAll(); + runtime->GetMonitorList()->DeflateMonitors(); + runtime->GetThreadList()->ResumeAll(); + // Do a heap trim if it is needed. + Trim(); + } } void Heap::Trim() { @@ -2663,6 +2671,10 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint } void Heap::RequestHeapTrim() { + // Request a heap trim only if we do not currently care about pause times. + if (CareAboutPauseTimes()) { + return; + } // GC completed and now we must decide whether to request a heap trim (advising pages back to the // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans // a space it will hold its lock and can become a cause of jank. @@ -2684,21 +2696,17 @@ void Heap::RequestHeapTrim() { // as we don't hold the lock while requesting the trim). return; } - - // Request a heap trim only if we do not currently care about pause times. - if (!CareAboutPauseTimes()) { - { - MutexLock mu(self, *heap_trim_request_lock_); - if (last_trim_time_ + kHeapTrimWait >= NanoTime()) { - // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one - // just yet. - return; - } - heap_trim_request_pending_ = true; + { + MutexLock mu(self, *heap_trim_request_lock_); + if (last_trim_time_ + kHeapTrimWait >= NanoTime()) { + // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one + // just yet. + return; } - // Notify the daemon thread which will actually do the heap trim. - SignalHeapTrimDaemon(self); + heap_trim_request_pending_ = true; } + // Notify the daemon thread which will actually do the heap trim. 
+ SignalHeapTrimDaemon(self); } void Heap::SignalHeapTrimDaemon(Thread* self) { diff --git a/runtime/monitor.cc b/runtime/monitor.cc index bcaf8ece0..bbc7dd0d8 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -205,7 +205,7 @@ void Monitor::SetObject(mirror::Object* object) { void Monitor::Lock(Thread* self) { MutexLock mu(self, monitor_lock_); while (true) { - if (owner_ == NULL) { // Unowned. + if (owner_ == nullptr) { // Unowned. owner_ = self; CHECK_EQ(lock_count_, 0); // When debugging, save the current monitor holder for future @@ -223,15 +223,15 @@ void Monitor::Lock(Thread* self) { uint64_t wait_start_ms = log_contention ? 0 : MilliTime(); mirror::ArtMethod* owners_method = locking_method_; uint32_t owners_dex_pc = locking_dex_pc_; + // Do this before releasing the lock so that we don't get deflated. + ++num_waiters_; monitor_lock_.Unlock(self); // Let go of locks in order. { ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_. self->SetMonitorEnterObject(obj_); MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait. if (owner_ != NULL) { // Did the owner_ give the lock up? - ++num_waiters_; monitor_contenders_.Wait(self); // Still contended so wait. - --num_waiters_; // Woken from contention. if (log_contention) { uint64_t wait_ms = MilliTime() - wait_start_ms; @@ -252,6 +252,7 @@ void Monitor::Lock(Thread* self) { self->SetMonitorEnterObject(nullptr); } monitor_lock_.Lock(self); // Reacquire locks in order. + --num_waiters_; } } @@ -431,6 +432,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, * not order sensitive as we hold the pthread mutex. 
*/ AppendToWaitSet(self); + ++num_waiters_; int prev_lock_count = lock_count_; lock_count_ = 0; owner_ = NULL; @@ -507,6 +509,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, lock_count_ = prev_lock_count; locking_method_ = saved_method; locking_dex_pc_ = saved_dex_pc; + --num_waiters_; RemoveFromWaitSet(self); if (was_interrupted) { @@ -575,8 +578,12 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) { // If the lock isn't an inflated monitor, then we don't need to deflate anything. if (lw.GetState() == LockWord::kFatLocked) { Monitor* monitor = lw.FatLockMonitor(); - CHECK(monitor != nullptr); + DCHECK(monitor != nullptr); MutexLock mu(self, monitor->monitor_lock_); + // Can't deflate if we have anybody waiting on the CV. + if (monitor->num_waiters_ > 0) { + return false; + } Thread* owner = monitor->owner_; if (owner != nullptr) { // Can't deflate if we are locked and have a hash code. @@ -587,17 +594,16 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) { if (monitor->lock_count_ > LockWord::kThinLockMaxCount) { return false; } - // Can't deflate if we have anybody waiting on the CV. - if (monitor->num_waiters_ > 0) { - return false; - } // Deflate to a thin lock. - obj->SetLockWord(LockWord::FromThinLockId(owner->GetTid(), monitor->lock_count_)); + obj->SetLockWord(LockWord::FromThinLockId(owner->GetThreadId(), monitor->lock_count_)); + VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / " << monitor->lock_count_; } else if (monitor->HasHashCode()) { obj->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode())); + VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode(); } else { // No lock and no hash, just put an empty lock word inside the object. obj->SetLockWord(LockWord()); + VLOG(monitor) << "Deflated" << obj << " to empty lock word"; } // The monitor is deflated, mark the object as nullptr so that we know to delete it during the // next GC. 
@@ -1054,7 +1060,7 @@ uint32_t Monitor::GetOwnerThreadId() { } MonitorList::MonitorList() - : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock"), + : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock", kMonitorListLock), monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) { } @@ -1103,6 +1109,22 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) { } } +static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (Monitor::Deflate(reinterpret_cast<Thread*>(arg), object)) { + DCHECK_NE(object->GetLockWord().GetState(), LockWord::kFatLocked); + // If we deflated, return nullptr so that the monitor gets removed from the array. + return nullptr; + } + return object; // Monitor was not deflated. +} + +void MonitorList::DeflateMonitors() { + Thread* self = Thread::Current(); + Locks::mutator_lock_->AssertExclusiveHeld(self); + SweepMonitorList(MonitorDeflateCallback, reinterpret_cast<void*>(self)); +} + MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) { DCHECK(obj != NULL); diff --git a/runtime/monitor.h b/runtime/monitor.h index 55504b594..c45927826 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -224,9 +224,11 @@ class MonitorList { void Add(Monitor* m); void SweepMonitorList(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DisallowNewMonitors(); - void AllowNewMonitors(); + LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); + void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); + void DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_) + EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); private: bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_); -- 2.11.0