/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle-free
// partial ordering and thereby causes deadlock situations to fail checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
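//
// Illustrative sketch (not part of this header): with kDebugLocking enabled, a thread may only
// acquire locks at strictly lower levels than the locks it already holds. The lock names below
// are hypothetical and assume a Thread* self is available.
//
//   Mutex coarse("coarse lock", kDefaultMutexLevel);
//   Mutex fine("fine lock", kThreadSuspendCountLock);
//   coarse.ExclusiveLock(self);  // OK: nothing held yet.
//   fine.ExclusiveLock(self);    // OK: kThreadSuspendCountLock is below kDefaultMutexLevel.
//   // Acquiring another lock at kDefaultMutexLevel or above here would fail the hierarchy check.
//   fine.ExclusiveUnlock(self);
//   coarse.ExclusiveUnlock(self);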

enum LockLevel {
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kRosAllocBulkFreeLock,
  kReferenceProcessorLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kAllocatedThreadIdsLock,
  kClassLinkerClassesLock,
  kBreakpointInvokeLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

const bool kDebugLocking = kIsDebugBuild;

// Record Log contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    volatile uint64_t wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
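//
// Illustrative usage sketch (not part of this header; assumes a Thread* self is available and the
// lock name is hypothetical):
//
//   Mutex lock("example lock");
//   lock.ExclusiveLock(self);    // Blocks until the Mutex is Free, then holds it Exclusive.
//   ...                          // Guarded work; calling ExclusiveLock again here is an error.
//   lock.ExclusiveUnlock(self);  // Returns the Mutex to the Free state.
//
// Prefer the scoped MutexLock helper declared later in this file, which pairs these calls
// automatically.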
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if exclusive access is acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
//
// * for large values of n the SharedLock may block.
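//
// Illustrative usage sketch (not part of this header; assumes a Thread* self is available and the
// lock name is hypothetical):
//
//   ReaderWriterMutex rw_lock("example rw lock");
//   rw_lock.SharedLock(self);       // Any number of readers may hold shares concurrently.
//   ...                             // Read-only access to the guarded state.
//   rw_lock.SharedUnlock(self);
//   rw_lock.ExclusiveLock(self);    // Blocks until all shares are released.
//   ...                             // Mutating access to the guarded state.
//   rw_lock.ExclusiveUnlock(self);
//
// The scoped ReaderMutexLock and WriterMutexLock helpers declared later pair these calls
// automatically.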
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != NULL.
      CHECK(IsSharedHeld(self) || self == NULL) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with the exclusive owner. No memory ordering semantics if called from a thread
  // other than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // -1 implies held exclusive, +ve shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
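//
// Illustrative usage sketch (not part of this header): the usual pattern pairs a ConditionVariable
// with the Mutex guarding the condition and re-checks the condition in a loop, since a waiter can
// wake up after the state has changed again. Names below are hypothetical and a Thread* self is
// assumed to be available.
//
//   Mutex queue_lock("queue lock");
//   ConditionVariable queue_cond("queue condition", queue_lock);
//   ...
//   queue_lock.ExclusiveLock(self);
//   while (queue_is_empty) {
//     queue_cond.Wait(self);       // Gives up queue_lock while waiting, re-acquires it on wake-up.
//   }
//   ...                            // Consume from the queue while still holding queue_lock.
//   queue_lock.ExclusiveUnlock(self);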
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that
  // sequence_ changed and doesn't enter the wait. Modified while holding guard_, but is read by
  // futex wait without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};

// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
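
// Illustrative sketch (not part of this header): the macro above turns the accidental form that
// omits the variable name into a compile-time error, while the intended scoped usage is
// unaffected. Assumes a Thread* self is available.
//
//   MutexLock mu(self, *Locks::thread_list_lock_);  // OK: named scoped lock, released at scope exit.
//   // MutexLock(mu2);                              // Error: caught by the COMPILE_ASSERT above.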

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};

// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};

// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily allow the exclusive user (e.g. the garbage collector)
  // a chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                    | Exclusive user
  // (holding mutator lock and in kRunnable state)   | .. running ..
  // .. running ..                                   | Request thread suspension by:
  // .. running ..                                   |   - acquiring thread_suspend_count_lock_
  // .. running ..                                   |   - incrementing Thread::suspend_count_ on
  // .. running ..                                   |       all mutator threads
  // .. running ..                                   |   - releasing thread_suspend_count_lock_
  // .. running ..                                   | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full      | .. blocked ..
  //   suspend code.                                 | .. blocked ..
  // Change state to kSuspended                      | .. blocked ..
  // x: Release share on mutator_lock_               | Carry out exclusive access
  // Acquire thread_suspend_count_lock_              | .. exclusive ..
  // while Thread::suspend_count_ > 0                | .. exclusive ..
  //   - wait on Thread::resume_cond_                | .. exclusive ..
  //     (releases thread_suspend_count_lock_)       | .. exclusive ..
  // .. waiting ..                                   | Release mutator_lock_
  // .. waiting ..                                   | Request thread resumption by:
  // .. waiting ..                                   |   - acquiring thread_suspend_count_lock_
  // .. waiting ..                                   |   - decrementing Thread::suspend_count_ on
  // .. waiting ..                                   |       all mutator threads
  // .. waiting ..                                   |   - notifying on Thread::resume_cond_
  //   - re-acquire thread_suspend_count_lock_       |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_              | .. running ..
  // Acquire share on mutator_lock_                  | .. running ..
  //   - This could block but the thread still       | .. running ..
  //     has a state of kSuspended and so this       | .. running ..
  //     isn't an issue.                             | .. running ..
  // Acquire thread_suspend_count_lock_              | .. running ..
  //   - we poll here as we're transitioning into    | .. running ..
  //     kRunnable and an individual thread suspend  | .. running ..
  //     request (e.g. for debugging) won't try      | .. running ..
  //     to acquire the mutator lock (which would    | .. running ..
  //     block as we hold the mutator lock). This    | .. running ..
  //     poll ensures that if the suspender thought  | .. running ..
  //     we were suspended by incrementing our       | .. running ..
  //     Thread::suspend_count_ and then reading     | .. running ..
  //     our state we go back to waiting on          | .. running ..
  //     Thread::resume_cond_.                       | .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0   | .. running ..
  // Release thread_suspend_count_lock_              | .. running ..
  // if can_go_runnable                              | .. running ..
  //   Change state to kRunnable                     | .. running ..
  // else                                            | .. running ..
  //   Goto x                                        | .. running ..
  // .. running ..                                   | .. running ..
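  //
  // Illustrative sketch (not part of this header): code running as a mutator typically executes
  // with a share on mutator_lock_ and can assert that, while a thread that has suspended all
  // others (e.g. the garbage collector during a pause) can assert exclusive ownership. Assumes a
  // Thread* self is available.
  //
  //   Locks::mutator_lock_->AssertSharedHeld(self);     // We are a runnable mutator.
  //   Locks::mutator_lock_->AssertExclusiveHeld(self);  // All other mutators are suspended.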
  static ReaderWriterMutex* mutator_lock_;

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards breakpoints.
  static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check that
  // the code doesn't try to hold a higher level Mutex.
#define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
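
  // For example (illustrative, hypothetical member): a subsystem declaring its own lock can write
  //
  //   Mutex my_subsystem_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  //
  // marking it as acquired after Locks::classlinker_classes_lock_ so that annotalysis can flag
  // code that takes a higher level Mutex while my_subsystem_lock_ is held.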

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_