2 * Copyright (C) 2008 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
27 * Every Object has a monitor associated with it, but not every Object is
28 * actually locked. Even the ones that are locked do not need a
29 * full-fledged monitor until a) there is actual contention or b) wait()
30 * is called on the Object.
32 * For Dalvik, we have implemented a scheme similar to the one described
33 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
34 * (ACM 1998). Things are even easier for us, though, because we have
35 * a full 32 bits to work with.
37 * The two states of an Object's lock are referred to as "thin" and
38 * "fat". A lock may transition from the "thin" state to the "fat"
39 * state and this transition is referred to as inflation. Once a lock
40 * has been inflated it remains in the "fat" state indefinitely.
42 * The lock value itself is stored in Object.lock. The LSB of the
43 * lock encodes its state. When cleared, the lock is in the "thin"
44 * state and its bits are formatted as follows:
46 * [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
47 * lock count thread id hash state 0
49 * When set, the lock is in the "fat" state and its bits are formatted
52 * [31 ---- 3] [2 ---- 1] [0]
53 * pointer hash state 1
55 * For an in-depth description of the mechanics of thin-vs-fat locking,
56 * read the paper referred to above.
61 * - mutually exclusive access to resources
62 * - a way for multiple threads to wait for notification
64 * In effect, they fill the role of both mutexes and condition variables.
66 * Only one thread can own the monitor at any time. There may be several
67 * threads waiting on it (the wait call unlocks it). One or more waiting
68 * threads may be getting interrupted or notified at any given time.
70 * TODO: the various members of monitor are not SMP-safe.
/* Fields of the Monitor struct (the struct declaration itself is not
 * visible in this excerpt; an ownerPc field is also referenced by code
 * below — confirm against the full file). */
73 Thread* owner; /* which thread currently owns the lock? */
74 int lockCount; /* owner's recursive lock depth */
75 Object* obj; /* what object are we part of [debug only] */
77 Thread* waitSet; /* threads currently waiting on this monitor */
84 * Who last acquired this monitor, when lock sampling is enabled.
85 * Even when enabled, ownerMethod may be NULL.
87 const Method* ownerMethod;
93 * Create and initialize a monitor.
95 Monitor* dvmCreateMonitor(Object* obj)
/* Zero-fill so owner/lockCount/waitSet all start out clear. */
99 mon = (Monitor*) calloc(1, sizeof(Monitor));
101 ALOGE("Unable to allocate monitor");
/* The low 3 bits of the monitor pointer are reused as lock-word state
 * bits (see the fat lock-word layout in the file header), so the
 * allocation must be 8-byte aligned.
 * NOTE(review): the (u4)/(int32_t) casts assume 32-bit pointers. */
104 if (((u4)mon & 7) != 0) {
105 ALOGE("Misaligned monitor: %p", mon);
109 dvmInitMutex(&mon->lock);
111 /* replace the head of the list with the new monitor */
/* Lock-free push onto the global monitor list: retry the CAS until no
 * other thread has changed the list head underneath us. */
113 mon->next = gDvm.monitorList;
114 } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
115 (int32_t*)(void*)&gDvm.monitorList) != 0);
121 * Free the monitor list. Only used when shutting the VM down.
123 void dvmFreeMonitorList()
/* Walk the singly-linked global list from the head; the per-node
 * free/advance is not visible in this excerpt. */
128 mon = gDvm.monitorList;
129 while (mon != NULL) {
137 * Get the object that a monitor is part of.
/* Accessor; body not visible in this excerpt (presumably returns
 * mon->obj — confirm against the full file). */
139 Object* dvmGetMonitorObject(Monitor* mon)
148 * Returns the thread id of the thread owning the given lock.
150 static u4 lockOwner(Object* obj)
157 * Since we're reading the lock value multiple times, latch it so
158 * that it doesn't change out from under us if we get preempted.
/* Thin lock: the owner thread id is encoded directly in the lock word
 * (0 when unowned). */
161 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
162 return LW_LOCK_OWNER(lock);
/* Fat lock: follow the monitor pointer; 0 means unowned. */
164 owner = LW_MONITOR(lock)->owner;
165 return owner ? owner->threadId : 0;
170 * Get the thread that holds the lock on the specified object. The
171 * object may be unlocked, thin-locked, or fat-locked.
173 * The caller must lock the thread list before calling here.
175 Thread* dvmGetObjectLockHolder(Object* obj)
177 u4 threadId = lockOwner(obj);
/* Map the numeric owner id back to a Thread*; safe only because the
 * caller holds the thread-list lock (see contract above). */
181 return dvmGetThreadByThreadId(threadId);
185 * Checks whether the given thread holds the given
188 bool dvmHoldsLock(Thread* thread, Object* obj)
/* A NULL thread or object trivially does not hold the lock. */
190 if (thread == NULL || obj == NULL) {
193 return thread->threadId == lockOwner(obj);
198 * Free the monitor associated with an object and make the object's lock
199 * thin again. This is called during garbage collection.
201 static void freeMonitor(Monitor *mon)
204 assert(mon->obj != NULL);
205 assert(LW_SHAPE(mon->obj->lock) == LW_SHAPE_FAT);
207 /* This lock is associated with an object
208 * that's being swept. The only possible way
209 * anyone could be holding this lock would be
210 * if some JNI code locked but didn't unlock
211 * the object, in which case we've got some bad
212 * native code somewhere.
/* NOTE(review): these asserts have side effects. Under NDEBUG the
 * trylock/unlock calls are compiled out entirely, so the "is anyone
 * still holding this?" sanity check silently disappears in release
 * builds. Safer: perform the calls unconditionally and assert on the
 * saved return values. */
214 assert(pthread_mutex_trylock(&mon->lock) == 0);
215 assert(pthread_mutex_unlock(&mon->lock) == 0);
216 dvmDestroyMutex(&mon->lock);
221 * Frees monitor objects belonging to unmarked objects.
223 void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
226 Monitor *prev, *curr;
230 assert(isUnmarkedObject != NULL);
/* "prev" acts as a dummy head node so unlinking the first element
 * needs no special case. */
232 prev->next = curr = *mon;
233 while (curr != NULL) {
/* Unlink monitors whose associated object is unmarked (about to be
 * swept); the actual freeing is not visible in this excerpt. */
235 if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
236 prev->next = curr->next;
/* Append an EVENT_TYPE_INT record (1 type byte + 4-byte little-endian
 * value) to the event buffer; the advance of dst past the payload is
 * not visible in this excerpt. */
247 static char *logWriteInt(char *dst, int value)
249 *dst++ = EVENT_TYPE_INT;
250 set4LE((u1 *)dst, value);
/* Append an EVENT_TYPE_STRING record: 1 type byte, 4-byte LE length,
 * then the string bytes. The length is clamped to 32 so each string
 * field stays within the fixed-size event buffer. */
254 static char *logWriteString(char *dst, const char *value, size_t len)
256 *dst++ = EVENT_TYPE_STRING;
257 len = len < 32 ? len : 32;
258 set4LE((u1 *)dst, len);
260 memcpy(dst, value, len);
264 #define EVENT_LOG_TAG_dvm_lock_sample 20003
/* Build a binary event-list payload describing a sampled lock
 * contention (process, thread, wait time, contending and owning source
 * locations, sample percentage) and write it to the events log under
 * tag 20003 (dvm_lock_sample). */
266 static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent,
267 const char *ownerFileName, u4 ownerLineNumber)
269 const StackSaveArea *saveArea;
272 char eventBuffer[174];
273 const char *fileName;
279 saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
280 meth = saveArea->method;
283 /* Emit the event list length, 1 byte. */
286 /* Emit the process name, <= 37 bytes. */
/* NOTE(review): the open() result is not checked before read(); on
 * failure fd is -1 and read() fails, leaving procName empty (the
 * memset keeps it NUL-terminated). The matching close() is not
 * visible in this excerpt — confirm the fd is not leaked. */
287 fd = open("/proc/self/cmdline", O_RDONLY);
288 memset(procName, 0, sizeof(procName));
289 read(fd, procName, sizeof(procName) - 1);
291 len = strlen(procName);
292 cp = logWriteString(cp, procName, len);
294 /* Emit the sensitive thread ("main thread") status, 5 bytes. */
295 bool isSensitive = false;
296 if (gDvm.isSensitiveThreadHook != NULL) {
297 isSensitive = gDvm.isSensitiveThreadHook();
299 cp = logWriteInt(cp, isSensitive);
301 /* Emit self thread name string, <= 37 bytes. */
302 std::string selfName = dvmGetThreadName(self);
303 cp = logWriteString(cp, selfName.c_str(), selfName.size());
305 /* Emit the wait time, 5 bytes. */
306 cp = logWriteInt(cp, waitMs);
308 /* Emit the source code file name, <= 37 bytes. */
309 fileName = dvmGetMethodSourceFile(meth);
310 if (fileName == NULL) fileName = "";
311 cp = logWriteString(cp, fileName, strlen(fileName));
313 /* Emit the source code line number, 5 bytes. */
314 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
315 cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
317 /* Emit the lock owner source code file name, <= 37 bytes. */
318 if (ownerFileName == NULL) {
320 } else if (strcmp(fileName, ownerFileName) == 0) {
321 /* Common case, so save on log space. */
324 cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
326 /* Emit the source code line number, 5 bytes. */
327 cp = logWriteInt(cp, ownerLineNumber);
329 /* Emit the sample percentage, 5 bytes. */
330 cp = logWriteInt(cp, samplePercent);
332 assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
333 android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
336 (size_t)(cp - eventBuffer));
/* Acquire the monitor's pthread mutex, blocking if contended.
 * Recursive re-entry by the current owner is handled first (the count
 * increment is not visible in this excerpt). On contention, the wait
 * is timed and may be logged as a sampled dvm_lock_sample event. */
342 static void lockMonitor(Thread* self, Monitor* mon)
344 ThreadStatus oldStatus;
345 u4 waitThreshold, samplePercent;
346 u8 waitStart, waitEnd, waitMs;
348 if (mon->owner == self) {
/* Fast path failed: tell the VM we are blocked on a monitor while we
 * do the contended acquire. */
352 if (dvmTryLockMutex(&mon->lock) != 0) {
353 oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
354 waitThreshold = gDvm.lockProfThreshold;
356 waitStart = dvmGetRelativeTimeUsec();
/* Snapshot the contending owner's method/pc before we block; used by
 * the sampled contention log below. */
359 const Method* currentOwnerMethod = mon->ownerMethod;
360 u4 currentOwnerPc = mon->ownerPc;
362 dvmLockMutex(&mon->lock);
364 waitEnd = dvmGetRelativeTimeUsec();
366 dvmChangeStatus(self, oldStatus);
368 waitMs = (waitEnd - waitStart) / 1000;
/* Log with probability proportional to how far past the profiling
 * threshold we had to wait. */
369 if (waitMs >= waitThreshold) {
372 samplePercent = 100 * waitMs / waitThreshold;
374 if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
375 const char* currentOwnerFileName = "no_method";
376 u4 currentOwnerLineNumber = 0;
377 if (currentOwnerMethod != NULL) {
378 currentOwnerFileName = dvmGetMethodSourceFile(currentOwnerMethod);
379 if (currentOwnerFileName == NULL) {
380 currentOwnerFileName = "no_method_file";
382 currentOwnerLineNumber = dvmLineNumFromPC(currentOwnerMethod, currentOwnerPc);
384 logContentionEvent(self, waitMs, samplePercent,
385 currentOwnerFileName, currentOwnerLineNumber);
390 assert(mon->lockCount == 0);
392 // When debugging, save the current monitor holder for future
393 // acquisition failures to use in sampled logging.
394 if (gDvm.lockProfThreshold > 0) {
395 mon->ownerMethod = NULL;
397 if (self->interpSave.curFrame == NULL) {
400 const StackSaveArea* saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
401 if (saveArea == NULL) {
404 mon->ownerMethod = saveArea->method;
405 mon->ownerPc = (saveArea->xtra.currentPc - saveArea->method->insns);
410 * Try to lock a monitor.
412 * Returns "true" on success.
/* Non-blocking variant of lockMonitor; compiled only for the copying
 * collector, which uses it in dvmIdentityHashCode below. */
414 #ifdef WITH_COPYING_GC
415 static bool tryLockMonitor(Thread* self, Monitor* mon)
/* Recursive acquisition by the current owner (the count increment is
 * not visible in this excerpt). */
417 if (mon->owner == self) {
421 if (dvmTryLockMutex(&mon->lock) == 0) {
423 assert(mon->lockCount == 0);
435 * Returns true if the unlock succeeded.
436 * If the unlock failed, an exception will be pending.
438 static bool unlockMonitor(Thread* self, Monitor* mon)
440 assert(self != NULL);
442 if (mon->owner == self) {
444 * We own the monitor, so nobody else can be in here.
/* lockCount == 0 means this is the outermost (non-recursive) unlock:
 * clear the sampling bookkeeping and release the mutex. The recursive
 * decrement path and owner clearing are not visible in this excerpt. */
446 if (mon->lockCount == 0) {
448 mon->ownerMethod = NULL;
450 dvmUnlockMutex(&mon->lock);
456 * We don't own this, so we're not allowed to unlock it.
457 * The JNI spec says that we should throw IllegalMonitorStateException
460 dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
467 * Checks the wait set for circular structure. Returns 0 if the list
468 * is not circular. Otherwise, returns 1. Used only by asserts.
471 static int waitSetCheck(Monitor *mon)
/* Floyd cycle detection: "fast" advances two nodes per iteration,
 * "slow" one; they can only meet again after the start (n > 0) if the
 * list contains a loop. */
477 fast = slow = mon->waitSet;
480 if (fast == NULL) return 0;
481 if (fast->waitNext == NULL) return 0;
482 if (fast == slow && n > 0) return 1;
484 fast = fast->waitNext->waitNext;
485 slow = slow->waitNext;
491 * Links a thread into a monitor's wait set. The monitor lock must be
492 * held by the caller of this routine.
494 static void waitSetAppend(Monitor *mon, Thread *thread)
499 assert(mon->owner == dvmThreadSelf());
500 assert(thread != NULL);
501 assert(thread->waitNext == NULL);
502 assert(waitSetCheck(mon) == 0);
/* Empty set: the new thread becomes the head. */
503 if (mon->waitSet == NULL) {
504 mon->waitSet = thread;
/* Otherwise walk to the tail and append (FIFO order). */
508 while (elt->waitNext != NULL) {
511 elt->waitNext = thread;
515 * Unlinks a thread from a monitor's wait set. The monitor lock must
516 * be held by the caller of this routine.
518 static void waitSetRemove(Monitor *mon, Thread *thread)
523 assert(mon->owner == dvmThreadSelf());
524 assert(thread != NULL);
525 assert(waitSetCheck(mon) == 0);
/* Empty set: nothing to do. */
526 if (mon->waitSet == NULL) {
/* Head removal. */
529 if (mon->waitSet == thread) {
530 mon->waitSet = thread->waitNext;
531 thread->waitNext = NULL;
/* Interior removal: find the predecessor and splice the thread out.
 * A thread not present in the set falls off the loop harmlessly. */
535 while (elt->waitNext != NULL) {
536 if (elt->waitNext == thread) {
537 elt->waitNext = thread->waitNext;
538 thread->waitNext = NULL;
546 * Converts the given relative waiting time into an absolute time.
548 static void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
/* Base the deadline on the monotonic clock when the platform supports
 * pthread_cond_timedwait_monotonic; otherwise fall back to wall-clock
 * time from gettimeofday (subject to clock adjustments). */
552 #ifdef HAVE_TIMEDWAIT_MONOTONIC
553 clock_gettime(CLOCK_MONOTONIC, ts);
557 gettimeofday(&tv, NULL);
558 ts->tv_sec = tv.tv_sec;
559 ts->tv_nsec = tv.tv_usec * 1000;
/* Clamp absurdly long waits so tv_sec stays inside the signed 32-bit
 * epoch (the clamp assignment is not visible in this excerpt). */
562 endSec = ts->tv_sec + msec / 1000;
563 if (endSec >= 0x7fffffff) {
564 ALOGV("NOTE: end time exceeds epoch")
/* Fold in the sub-second parts, then normalize tv_nsec into
 * [0, 1e9) by carrying into tv_sec. */
568 ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;
571 if (ts->tv_nsec >= 1000000000L) {
573 ts->tv_nsec -= 1000000000L;
/* Timed condition wait with a relative (msec, nsec) timeout: convert
 * to an absolute deadline and wait; only 0 or ETIMEDOUT are expected
 * back, and the result is returned to the caller. */
577 int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
582 absoluteTime(msec, nsec, &ts);
583 #if defined(HAVE_TIMEDWAIT_MONOTONIC)
584 ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
586 ret = pthread_cond_timedwait(cond, mutex, &ts);
588 assert(ret == 0 || ret == ETIMEDOUT);
593 * Wait on a monitor until timeout, interrupt, or notification. Used for
594 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
596 * If another thread calls Thread.interrupt(), we throw InterruptedException
597 * and return immediately if one of the following are true:
598 * - blocked in wait(), wait(long), or wait(long, int) methods of Object
599 * - blocked in join(), join(long), or join(long, int) methods of Thread
600 * - blocked in sleep(long), or sleep(long, int) methods of Thread
601 * Otherwise, we set the "interrupted" flag.
603 * Checks to make sure that "nsec" is in the range 0-999999
604 * (i.e. fractions of a millisecond) and throws the appropriate
605 * exception if it isn't.
607 * The spec allows "spurious wakeups", and recommends that all code using
608 * Object.wait() do so in a loop. This appears to derive from concerns
609 * about pthread_cond_wait() on multiprocessor systems. Some commentary
610 * on the web casts doubt on whether these can/should occur.
612 * Since we're allowed to wake up "early", we clamp extremely long durations
613 * to return at the end of the 32-bit time epoch.
615 static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
616 bool interruptShouldThrow)
619 bool wasInterrupted = false;
623 assert(self != NULL);
626 /* Make sure that we hold the lock. */
627 if (mon->owner != self) {
628 dvmThrowIllegalMonitorStateException(
629 "object not locked by thread before wait()");
634 * Enforce the timeout range.
636 if (msec < 0 || nsec < 0 || nsec > 999999) {
637 dvmThrowIllegalArgumentException("timeout arguments out of range");
642 * Compute absolute wakeup time, if necessary.
/* (0, 0) means "wait forever" — no deadline is computed for it. */
644 if (msec == 0 && nsec == 0) {
647 absoluteTime(msec, nsec, &ts);
652 * Add ourselves to the set of threads waiting on this monitor, and
653 * release our hold. We need to let it go even if we're a few levels
654 * deep in a recursive lock, and we need to restore that later.
656 * We append to the wait set ahead of clearing the count and owner
657 * fields so the subroutine can check that the calling thread owns
658 * the monitor. Aside from that, the order of member updates is
659 * not order sensitive as we hold the pthread mutex.
661 waitSetAppend(mon, self);
/* Save the recursion depth and sampling bookkeeping so they can be
 * restored after we reacquire the monitor below. */
662 int prevLockCount = mon->lockCount;
666 const Method* savedMethod = mon->ownerMethod;
667 u4 savedPc = mon->ownerPc;
668 mon->ownerMethod = NULL;
672 * Update thread status. If the GC wakes up, it'll ignore us, knowing
673 * that we won't touch any references in this state, and we'll check
674 * our suspend mode before we transition out.
677 dvmChangeStatus(self, THREAD_TIMED_WAIT);
679 dvmChangeStatus(self, THREAD_WAIT);
681 dvmLockMutex(&self->waitMutex);
684 * Set waitMonitor to the monitor object we will be waiting on.
685 * When waitMonitor is non-NULL a notifying or interrupting thread
686 * must signal the thread's waitCond to wake it up.
688 assert(self->waitMonitor == NULL);
689 self->waitMonitor = mon;
692 * Handle the case where the thread was interrupted before we called
695 if (self->interrupted) {
696 wasInterrupted = true;
697 self->waitMonitor = NULL;
698 dvmUnlockMutex(&self->waitMutex);
703 * Release the monitor lock and wait for a notification or
704 * a timeout to occur.
706 dvmUnlockMutex(&mon->lock);
/* Untimed branch waits indefinitely; timed branch uses the absolute
 * deadline computed above (the branch condition between the two is
 * not visible in this excerpt). */
709 ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
712 #ifdef HAVE_TIMEDWAIT_MONOTONIC
713 ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
715 ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
717 assert(ret == 0 || ret == ETIMEDOUT);
719 if (self->interrupted) {
720 wasInterrupted = true;
723 self->interrupted = false;
724 self->waitMonitor = NULL;
726 dvmUnlockMutex(&self->waitMutex);
728 /* Reacquire the monitor lock. */
729 lockMonitor(self, mon);
733 * We remove our thread from wait set after restoring the count
734 * and owner fields so the subroutine can check that the calling
735 * thread owns the monitor. Aside from that, the order of member
736 * updates is not order sensitive as we hold the pthread mutex.
739 mon->lockCount = prevLockCount;
740 mon->ownerMethod = savedMethod;
741 mon->ownerPc = savedPc;
742 waitSetRemove(mon, self);
744 /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
745 dvmChangeStatus(self, THREAD_RUNNING);
747 if (wasInterrupted) {
749 * We were interrupted while waiting, or somebody interrupted an
750 * un-interruptible thread earlier and we're bailing out immediately.
752 * The doc sayeth: "The interrupted status of the current thread is
753 * cleared when this exception is thrown."
755 self->interrupted = false;
756 if (interruptShouldThrow) {
757 dvmThrowInterruptedException(NULL);
763 * Notify one thread waiting on this monitor.
765 static void notifyMonitor(Thread* self, Monitor* mon)
769 assert(self != NULL);
772 /* Make sure that we hold the lock. */
773 if (mon->owner != self) {
774 dvmThrowIllegalMonitorStateException(
775 "object not locked by thread before notify()");
778 /* Signal the first waiting thread in the wait set. */
/* Loop because unlinked entries may no longer be waiting (timed out
 * or interrupted, indicated by waitMonitor == NULL); the early exit
 * after signaling one live waiter is not visible in this excerpt. */
779 while (mon->waitSet != NULL) {
780 thread = mon->waitSet;
781 mon->waitSet = thread->waitNext;
782 thread->waitNext = NULL;
/* The thread's waitMutex guards waitMonitor/waitCond (see
 * dvmThreadInterrupt). */
783 dvmLockMutex(&thread->waitMutex);
784 /* Check to see if the thread is still waiting. */
785 if (thread->waitMonitor != NULL) {
786 pthread_cond_signal(&thread->waitCond);
787 dvmUnlockMutex(&thread->waitMutex);
790 dvmUnlockMutex(&thread->waitMutex);
795 * Notify all threads waiting on this monitor.
797 static void notifyAllMonitor(Thread* self, Monitor* mon)
801 assert(self != NULL);
804 /* Make sure that we hold the lock. */
805 if (mon->owner != self) {
806 dvmThrowIllegalMonitorStateException(
807 "object not locked by thread before notifyAll()");
810 /* Signal all threads in the wait set. */
/* Unlike notifyMonitor, the loop drains the entire wait set,
 * signaling every thread that is still actually waiting. */
811 while (mon->waitSet != NULL) {
812 thread = mon->waitSet;
813 mon->waitSet = thread->waitNext;
814 thread->waitNext = NULL;
815 dvmLockMutex(&thread->waitMutex);
816 /* Check to see if the thread is still waiting. */
817 if (thread->waitMonitor != NULL) {
818 pthread_cond_signal(&thread->waitCond);
820 dvmUnlockMutex(&thread->waitMutex);
825 * Changes the shape of a monitor from thin to fat, preserving the
826 * internal lock state. The calling thread must own the lock.
828 static void inflateMonitor(Thread *self, Object *obj)
833 assert(self != NULL);
835 assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
836 assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
837 /* Allocate and acquire a new monitor. */
838 mon = dvmCreateMonitor(obj);
839 lockMonitor(self, mon);
840 /* Propagate the lock state. */
/* Carry the thin lock's recursion count into the monitor, then build
 * the fat lock word: keep only the hash-state bits of the old word
 * and OR in the monitor pointer plus the fat-shape flag. */
842 mon->lockCount = LW_LOCK_COUNT(thin);
843 thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
844 thin |= (u4)mon | LW_SHAPE_FAT;
845 /* Publish the updated lock word. */
/* Release-store so other threads observing the fat word see a fully
 * initialized monitor. */
846 android_atomic_release_store(thin, (int32_t *)&obj->lock);
850 * Implements monitorenter for "synchronized" stuff.
852 * This does not fail or throw an exception (unless deadlock prediction
853 * is enabled and set to "err" mode).
855 void dvmLockObject(Thread* self, Object *obj)
858 ThreadStatus oldStatus;
861 long minSleepDelayNs = 1000000; /* 1 millisecond */
862 long maxSleepDelayNs = 1000000000; /* 1 second */
863 u4 thin, newThin, threadId;
865 assert(self != NULL);
867 threadId = self->threadId;
871 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
873 * The lock is a thin lock. The owner field is used to
874 * determine the acquire method, ordered by cost.
876 if (LW_LOCK_OWNER(thin) == threadId) {
878 * The calling thread owns the lock. Increment the
879 * value of the recursion count field.
881 obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
882 if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
884 * The reacquisition limit has been reached. Inflate
885 * the lock so the next acquire will not overflow the
886 * recursion count field.
888 inflateMonitor(self, obj);
890 } else if (LW_LOCK_OWNER(thin) == 0) {
892 * The lock is unowned. Install the thread id of the
893 * calling thread into the owner field. This is the
894 * common case. In performance critical code the JIT
895 * will have tried this before calling out to the VM.
897 newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
898 if (android_atomic_acquire_cas(thin, newThin,
899 (int32_t*)thinp) != 0) {
901 * The acquire failed. Try again.
906 ALOGV("(%d) spin on lock %p: %#x (%#x) %#x",
907 threadId, &obj->lock, 0, *thinp, thin);
909 * The lock is owned by another thread. Notify the VM
910 * that we are about to wait.
912 oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
914 * Spin until the thin lock is released or inflated.
920 * Check the shape of the lock word. Another thread
921 * may have inflated the lock while we were waiting.
923 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
924 if (LW_LOCK_OWNER(thin) == 0) {
926 * The lock has been released. Install the
927 * thread id of the calling thread into the
930 newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
931 if (android_atomic_acquire_cas(thin, newThin,
932 (int32_t *)thinp) == 0) {
934 * The acquire succeed. Break out of the
935 * loop and proceed to inflate the lock.
941 * The lock has not been released. Yield so
942 * the owning thread can run.
/* Back off before retrying: start at 1ms and grow the nanosleep
 * exponentially up to the 1s cap set above (the doubling statement
 * and the initial yield are not visible in this excerpt). */
944 if (sleepDelayNs == 0) {
946 sleepDelayNs = minSleepDelayNs;
949 tm.tv_nsec = sleepDelayNs;
950 nanosleep(&tm, NULL);
952 * Prepare the next delay value. Wrap to
953 * avoid once a second polls for eternity.
955 if (sleepDelayNs < maxSleepDelayNs / 2) {
958 sleepDelayNs = minSleepDelayNs;
964 * The thin lock was inflated by another thread.
965 * Let the VM know we are no longer waiting and
968 ALOGV("(%d) lock %p surprise-fattened",
969 threadId, &obj->lock);
970 dvmChangeStatus(self, oldStatus);
974 ALOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
975 threadId, &obj->lock, 0, *thinp, thin);
977 * We have acquired the thin lock. Let the VM know that
978 * we are no longer waiting.
980 dvmChangeStatus(self, oldStatus);
/* Contended acquire always inflates: we had to spin for it, so future
 * contention should block on the monitor instead. */
984 inflateMonitor(self, obj);
985 ALOGV("(%d) lock %p fattened", threadId, &obj->lock);
989 * The lock is a fat lock.
991 assert(LW_MONITOR(obj->lock) != NULL);
992 lockMonitor(self, LW_MONITOR(obj->lock));
997 * Implements monitorexit for "synchronized" stuff.
999 * On failure, throws an exception and returns "false".
1001 bool dvmUnlockObject(Thread* self, Object *obj)
1005 assert(self != NULL);
1006 assert(self->status == THREAD_RUNNING);
1007 assert(obj != NULL);
1009 * Cache the lock word as its value can change while we are
1010 * examining its state.
1012 thin = *(volatile u4 *)&obj->lock;
1013 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1015 * The lock is thin. We must ensure that the lock is owned
1016 * by the given thread before unlocking it.
1018 if (LW_LOCK_OWNER(thin) == self->threadId) {
1020 * We are the lock owner. It is safe to update the lock
1021 * without CAS as lock ownership guards the lock itself.
1023 if (LW_LOCK_COUNT(thin) == 0) {
1025 * The lock was not recursively acquired, the common
1026 * case. Unlock by clearing all bits except for the
/* Keep only the hash-state bits; owner id and count go to zero. */
1029 thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
1030 android_atomic_release_store(thin, (int32_t*)&obj->lock);
1033 * The object was recursively acquired. Decrement the
1034 * lock recursion count field.
1036 obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
1040 * We do not own the lock. The JVM spec requires that we
1041 * throw an exception in this case.
1043 dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
1048 * The lock is fat. We must check to see if unlockMonitor has
1049 * raised any exceptions before continuing.
1051 assert(LW_MONITOR(obj->lock) != NULL);
1052 if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
1054 * An exception has been raised. Do not fall through.
1063 * Object.wait(). Also called for class init.
1065 void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
1066 bool interruptShouldThrow)
1069 u4 thin = *(volatile u4 *)&obj->lock;
1071 /* If the lock is still thin, we need to fatten it.
/* Waiting requires a fat lock because the wait set and condition
 * variable live in the Monitor. */
1073 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1074 /* Make sure that 'self' holds the lock.
1076 if (LW_LOCK_OWNER(thin) != self->threadId) {
1077 dvmThrowIllegalMonitorStateException(
1078 "object not locked by thread before wait()");
1082 /* This thread holds the lock. We need to fatten the lock
1083 * so 'self' can block on it. Don't update the object lock
1084 * field yet, because 'self' needs to acquire the lock before
1085 * any other thread gets a chance.
1087 inflateMonitor(self, obj);
1088 ALOGV("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
1090 mon = LW_MONITOR(obj->lock);
1091 waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
/* Object.notify(): wakes one waiter, if any. */
1097 void dvmObjectNotify(Thread* self, Object *obj)
1099 u4 thin = *(volatile u4 *)&obj->lock;
1101 /* If the lock is still thin, there aren't any waiters;
1102 * waiting on an object forces lock fattening.
1104 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1105 /* Make sure that 'self' holds the lock.
1107 if (LW_LOCK_OWNER(thin) != self->threadId) {
1108 dvmThrowIllegalMonitorStateException(
1109 "object not locked by thread before notify()");
1113 /* no-op; there are no waiters to notify.
/* Fat lock: delegate to the monitor's notify. */
1118 notifyMonitor(self, LW_MONITOR(thin));
1123 * Object.notifyAll().
/* Same structure as dvmObjectNotify, but wakes every waiter. */
1125 void dvmObjectNotifyAll(Thread* self, Object *obj)
1127 u4 thin = *(volatile u4 *)&obj->lock;
1129 /* If the lock is still thin, there aren't any waiters;
1130 * waiting on an object forces lock fattening.
1132 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1133 /* Make sure that 'self' holds the lock.
1135 if (LW_LOCK_OWNER(thin) != self->threadId) {
1136 dvmThrowIllegalMonitorStateException(
1137 "object not locked by thread before notifyAll()");
1141 /* no-op; there are no waiters to notify.
1146 notifyAllMonitor(self, LW_MONITOR(thin));
1151 * This implements java.lang.Thread.sleep(long msec, int nsec).
1153 * The sleep is interruptible by other threads, which means we can't just
1154 * plop into an OS sleep call. (We probably could if we wanted to send
1155 * signals around and rely on EINTR, but that's inefficient and relies
1156 * on native code respecting our signal mask.)
1158 * We have to do all of this stuff for Object.wait() as well, so it's
1159 * easiest to just sleep on a private Monitor.
1161 * It appears that we want sleep(0,0) to go through the motions of sleeping
1162 * for a very short duration, rather than just returning.
1164 void dvmThreadSleep(u8 msec, u4 nsec)
1166 Thread* self = dvmThreadSelf();
1167 Monitor* mon = gDvm.threadSleepMon;
1169 /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
/* The adjusted-value assignment is not visible in this excerpt. */
1170 if (msec == 0 && nsec == 0)
/* Timed wait on the VM-private sleep monitor; interruptShouldThrow is
 * true so Thread.interrupt() throws InterruptedException as required. */
1173 lockMonitor(self, mon);
1174 waitMonitor(self, mon, msec, nsec, true);
1175 unlockMonitor(self, mon);
1179 * Implement java.lang.Thread.interrupt().
1181 void dvmThreadInterrupt(Thread* thread)
1183 assert(thread != NULL);
/* waitMutex guards the target thread's interrupted flag, waitMonitor
 * pointer, and waitCond — the same protocol waitMonitor() follows. */
1185 dvmLockMutex(&thread->waitMutex);
1188 * If the interrupted flag is already set no additional action is
1191 if (thread->interrupted == true) {
1192 dvmUnlockMutex(&thread->waitMutex);
1197 * Raise the "interrupted" flag. This will cause it to bail early out
1198 * of the next wait() attempt, if it's not currently waiting on
1201 thread->interrupted = true;
1204 * Is the thread waiting?
1206 * Note that fat vs. thin doesn't matter here; waitMonitor
1207 * is only set when a thread actually waits on a monitor,
1208 * which implies that the monitor has already been fattened.
1210 if (thread->waitMonitor != NULL) {
1211 pthread_cond_signal(&thread->waitCond);
1214 dvmUnlockMutex(&thread->waitMutex);
1217 #ifndef WITH_COPYING_GC
/* Non-copying-GC build: objects never move, so the trivial version
 * applies (its body is not visible in this excerpt). */
1218 u4 dvmIdentityHashCode(Object *obj)
1224 * Returns the identity hash code of the given object.
/* Copying-GC build: the identity hash is derived from the object's
 * address ((u4)obj >> 3, dropping the 8-byte-alignment bits), so the
 * lock word tracks whether the object has been hashed and whether the
 * collector has since relocated it. */
1226 u4 dvmIdentityHashCode(Object *obj)
1228 Thread *self, *thread;
1231 u4 lock, owner, hashState;
1235 * Null is defined to have an identity hash code of 0.
1241 hashState = LW_HASH_STATE(*lw);
1242 if (hashState == LW_HASH_STATE_HASHED) {
1244 * The object has been hashed but has not had its hash code
1245 * relocated by the garbage collector. Use the raw object
1248 return (u4)obj >> 3;
1249 } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
1251 * The object has been hashed and its hash code has been
1252 * relocated by the collector. Use the value of the naturally
1253 * aligned word following the instance data.
1255 assert(!dvmIsClassObject(obj));
/* Round the array size up to word alignment to locate the trailing
 * saved-hash slot. */
1256 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1257 size = dvmArrayObjectSize((ArrayObject *)obj);
1258 size = (size + 2) & ~2;
1260 size = obj->clazz->objectSize;
1262 return *(u4 *)(((char *)obj) + size);
1263 } else if (hashState == LW_HASH_STATE_UNHASHED) {
1265 * The object has never been hashed. Change the hash state to
1266 * hashed and use the raw object address.
1268 self = dvmThreadSelf();
1269 if (self->threadId == lockOwner(obj)) {
1271 * We already own the lock so we can update the hash state
1274 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1275 return (u4)obj >> 3;
1278 * We do not own the lock. Try acquiring the lock. Should
1279 * this fail, we must suspend the owning thread.
1281 if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
1283 * If the lock is thin assume it is unowned. We simulate
1284 * an acquire, update, and release with a single CAS.
1286 lock = (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1287 if (android_atomic_acquire_cas(
1290 (int32_t *)lw) == 0) {
1292 * A new lockword has been installed with a hash state
1293 * of hashed. Use the raw object address.
1295 return (u4)obj >> 3;
1298 if (tryLockMonitor(self, LW_MONITOR(*lw))) {
1300 * The monitor lock has been acquired. Change the
1301 * hash state to hashed and use the raw object
1304 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1305 unlockMonitor(self, LW_MONITOR(*lw));
1306 return (u4)obj >> 3;
1310 * At this point we have failed to acquire the lock. We must
1311 * identify the owning thread and suspend it.
1313 dvmLockThreadList(self);
1315 * Cache the lock word as its value can change between
1316 * determining its shape and retrieving its owner.
1319 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
1321 * Find the thread with the corresponding thread id.
1323 owner = LW_LOCK_OWNER(lock);
1324 assert(owner != self->threadId);
1326 * If the lock has no owner do not bother scanning the
1327 * thread list and fall through to the failure handler.
1329 thread = owner ? gDvm.threadList : NULL;
1330 while (thread != NULL) {
1331 if (thread->threadId == owner) {
1334 thread = thread->next;
/* Fat lock: the Monitor records the owner directly. */
1337 thread = LW_MONITOR(lock)->owner;
1340 * If thread is NULL the object has been released since the
1341 * thread list lock was acquired. Try again.
1343 if (thread == NULL) {
1344 dvmUnlockThreadList();
1348 * Wait for the owning thread to suspend.
1350 dvmSuspendThread(thread);
/* Re-verify ownership after suspension: the lock may have changed
 * hands between our lookup and the suspend. */
1351 if (dvmHoldsLock(thread, obj)) {
1353 * The owning thread has been suspended. We can safely
1354 * change the hash state to hashed.
1356 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1357 dvmResumeThread(thread);
1358 dvmUnlockThreadList();
1359 return (u4)obj >> 3;
1362 * The wrong thread has been suspended. Try again.
1364 dvmResumeThread(thread);
1365 dvmUnlockThreadList();
1368 ALOGE("object %p has an unknown hash state %#x", obj, hashState);
1369 dvmDumpThread(dvmThreadSelf(), false);
1371 return 0; /* Quiet the compiler. */
1373 #endif /* WITH_COPYING_GC */