2 * Copyright (C) 2008 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Fundamental synchronization mechanisms.
20 * The top part of the file has operations on "monitor" structs; the
21 * next part has the native calls on objects.
23 * The current implementation uses "thin locking" to avoid allocating
24 * an Object's full Monitor struct until absolutely necessary (i.e.,
25 * during contention or a call to wait()).
27 * TODO: make improvements to thin locking
28 * We may be able to improve performance and reduce memory requirements by:
29 * - reverting to a thin lock once the Monitor is no longer necessary
30 * - using a pool of monitor objects, with some sort of recycling scheme
32 * TODO: recycle native-level monitors when objects are garbage collected.
/* NOTE(review): throughout this chunk, the leading integers on each line are
 * original file line numbers fused in by extraction, and interior lines
 * (braces, declarations) have been elided — confirm against the full source. */
46 #ifdef WITH_DEADLOCK_PREDICTION /* fwd */
/* Banner strings used to bracket deadlock-prediction output in the log. */
47 static const char* kStartBanner =
48 "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
49 static const char* kEndBanner =
50 "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";
53 * Unsorted, expanding list of objects.
55 * This is very similar to PointerSet (which came into existence after this),
56 * but these are unsorted, uniqueness is not enforced by the "add" function,
57 * and the base object isn't allocated on the heap.
/* Growable flat list of Object pointers (field declarations elided here). */
59 typedef struct ExpandingObjectList {
63 } ExpandingObjectList;
/* Forward declarations for deadlock-prediction helpers defined later in
 * this file; referenced by freeObjectMonitor() and dvmLockObject(). */
66 static void updateDeadlockPrediction(Thread* self, Object* obj);
67 static void removeCollectedObject(Object* obj);
68 static void expandObjClear(ExpandingObjectList* pList);
72 * Every Object has a monitor associated with it, but not every Object is
73 * actually locked. Even the ones that are locked do not need a
74 * full-fledged monitor until a) there is actual contention or b) wait()
75 * is called on the Object.
77 * For Dalvik, we have implemented a scheme similar to the one described
78 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
79 * (ACM 1998). Things are even easier for us, though, because we have
80 * a full 32 bits to work with.
82 * The two states of an Object's lock are referred to as "thin" and
83 * "fat". A lock may transition from the "thin" state to the "fat"
84 * state and this transition is referred to as inflation. Once a lock
85 * has been inflated it remains in the "fat" state indefinitely.
87 * The lock value itself is stored in Object.lock. The LSB of the
88 * lock encodes its state. When cleared, the lock is in the "thin"
89 * state and its bits are formatted as follows:
91 * [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
92 * lock count thread id hash state 0
94 * When set, the lock is in the "fat" state and its bits are formatted
97 * [31 ---- 3] [2 ---- 1] [0]
98 * pointer hash state 1
100 * For an in-depth description of the mechanics of thin-vs-fat locking,
101 * read the paper referred to above.
106 * - mutually exclusive access to resources
107 * - a way for multiple threads to wait for notification
109 * In effect, they fill the role of both mutexes and condition variables.
111 * Only one thread can own the monitor at any time. There may be several
112 * threads waiting on it (the wait call unlocks it). One or more waiting
113 * threads may be getting interrupted or notified at any given time.
/* Fields of the Monitor struct; the "struct Monitor {" opener and the
 * closing brace/next pointer are on lines elided from this chunk. */
116 Thread* owner; /* which thread currently owns the lock? */
117 int lockCount; /* owner's recursive lock depth */
118 Object* obj; /* what object are we part of [debug only] */
/* Singly-linked via Thread.waitNext; manipulated by waitSetAppend/Remove. */
120 Thread* waitSet; /* threads currently waiting on this monitor */
/* The native mutex that actually provides mutual exclusion for this
 * monitor; waiters block on their per-thread waitCond, not on this. */
122 pthread_mutex_t lock;
126 #ifdef WITH_DEADLOCK_PREDICTION
128 * Objects that have been locked immediately after this one in the
129 * past. We use an expanding flat array, allocated on first use, to
130 * minimize allocations. Deletions from the list, expected to be
131 * infrequent, are crunched down.
133 ExpandingObjectList historyChildren;
136 * We also track parents. This isn't strictly necessary, but it makes
137 * the cleanup at GC time significantly faster.
139 ExpandingObjectList historyParents;
141 /* used during cycle detection */
144 /* stack trace, established the first time we locked the object */
145 int historyStackDepth;
146 int* historyRawStackTrace;
152 * Create and initialize a monitor.
/* Allocates a zeroed Monitor, verifies its alignment, initializes its
 * mutex, and pushes it onto the global monitor list with a CAS loop.
 * Error-path lines (returns after the LOGE calls) are elided here. */
154 Monitor* dvmCreateMonitor(Object* obj)
158 mon = (Monitor*) calloc(1, sizeof(Monitor));
160 LOGE("Unable to allocate monitor\n");
/* The fat lock word stores the Monitor pointer in bits [31..3] (see the
 * layout comment above), so the monitor must be 8-byte aligned. */
163 if (((u4)mon & 7) != 0) {
164 LOGE("Misaligned monitor: %p\n", mon);
168 dvmInitMutex(&mon->lock);
170 /* replace the head of the list with the new monitor */
/* Lock-free push: retry until the CAS installs this monitor as head. */
172 mon->next = gDvm.monitorList;
173 } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
174 (int32_t)mon->next, (int32_t)mon));
180 * Free the monitor list. Only used when shutting the VM down.
/* Walks gDvm.monitorList freeing each entry; the per-iteration advance
 * and free() lines are elided in this chunk. */
182 void dvmFreeMonitorList(void)
187 mon = gDvm.monitorList;
188 while (mon != NULL) {
191 #ifdef WITH_DEADLOCK_PREDICTION
192 expandObjClear(&mon->historyChildren);
193 expandObjClear(&mon->historyParents);
194 free(mon->historyRawStackTrace);
202 * Log some info about our monitors.
/* Counts total and live (obj != NULL) monitors and logs the totals. */
204 void dvmDumpMonitorInfo(const char* msg)
206 #if QUIET_ZYGOTE_MONITOR
215 totalCount = liveCount = 0;
216 Monitor* mon = gDvm.monitorList;
217 while (mon != NULL) {
/* A monitor whose object pointer is still set counts as "live". */
219 if (mon->obj != NULL)
224 LOGD("%s: monitor list has %d entries (%d live)\n",
225 msg, totalCount, liveCount);
229 * Get the object that a monitor is part of.
/* Trivial accessor; body (return mon->obj) elided in this chunk. */
231 Object* dvmGetMonitorObject(Monitor* mon)
240 * Returns the thread id of the thread owning the given lock.
/* Works for both thin and fat lock words; returns 0 when unowned. */
242 static u4 lockOwner(Object* obj)
249 * Since we're reading the lock value multiple times, latch it so
250 * that it doesn't change out from under us if we get preempted.
/* Thin lock: owner thread id is encoded directly in the lock word. */
253 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
254 return LW_LOCK_OWNER(lock);
/* Fat lock: chase the Monitor pointer and read its owner field. */
256 owner = LW_MONITOR(lock)->owner;
257 return owner ? owner->threadId : 0;
262 * Get the thread that holds the lock on the specified object. The
263 * object may be unlocked, thin-locked, or fat-locked.
265 * The caller must lock the thread list before calling here.
267 Thread* dvmGetObjectLockHolder(Object* obj)
269 u4 threadId = lockOwner(obj);
/* The threadId==0 (unowned) early return is on an elided line. */
273 return dvmGetThreadByThreadId(threadId);
277 * Checks whether the given thread holds the given
/* Returns false for NULL thread/object (guard below); the early-return
 * line inside the guard is elided in this chunk. */
280 bool dvmHoldsLock(Thread* thread, Object* obj)
282 if (thread == NULL || obj == NULL) {
285 return thread->threadId == lockOwner(obj);
290 * Free the monitor associated with an object and make the object's lock
291 * thin again. This is called during garbage collection.
293 static void freeObjectMonitor(Object* obj)
297 assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);
299 #ifdef WITH_DEADLOCK_PREDICTION
300 if (gDvm.deadlockPredictMode != kDPOff)
301 removeCollectedObject(obj);
/* Detach the monitor and reset the lock word to the pristine thin value
 * before tearing the monitor down. */
304 mon = LW_MONITOR(obj->lock);
305 obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;
307 /* This lock is associated with an object
308 * that's being swept. The only possible way
309 * anyone could be holding this lock would be
310 * if some JNI code locked but didn't unlock
311 * the object, in which case we've got some bad
312 * native code somewhere.
/* NOTE(review): trylock is inside assert(), so it is compiled out in
 * NDEBUG builds — the mutex is then destroyed without the sanity check. */
314 assert(pthread_mutex_trylock(&mon->lock) == 0);
315 pthread_mutex_destroy(&mon->lock);
316 #ifdef WITH_DEADLOCK_PREDICTION
317 expandObjClear(&mon->historyChildren);
318 expandObjClear(&mon->historyParents);
319 free(mon->historyRawStackTrace);
325 * Frees monitor objects belonging to unmarked objects.
/* Walks the monitor list with a dummy "prev" head so unlinking the first
 * element needs no special case; advance-on-keep lines are elided. */
327 void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
330 Monitor *prev, *curr;
334 assert(*mon != NULL);
335 assert(isUnmarkedObject != NULL);
337 prev->next = curr = *mon;
338 while (curr != NULL) {
/* Object is garbage: unlink this monitor and free it. */
340 if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
341 prev->next = curr = curr->next;
342 freeObjectMonitor(obj);
/* Appends an EVENT_TYPE_INT entry (1 tag byte + 4-byte little-endian
 * value) to the event buffer; the pointer-advance/return is elided. */
351 static char *logWriteInt(char *dst, int value)
353 *dst++ = EVENT_TYPE_INT;
354 set4LE((u1 *)dst, value);
/* Appends an EVENT_TYPE_STRING entry: tag byte, 4-byte length, then the
 * string bytes, truncated to 32. */
358 static char *logWriteString(char *dst, const char *value, size_t len)
360 *dst++ = EVENT_TYPE_STRING;
/* NOTE(review): as visible here the length header is written BEFORE the
 * clamp below, so it could claim more bytes than are copied — verify the
 * ordering against the un-elided source. */
361 set4LE((u1 *)dst, len);
363 len = len < 32 ? len : 32;
364 memcpy(dst, value, len);
368 #define EVENT_LOG_TAG_dvm_lock_contention 20003
/* Emits a dvm_lock_contention event-log record describing a sampled
 * monitor-contention stall: process name, thread info, wait time, source
 * location, and the sampling percentage. */
370 static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent)
372 const StackSaveArea *saveArea;
375 char eventBuffer[131];
376 const char *fileName;
377 char procName[32], *selfName, *ownerName;
381 saveArea = SAVEAREA_FROM_FP(self->curFrame);
382 meth = saveArea->method;
385 /* Emit the event list length, 1 byte. */
388 /* Emit the process name, <= 37 bytes. */
/* NOTE(review): no visible check that open() succeeded before read(),
 * and the close(fd) is presumably on an elided line — confirm. */
389 fd = open("/proc/self/cmdline", O_RDONLY);
390 len = read(fd, procName, sizeof(procName));
392 cp = logWriteString(cp, procName, (len > 0 ? len : 0));
394 /* Emit the main thread status, 5 bytes. */
395 bool isMainThread = (self->systemTid == getpid());
396 cp = logWriteInt(cp, isMainThread);
398 /* Emit self thread name string, <= 37 bytes. */
399 selfName = dvmGetThreadName(self);
400 cp = logWriteString(cp, selfName, strlen(selfName));
403 /* Emit the wait time, 5 bytes. */
404 cp = logWriteInt(cp, waitMs);
406 /* Emit the source code file name, <= 37 bytes. */
407 fileName = dvmGetMethodSourceFile(meth);
408 if (fileName == NULL) fileName = "";
409 cp = logWriteString(cp, fileName, strlen(fileName));
411 /* Emit the source code line number, 5 bytes. */
412 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
413 cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
415 /* Emit the sample percentage, 5 bytes. */
416 cp = logWriteInt(cp, samplePercent);
418 assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
419 android_btWriteLog(EVENT_LOG_TAG_dvm_lock_contention,
422 (size_t)(cp - eventBuffer));
/* Acquires the monitor's native mutex for "self". A recursive acquire
 * (owner == self) bumps lockCount on an elided line. On contention the
 * thread transitions to THREAD_MONITOR, blocks, and optionally logs a
 * sampled contention event. */
428 static void lockMonitor(Thread* self, Monitor* mon)
431 ThreadStatus oldStatus;
432 u4 waitThreshold, samplePercent;
433 u8 waitStart, waitEnd, waitMs;
435 if (mon->owner == self) {
/* Fast trylock failed: announce we are blocking before sleeping on the
 * mutex, so the GC/suspend machinery does not wait on us. */
439 if (pthread_mutex_trylock(&mon->lock) != 0) {
440 oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
441 waitThreshold = gDvm.lockProfThreshold;
443 waitStart = dvmGetRelativeTimeUsec();
445 dvmLockMutex(&mon->lock);
447 waitEnd = dvmGetRelativeTimeUsec();
449 dvmChangeStatus(self, oldStatus);
451 waitMs = (waitEnd - waitStart) / 1000;
452 if (waitMs >= waitThreshold) {
/* NOTE(review): assumes gDvm.lockProfSample != 0 whenever the threshold
 * is enabled; an elided guard may enforce this — confirm. */
455 samplePercent = 1 + 100 * waitMs / gDvm.lockProfSample;
457 if ((u4)rand() % 100 < samplePercent) {
458 logContentionEvent(self, waitMs, samplePercent);
/* Ownership bookkeeping (setting mon->owner = self) is elided here. */
463 assert(mon->lockCount == 0);
467 * Try to lock a monitor.
469 * Returns "true" on success.
/* Non-blocking variant of lockMonitor; recursive-acquire and
 * failure-return paths are on elided lines. */
471 static bool tryLockMonitor(Thread* self, Monitor* mon)
475 if (mon->owner == self) {
479 cc = pthread_mutex_trylock(&mon->lock);
482 assert(mon->lockCount == 0);
494 * Returns true if the unlock succeeded.
495 * If the unlock failed, an exception will be pending.
497 static bool unlockMonitor(Thread* self, Monitor* mon)
499 assert(self != NULL);
500 assert(mon != NULL); // can this happen?
502 if (mon->owner == self) {
504 * We own the monitor, so nobody else can be in here.
/* lockCount == 0 means the outermost acquire: clear owner (elided) and
 * release the native mutex; otherwise just decrement (elided). */
506 if (mon->lockCount == 0) {
509 cc = pthread_mutex_unlock(&mon->lock);
516 * We don't own this, so we're not allowed to unlock it.
517 * The JNI spec says that we should throw IllegalMonitorStateException
520 dvmThrowExceptionFmt("Ljava/lang/IllegalMonitorStateException;",
521 "unlock of unowned monitor, self=%d owner=%d",
523 mon->owner ? mon->owner->threadId : 0);
530 * Checks the wait set for circular structure. Returns 0 if the list
531 * is not circular. Otherwise, returns 1. Used only by asserts.
/* Floyd tortoise-and-hare cycle detection: "fast" advances two links per
 * iteration, "slow" one; meeting again after the start implies a cycle. */
533 static int waitSetCheck(Monitor *mon)
539 fast = slow = mon->waitSet;
542 if (fast == NULL) return 0;
543 if (fast->waitNext == NULL) return 0;
544 if (fast == slow && n > 0) return 1;
546 fast = fast->waitNext->waitNext;
547 slow = slow->waitNext;
552 * Links a thread into a monitor's wait set. The monitor lock must be
553 * held by the caller of this routine.
555 static void waitSetAppend(Monitor *mon, Thread *thread)
560 assert(mon->owner == dvmThreadSelf());
561 assert(thread != NULL);
562 assert(thread->waitNext == NULL);
563 assert(waitSetCheck(mon) == 0);
/* Empty set: new thread becomes the head (return is elided). */
564 if (mon->waitSet == NULL) {
565 mon->waitSet = thread;
/* Otherwise walk to the tail and append. */
569 while (elt->waitNext != NULL) {
572 elt->waitNext = thread;
576 * Unlinks a thread from a monitor's wait set. The monitor lock must
577 * be held by the caller of this routine.
/* Silently succeeds if the thread is not in the set. */
579 static void waitSetRemove(Monitor *mon, Thread *thread)
584 assert(mon->owner == dvmThreadSelf());
585 assert(thread != NULL);
586 assert(waitSetCheck(mon) == 0);
587 if (mon->waitSet == NULL) {
/* Head removal: advance the head and clear the unlinked next pointer. */
590 if (mon->waitSet == thread) {
591 mon->waitSet = thread->waitNext;
592 thread->waitNext = NULL;
/* Interior removal: splice the thread out of the chain. */
596 while (elt->waitNext != NULL) {
597 if (elt->waitNext == thread) {
598 elt->waitNext = thread->waitNext;
599 thread->waitNext = NULL;
607 * Converts the given relative waiting time into an absolute time.
/* Uses CLOCK_MONOTONIC when pthread_cond_timedwait_monotonic is
 * available, otherwise wall-clock gettimeofday, so the result matches
 * the clock dvmRelativeCondWait() waits on. */
609 void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
613 #ifdef HAVE_TIMEDWAIT_MONOTONIC
614 clock_gettime(CLOCK_MONOTONIC, ts);
618 gettimeofday(&tv, NULL);
619 ts->tv_sec = tv.tv_sec;
620 ts->tv_nsec = tv.tv_usec * 1000;
/* Clamp durations that would overflow a 32-bit time_t epoch; the clamp
 * assignment itself is on an elided line. */
623 endSec = ts->tv_sec + msec / 1000;
624 if (endSec >= 0x7fffffff) {
625 LOGV("NOTE: end time exceeds epoch\n");
629 ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;
/* Normalize: carry nanosecond overflow into the seconds field. */
632 if (ts->tv_nsec >= 1000000000L) {
634 ts->tv_nsec -= 1000000000L;
/* Condition-variable wait with a relative timeout; returns the
 * pthread_cond_timedwait result (0 or ETIMEDOUT). */
638 int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
643 absoluteTime(msec, nsec, &ts);
644 #if defined(HAVE_TIMEDWAIT_MONOTONIC)
645 ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
647 ret = pthread_cond_timedwait(cond, mutex, &ts);
649 assert(ret == 0 || ret == ETIMEDOUT);
654 * Wait on a monitor until timeout, interrupt, or notification. Used for
655 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
657 * If another thread calls Thread.interrupt(), we throw InterruptedException
658 * and return immediately if one of the following are true:
659 * - blocked in wait(), wait(long), or wait(long, int) methods of Object
660 * - blocked in join(), join(long), or join(long, int) methods of Thread
661 * - blocked in sleep(long), or sleep(long, int) methods of Thread
662 * Otherwise, we set the "interrupted" flag.
664 * Checks to make sure that "nsec" is in the range 0-999999
665 * (i.e. fractions of a millisecond) and throws the appropriate
666 * exception if it isn't.
668 * The spec allows "spurious wakeups", and recommends that all code using
669 * Object.wait() do so in a loop. This appears to derive from concerns
670 * about pthread_cond_wait() on multiprocessor systems. Some commentary
671 * on the web casts doubt on whether these can/should occur.
673 * Since we're allowed to wake up "early", we clamp extremely long durations
674 * to return at the end of the 32-bit time epoch.
/* NOTE(review): this function is highly order-sensitive (wait-set
 * membership vs. owner/count clearing vs. mutex handoff); several
 * intermediate lines are elided from this chunk. */
676 static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
677 bool interruptShouldThrow)
680 bool wasInterrupted = false;
684 assert(self != NULL);
687 /* Make sure that we hold the lock. */
688 if (mon->owner != self) {
689 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
690 "object not locked by thread before wait()");
695 * Enforce the timeout range.
697 if (msec < 0 || nsec < 0 || nsec > 999999) {
698 dvmThrowException("Ljava/lang/IllegalArgumentException;",
699 "timeout arguments out of range");
704 * Compute absolute wakeup time, if necessary.
/* msec==0 && nsec==0 means "wait forever" (untimed wait path). */
706 if (msec == 0 && nsec == 0) {
709 absoluteTime(msec, nsec, &ts);
714 * Add ourselves to the set of threads waiting on this monitor, and
715 * release our hold. We need to let it go even if we're a few levels
716 * deep in a recursive lock, and we need to restore that later.
718 * We append to the wait set ahead of clearing the count and owner
719 * fields so the subroutine can check that the calling thread owns
720 * the monitor. Aside from that, the order of member updates is
721 * not order sensitive as we hold the pthread mutex.
723 waitSetAppend(mon, self);
/* Remember the recursion depth so it can be restored after re-acquire. */
724 int prevLockCount = mon->lockCount;
729 * Update thread status. If the GC wakes up, it'll ignore us, knowing
730 * that we won't touch any references in this state, and we'll check
731 * our suspend mode before we transition out.
734 dvmChangeStatus(self, THREAD_TIMED_WAIT);
736 dvmChangeStatus(self, THREAD_WAIT);
738 ret = pthread_mutex_lock(&self->waitMutex);
742 * Set waitMonitor to the monitor object we will be waiting on.
743 * When waitMonitor is non-NULL a notifying or interrupting thread
744 * must signal the thread's waitCond to wake it up.
746 assert(self->waitMonitor == NULL);
747 self->waitMonitor = mon;
750 * Handle the case where the thread was interrupted before we called
753 if (self->interrupted) {
754 wasInterrupted = true;
755 self->waitMonitor = NULL;
756 pthread_mutex_unlock(&self->waitMutex);
761 * Release the monitor lock and wait for a notification or
762 * a timeout to occur.
764 pthread_mutex_unlock(&mon->lock);
/* Untimed vs. timed wait; the branch selecting between them is elided. */
767 ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
770 #ifdef HAVE_TIMEDWAIT_MONOTONIC
771 ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
773 ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
775 assert(ret == 0 || ret == ETIMEDOUT);
777 if (self->interrupted) {
778 wasInterrupted = true;
781 self->interrupted = false;
782 self->waitMonitor = NULL;
784 pthread_mutex_unlock(&self->waitMutex);
786 /* Reacquire the monitor lock. */
787 lockMonitor(self, mon);
791 * We remove our thread from wait set after restoring the count
792 * and owner fields so the subroutine can check that the calling
793 * thread owns the monitor. Aside from that, the order of member
794 * updates is not order sensitive as we hold the pthread mutex.
797 mon->lockCount = prevLockCount;
798 waitSetRemove(mon, self);
800 /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
801 dvmChangeStatus(self, THREAD_RUNNING);
803 if (wasInterrupted) {
805 * We were interrupted while waiting, or somebody interrupted an
806 * un-interruptible thread earlier and we're bailing out immediately.
808 * The doc sayeth: "The interrupted status of the current thread is
809 * cleared when this exception is thrown."
811 self->interrupted = false;
812 if (interruptShouldThrow)
813 dvmThrowException("Ljava/lang/InterruptedException;", NULL);
818 * Notify one thread waiting on this monitor.
/* Pops threads off the wait set until one is found that is still waiting
 * (waitMonitor != NULL); only that one is signaled. The loop-exit after
 * a successful signal is presumably on an elided line — confirm. */
820 static void notifyMonitor(Thread* self, Monitor* mon)
824 assert(self != NULL);
827 /* Make sure that we hold the lock. */
828 if (mon->owner != self) {
829 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
830 "object not locked by thread before notify()");
833 /* Signal the first waiting thread in the wait set. */
834 while (mon->waitSet != NULL) {
835 thread = mon->waitSet;
836 mon->waitSet = thread->waitNext;
837 thread->waitNext = NULL;
/* Per-thread waitMutex guards the waitMonitor/waitCond handshake. */
838 pthread_mutex_lock(&thread->waitMutex);
839 /* Check to see if the thread is still waiting. */
840 if (thread->waitMonitor != NULL) {
841 pthread_cond_signal(&thread->waitCond);
842 pthread_mutex_unlock(&thread->waitMutex);
845 pthread_mutex_unlock(&thread->waitMutex);
850 * Notify all threads waiting on this monitor.
/* Same structure as notifyMonitor, but drains the whole wait set and
 * signals every thread that is still actually waiting. */
852 static void notifyAllMonitor(Thread* self, Monitor* mon)
856 assert(self != NULL);
859 /* Make sure that we hold the lock. */
860 if (mon->owner != self) {
861 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
862 "object not locked by thread before notifyAll()");
865 /* Signal all threads in the wait set. */
866 while (mon->waitSet != NULL) {
867 thread = mon->waitSet;
868 mon->waitSet = thread->waitNext;
869 thread->waitNext = NULL;
870 pthread_mutex_lock(&thread->waitMutex);
871 /* Check to see if the thread is still waiting. */
872 if (thread->waitMonitor != NULL) {
873 pthread_cond_signal(&thread->waitCond);
875 pthread_mutex_unlock(&thread->waitMutex);
880 * Implements monitorenter for "synchronized" stuff.
882 * This does not fail or throw an exception (unless deadlock prediction
883 * is enabled and set to "err" mode).
/* Acquisition order, cheapest first: (1) recursive thin acquire by the
 * current owner, (2) CAS on an unowned thin lock, (3) spin/sleep until
 * the thin lock frees, then inflate to a fat Monitor. Fat locks go
 * straight to lockMonitor(). Several loop/branch lines are elided. */
885 void dvmLockObject(Thread* self, Object *obj)
889 ThreadStatus oldStatus;
890 useconds_t sleepDelay;
891 const useconds_t maxSleepDelay = 1 << 20;
892 u4 thin, newThin, threadId;
894 assert(self != NULL);
896 threadId = self->threadId;
900 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
902 * The lock is a thin lock. The owner field is used to
903 * determine the acquire method, ordered by cost.
905 if (LW_LOCK_OWNER(thin) == threadId) {
907 * The calling thread owns the lock. Increment the
908 * value of the recursion count field.
/* Safe without CAS: only the owner mutates an owned thin lock. */
910 obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
911 } else if (LW_LOCK_OWNER(thin) == 0) {
913 * The lock is unowned. Install the thread id of the
914 * calling thread into the owner field. This is the
915 * common case. In performance critical code the JIT
916 * will have tried this before calling out to the VM.
918 newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
919 if (!ATOMIC_CMP_SWAP((int32_t *)thinp, thin, newThin)) {
921 * The acquire failed. Try again.
926 LOG_THIN("(%d) spin on lock %p: %#x (%#x) %#x",
927 threadId, &obj->lock, 0, *thinp, thin);
929 * The lock is owned by another thread. Notify the VM
930 * that we are about to wait.
932 oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
934 * Spin until the thin lock is released or inflated.
940 * Check the shape of the lock word. Another thread
941 * may have inflated the lock while we were waiting.
943 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
944 if (LW_LOCK_OWNER(thin) == 0) {
946 * The lock has been released. Install the
947 * thread id of the calling thread into the
950 newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
951 if (ATOMIC_CMP_SWAP((int32_t *)thinp,
954 * The acquire succeed. Break out of the
955 * loop and proceed to inflate the lock.
961 * The lock has not been released. Yield so
962 * the owning thread can run.
/* Backoff: sched_yield first (elided), then usleep with exponentially
 * growing delay, capped at maxSleepDelay. */
964 if (sleepDelay == 0) {
969 if (sleepDelay < maxSleepDelay / 2) {
976 * The thin lock was inflated by another thread.
977 * Let the VM know we are no longer waiting and
980 LOG_THIN("(%d) lock %p surprise-fattened",
981 threadId, &obj->lock);
982 dvmChangeStatus(self, oldStatus);
986 LOG_THIN("(%d) spin on lock done %p: %#x (%#x) %#x",
987 threadId, &obj->lock, 0, *thinp, thin);
989 * We have acquired the thin lock. Let the VM know that
990 * we are no longer waiting.
992 dvmChangeStatus(self, oldStatus);
/* Contended acquire always inflates: build a Monitor, take it, then
 * publish a fat lock word preserving the hash-state bits. */
996 mon = dvmCreateMonitor(obj);
997 lockMonitor(self, mon);
999 thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
1000 thin |= (u4)mon | LW_SHAPE_FAT;
1003 LOG_THIN("(%d) lock %p fattened", threadId, &obj->lock);
1007 * The lock is a fat lock.
1009 assert(LW_MONITOR(obj->lock) != NULL);
1010 lockMonitor(self, LW_MONITOR(obj->lock));
1012 #ifdef WITH_DEADLOCK_PREDICTION
1014 * See if we were allowed to grab the lock at this time. We do it
1015 * *after* acquiring the lock, rather than before, so that we can
1016 * freely update the Monitor struct. This seems counter-intuitive,
1017 * but our goal is deadlock *prediction* not deadlock *prevention*.
1018 * (If we actually deadlock, the situation is easy to diagnose from
1019 * a thread dump, so there's no point making a special effort to do
1020 * the checks before the lock is held.)
1022 * This needs to happen before we add the object to the thread's
1023 * monitor list, so we can tell the difference between first-lock and
1026 * It's also important that we do this while in THREAD_RUNNING, so
1027 * that we don't interfere with cleanup operations in the GC.
1029 if (gDvm.deadlockPredictMode != kDPOff) {
1030 if (self->status != THREAD_RUNNING) {
1031 LOGE("Bad thread status (%d) in DP\n", self->status);
1032 dvmDumpThread(self, false);
1035 assert(!dvmCheckException(self));
1036 updateDeadlockPrediction(self, obj);
1037 if (dvmCheckException(self)) {
1039 * If we're throwing an exception here, we need to free the
1040 * lock. We add the object to the thread's monitor list so the
1041 * "unlock" code can remove it.
1043 dvmAddToMonitorList(self, obj, false);
1044 dvmUnlockObject(self, obj);
1045 LOGV("--- unlocked, pending is '%s'\n",
1046 dvmGetException(self)->clazz->descriptor);
1051 * Add the locked object, and the current stack trace, to the list
1052 * held by the Thread object. If deadlock prediction isn't on,
1053 * don't capture the stack trace.
1055 dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
1056 #elif defined(WITH_MONITOR_TRACKING)
1058 * Add the locked object to the list held by the Thread object.
1060 dvmAddToMonitorList(self, obj, false);
1065 * Implements monitorexit for "synchronized" stuff.
1067 * On failure, throws an exception and returns "false".
1069 bool dvmUnlockObject(Thread* self, Object *obj)
1073 assert(self != NULL);
1074 assert(self->status == THREAD_RUNNING);
1075 assert(obj != NULL);
1077 * Cache the lock word as its value can change while we are
1078 * examining its state.
1081 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1083 * The lock is thin. We must ensure that the lock is owned
1084 * by the given thread before unlocking it.
1086 if (LW_LOCK_OWNER(thin) == self->threadId) {
1088 * We are the lock owner. It is safe to update the lock
1089 * without CAS as lock ownership guards the lock itself.
1091 if (LW_LOCK_COUNT(thin) == 0) {
1093 * The lock was not recursively acquired, the common
1094 * case. Unlock by clearing all bits except for the
/* Only the hash-state bits survive; owner and count fields go to 0. */
1097 obj->lock &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
1100 * The object was recursively acquired. Decrement the
1101 * lock recursion count field.
1103 obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
1107 * We do not own the lock. The JVM spec requires that we
1108 * throw an exception in this case.
1110 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1111 "unlock of unowned monitor");
1116 * The lock is fat. We must check to see if unlockMonitor has
1117 * raised any exceptions before continuing.
1119 assert(LW_MONITOR(obj->lock) != NULL);
1120 if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
1122 * An exception has been raised. Do not fall through.
/* The false return on this failure path is on an elided line. */
1128 #ifdef WITH_MONITOR_TRACKING
1130 * Remove the object from the Thread's list.
1132 dvmRemoveFromMonitorList(self, obj);
1139 * Object.wait(). Also called for class init.
/* wait() requires a fat lock (waiters park on the Monitor), so a thin
 * lock held by the caller is inflated here before delegating to
 * waitMonitor(). Throws IllegalMonitorStateException if not owner. */
1141 void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
1142 bool interruptShouldThrow)
1144 Monitor* mon = LW_MONITOR(obj->lock);
1146 u4 thin = obj->lock;
1148 /* If the lock is still thin, we need to fatten it.
1150 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1151 /* Make sure that 'self' holds the lock.
1153 if (LW_LOCK_OWNER(thin) != self->threadId) {
1154 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1155 "object not locked by thread before wait()");
1159 /* This thread holds the lock. We need to fatten the lock
1160 * so 'self' can block on it. Don't update the object lock
1161 * field yet, because 'self' needs to acquire the lock before
1162 * any other thread gets a chance.
1164 mon = dvmCreateMonitor(obj);
1166 /* 'self' has actually locked the object one or more times;
1167 * make sure that the monitor reflects this.
/* Carry the thin lock's recursion count over into the new monitor. */
1169 lockMonitor(self, mon);
1170 mon->lockCount = LW_LOCK_COUNT(thin);
1171 LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
1172 self->threadId, (uint)&obj->lock, mon->lockCount);
1175 /* Make the monitor public now that it's in the right state.
/* Preserve hash-state bits; the store to obj->lock is elided here. */
1177 thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
1178 thin |= (u4)mon | LW_SHAPE_FAT;
1183 waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
/* Object.notify(): on a thin lock there can be no waiters (waiting
 * forces inflation), so owner-held thin locks are a no-op. */
1189 void dvmObjectNotify(Thread* self, Object *obj)
1191 u4 thin = obj->lock;
1193 /* If the lock is still thin, there aren't any waiters;
1194 * waiting on an object forces lock fattening.
1196 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1197 /* Make sure that 'self' holds the lock.
1199 if (LW_LOCK_OWNER(thin) != self->threadId) {
1200 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1201 "object not locked by thread before notify()");
1205 /* no-op; there are no waiters to notify.
1210 notifyMonitor(self, LW_MONITOR(thin));
1215 * Object.notifyAll().
/* Same thin-lock reasoning as dvmObjectNotify, delegating to
 * notifyAllMonitor for fat locks. */
1217 void dvmObjectNotifyAll(Thread* self, Object *obj)
1219 u4 thin = obj->lock;
1221 /* If the lock is still thin, there aren't any waiters;
1222 * waiting on an object forces lock fattening.
1224 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
1225 /* Make sure that 'self' holds the lock.
1227 if (LW_LOCK_OWNER(thin) != self->threadId) {
1228 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1229 "object not locked by thread before notifyAll()");
1233 /* no-op; there are no waiters to notify.
1238 notifyAllMonitor(self, LW_MONITOR(thin));
1243 * This implements java.lang.Thread.sleep(long msec, int nsec).
1245 * The sleep is interruptible by other threads, which means we can't just
1246 * plop into an OS sleep call. (We probably could if we wanted to send
1247 * signals around and rely on EINTR, but that's inefficient and relies
1248 * on native code respecting our signal mask.)
1250 * We have to do all of this stuff for Object.wait() as well, so it's
1251 * easiest to just sleep on a private Monitor.
1253 * It appears that we want sleep(0,0) to go through the motions of sleeping
1254 * for a very short duration, rather than just returning.
1256 void dvmThreadSleep(u8 msec, u4 nsec)
1258 Thread* self = dvmThreadSelf();
/* A single VM-global monitor is shared by all sleepers. */
1259 Monitor* mon = gDvm.threadSleepMon;
1261 /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
/* The adjustment (bumping to a minimal nonzero wait) is elided here. */
1262 if (msec == 0 && nsec == 0)
/* interruptShouldThrow=true: sleep() throws InterruptedException. */
1265 lockMonitor(self, mon);
1266 waitMonitor(self, mon, msec, nsec, true);
1267 unlockMonitor(self, mon);
1271 * Implement java.lang.Thread.interrupt().
/* Sets the target's interrupted flag under its waitMutex and, if it is
 * parked in waitMonitor(), signals its waitCond to wake it. */
1273 void dvmThreadInterrupt(Thread* thread)
1275 assert(thread != NULL);
1277 pthread_mutex_lock(&thread->waitMutex);
1280 * If the interrupted flag is already set no additional action is
1283 if (thread->interrupted == true) {
1284 pthread_mutex_unlock(&thread->waitMutex);
1289 * Raise the "interrupted" flag. This will cause it to bail early out
1290 * of the next wait() attempt, if it's not currently waiting on
1293 thread->interrupted = true;
1297 * Is the thread waiting?
1299 * Note that fat vs. thin doesn't matter here; waitMonitor
1300 * is only set when a thread actually waits on a monitor,
1301 * which implies that the monitor has already been fattened.
1303 if (thread->waitMonitor != NULL) {
1304 pthread_cond_signal(&thread->waitCond);
1307 pthread_mutex_unlock(&thread->waitMutex);
1310 #ifndef WITH_COPYING_GC
/* Non-copying-GC variant of dvmIdentityHashCode; its body (presumably
 * just the raw object address) is on elided lines. */
1311 u4 dvmIdentityHashCode(Object *obj)
/* Returns the per-element size in bytes for an array object, derived
 * from the second character of the class descriptor (e.g. "[I"). */
1316 static size_t arrayElementWidth(const ArrayObject *array)
1318 const char *descriptor;
/* Reference arrays all store pointers regardless of component type. */
1320 if (dvmIsObjectArray(array)) {
1321 return sizeof(Object *);
1323 descriptor = array->obj.clazz->descriptor;
1324 switch (descriptor[1]) {
1325 case 'B': return 1; /* byte */
1326 case 'C': return 2; /* char */
1327 case 'D': return 8; /* double */
1328 case 'F': return 4; /* float */
1329 case 'I': return 4; /* int */
1330 case 'J': return 8; /* long */
1331 case 'S': return 2; /* short */
1332 case 'Z': return 1; /* boolean */
/* Unknown descriptor: log and dump; abort/assert lines are elided. */
1335 LOGE("object %p has an unhandled descriptor '%s'", array, descriptor);
1336 dvmDumpThread(dvmThreadSelf(), false);
1338 return 0; /* Quiet the compiler. */
/* Total byte length of an array object: header through last element. */
1341 static size_t arrayObjectLength(const ArrayObject *array)
1345 length = offsetof(ArrayObject, contents);
1346 length += array->length * arrayElementWidth(array);
1351 * Returns the identity hash code of the given object.
/*
 * Computes the identity hash code of "obj" based on the hash state
 * stored in the object's lock word.  While the object has not been
 * moved by the collector, the raw object address (shifted right 3)
 * serves as the hash; once relocated, the hash is read from the
 * naturally aligned word saved past the instance data.  For an
 * unhashed object we must flip the hash state to "hashed", which
 * requires owning the lock (or suspending the thread that does).
 * NOTE(review): several declarations and closing braces fall in
 * lines not visible in this excerpt.
 */
1353 u4 dvmIdentityHashCode(Object *obj)
1355 Thread *self, *thread;
1358 u4 lock, owner, hashState;
1362 * Null is defined to have an identity hash code of 0.
1368 hashState = LW_HASH_STATE(*lw);
1369 if (hashState == LW_HASH_STATE_HASHED) {
1371 * The object has been hashed but has not had its hash code
1372 * relocated by the garbage collector. Use the raw object
1375 return (u4)obj >> 3;
1376 } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
1378 * The object has been hashed and its hash code has been
1379 * relocated by the collector. Use the value of the naturally
1380 * aligned word following the instance data.
1382 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
1383 length = arrayObjectLength((ArrayObject *)obj);
/* round up to a 4-byte boundary so the saved hash slot is aligned */
1384 length = (length + 3) & ~3;
1386 length = obj->clazz->objectSize;
1388 return *(u4 *)(((char *)obj) + length);
1389 } else if (hashState == LW_HASH_STATE_UNHASHED) {
1391 * The object has never been hashed. Change the hash state to
1392 * hashed and use the raw object address.
1394 self = dvmThreadSelf();
1395 if (self->threadId == lockOwner(obj)) {
1397 * We already own the lock so we can update the hash state
1400 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1401 return (u4)obj >> 3;
1404 * We do not own the lock. Try acquiring the lock. Should
1405 * this fail, we must suspend the owning thread.
1407 if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
1409 * If the lock is thin assume it is unowned. We simulate
1410 * an acquire, update, and release with a single CAS.
1412 lock = DVM_LOCK_INITIAL_THIN_VALUE;
1413 lock |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1414 if (ATOMIC_CMP_SWAP((int32_t *)lw,
1415 (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
1418 * A new lockword has been installed with a hash state
1419 * of hashed. Use the raw object address.
1421 return (u4)obj >> 3;
1424 if (tryLockMonitor(self, LW_MONITOR(*lw))) {
1426 * The monitor lock has been acquired. Change the
1427 * hash state to hashed and use the raw object
1430 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1431 unlockMonitor(self, LW_MONITOR(*lw));
1432 return (u4)obj >> 3;
1436 * At this point we have failed to acquire the lock. We must
1437 * identify the owning thread and suspend it.
1439 dvmLockThreadList(self);
1441 * Cache the lock word as its value can change between
1442 * determining its shape and retrieving its owner.
1445 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
1447 * Find the thread with the corresponding thread id.
1449 owner = LW_LOCK_OWNER(lock);
/* we cannot be the owner; the owning case was handled above */
1450 assert(owner != self->threadId);
1452 * If the lock has no owner do not bother scanning the
1453 * thread list and fall through to the failure handler.
1455 thread = owner ? gDvm.threadList : NULL;
1456 while (thread != NULL) {
1457 if (thread->threadId == owner) {
1460 thread = thread->next;
/* fat lock: the monitor records its owning thread directly */
1463 thread = LW_MONITOR(lock)->owner;
1466 * If thread is NULL the object has been released since the
1467 * thread list lock was acquired. Try again.
1469 if (thread == NULL) {
1470 dvmUnlockThreadList();
1474 * Wait for the owning thread to suspend.
1476 dvmSuspendThread(thread);
1477 if (dvmHoldsLock(thread, obj)) {
1479 * The owning thread has been suspended. We can safely
1480 * change the hash state to hashed.
1482 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1483 dvmResumeThread(thread);
1484 dvmUnlockThreadList();
1485 return (u4)obj >> 3;
1488 * The wrong thread has been suspended. Try again.
1490 dvmResumeThread(thread);
1491 dvmUnlockThreadList();
/* unknown hash state: should be unreachable */
1494 LOGE("object %p has an unknown hash state %#x", obj, hashState);
1495 dvmDumpThread(dvmThreadSelf(), false);
1497 return 0; /* Quiet the compiler. */
1499 #endif /* WITH_COPYING_GC */
1501 #ifdef WITH_DEADLOCK_PREDICTION
1503 * ===========================================================================
1504 * Deadlock prediction
1505 * ===========================================================================
1508 The idea is to predict the possibility of deadlock by recording the order
1509 in which monitors are acquired. If we see an attempt to acquire a lock
1510 out of order, we can identify the locks and offending code.
1512 To make this work, we need to keep track of the locks held by each thread,
1513 and create history trees for each lock. When a thread tries to acquire
1514 a new lock, we walk through the "history children" of the lock, looking
1515 for a match with locks the thread already holds. If we find a match,
1516 it means the thread has made a request that could result in a deadlock.
1518 To support recursive locks, we always allow re-locking a currently-held
1519 lock, and maintain a recursion depth count.
1521 An ASCII-art example, where letters represent Objects:
1531 The above is the tree we'd have after handling Object synchronization
1532 sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
1533 a child of B. (The lines represent pointers between parent and child.
1534 Every node can have multiple parents and multiple children.)
1536 If we hold AC, and want to lock B, we recursively search through B's
1537 children to see if A or C appears. It does, so we reject the attempt.
1538 (A straightforward way to implement it: add a link from C to B, then
1539 determine whether the graph starting at B contains a cycle.)
1541 If we hold AC and want to lock D, we would succeed, creating a new link
1544 The lock history and a stack trace is attached to the Object's Monitor
1545 struct, which means we need to fatten every Object we lock (thin locking
1546 is effectively disabled). If we don't need the stack trace we can
1547 avoid fattening the leaf nodes, only fattening objects that need to hold
1550 Updates to Monitor structs are only allowed for the thread that holds
1551 the Monitor, so we actually do most of our deadlock prediction work after
1552 the lock has been acquired.
1554 When an object with a monitor is GCed, we need to remove it from the
1555 history trees. There are two basic approaches:
1556 (1) Go through the entire set of known monitors, search all child
1557 lists for the object in question. This is rather slow, resulting
1558 in GC passes that take upwards of 10 seconds to complete.
1559 (2) Maintain "parent" pointers in each node. Remove the entries as
1560 required. This requires additional storage and maintenance for
1561 every operation, but is significantly faster at GC time.
1562 For each GCed object, we merge all of the object's children into each of
1563 the object's parents.
1566 #if !defined(WITH_MONITOR_TRACKING)
1567 # error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
1571 * Clear out the contents of an ExpandingObjectList, freeing any
1572 * dynamic allocations.
1574 static void expandObjClear(ExpandingObjectList* pList)
/* free the backing array, if one was ever allocated */
1576 if (pList->list != NULL) {
/* reset to the initial empty state so the list can be reused */
1580 pList->alloc = pList->count = 0;
1584 * Get the number of objects currently stored in the list.
/* Returns the number of entries currently stored; does not mutate. */
1586 static inline int expandBufGetCount(const ExpandingObjectList* pList)
1588 return pList->count;
1592 * Get the Nth entry from the list.
/* Returns the i-th entry.  NOTE(review): no visible bounds check on
 * "i"; callers iterate within [0, count) -- confirm at call sites. */
1594 static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
1597 return pList->list[i];
1601 * Add a new entry to the list.
1603 * We don't check for or try to enforce uniqueness. It's expected that
1604 * the higher-level code does this for us.
1606 static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
/* grow the backing array when full */
1608 if (pList->count == pList->alloc) {
1609 /* time to expand */
/* NOTE(review): initial-capacity and growth-factor lines are not
 * visible in this excerpt; only the zero-capacity guard is shown. */
1612 if (pList->alloc == 0)
1616 LOGVV("expanding %p to %d\n", pList, pList->alloc);
1617 newList = realloc(pList->list, pList->alloc * sizeof(Object*));
/* on realloc failure the old list is left intact; log and bail */
1618 if (newList == NULL) {
1619 LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
1622 pList->list = newList;
/* append; uniqueness is the caller's responsibility (see header) */
1625 pList->list[pList->count++] = obj;
1629 * Returns "true" if the element was successfully removed.
1631 static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
/* scan from the end for the first matching entry */
1635 for (i = pList->count-1; i >= 0; i--) {
1636 if (pList->list[i] == obj)
/* if the match wasn't the last element, fill the hole with the tail */
1642 if (i != pList->count-1) {
1644 * The order of elements is not important, so we just copy the
1645 * last entry into the new slot.
1647 //memmove(&pList->list[i], &pList->list[i+1],
1648 // (pList->count-1 - i) * sizeof(pList->list[0]));
1649 pList->list[i] = pList->list[pList->count-1];
/* poison the vacated slot so stale reads are obvious in debugging */
1653 pList->list[pList->count] = (Object*) 0xdecadead;
1658 * Returns "true" if "obj" appears in the list.
1660 static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
/* linear search; the list is unsorted (see struct comment in header) */
1664 for (i = 0; i < pList->count; i++) {
1665 if (pList->list[i] == obj)
1672 * Print the list contents to stdout. For debugging.
/* Debug-only: prints each entry's pointer value to stdout. */
1674 static void expandObjDump(const ExpandingObjectList* pList)
1677 for (i = 0; i < pList->count; i++)
1678 printf(" %p", pList->list[i]);
1682 * Check for duplicate entries. Returns the index of the first instance
1683 * of the duplicated value, or -1 if no duplicates were found.
1685 static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
/* O(n^2) pairwise compare; acceptable since this is an assert helper */
1688 for (i = 0; i < pList->count-1; i++) {
1689 for (j = i + 1; j < pList->count; j++) {
1690 if (pList->list[i] == pList->list[j]) {
1701 * Determine whether "child" appears in the list of objects associated
1702 * with the Monitor in "parent". If "parent" is a thin lock, we return
1703 * false immediately.
1705 static bool objectInChildList(const Object* parent, Object* child)
/* snapshot the lock word; it can change shape under us */
1707 u4 lock = parent->lock;
/* thin lock => no Monitor => no history => cannot contain "child" */
1708 if (!IS_LOCK_FAT(&lock)) {
1709 //LOGI("on thin\n");
1713 return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
1717 * Print the child list.
/* Debug-only: prints "parent"'s history children to stdout.
 * Assumes the parent's lock is fat (LW_MONITOR is used unchecked). */
1719 static void dumpKids(Object* parent)
1721 Monitor* mon = LW_MONITOR(parent->lock);
1723 printf("Children of %p:", parent);
1724 expandObjDump(&mon->historyChildren);
1729 * Add "child" to the list of children in "parent", and add "parent" to
1730 * the list of parents in "child".
1732 static void linkParentToChild(Object* parent, Object* child)
1734 //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for merge
/* both ends must already be fattened so each has a Monitor to hold
 * the history lists */
1735 assert(IS_LOCK_FAT(&parent->lock));
1736 assert(IS_LOCK_FAT(&child->lock));
1737 assert(parent != child);
/* forward link: child joins parent's historyChildren */
1740 mon = LW_MONITOR(parent->lock);
1741 assert(!expandObjHas(&mon->historyChildren, child));
1742 expandObjAddEntry(&mon->historyChildren, child);
/* back link: parent joins child's historyParents (speeds up GC-time
 * removal; see file-header discussion of approach (2)) */
1744 mon = LW_MONITOR(child->lock);
1745 assert(!expandObjHas(&mon->historyParents, parent));
1746 expandObjAddEntry(&mon->historyParents, parent);
1751 * Remove "child" from the list of children in "parent".
1753 static void unlinkParentFromChild(Object* parent, Object* child)
1755 //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for GC
1756 assert(IS_LOCK_FAT(&parent->lock));
1757 assert(IS_LOCK_FAT(&child->lock));
1758 assert(parent != child);
/* remove the forward link (parent -> child); warn but continue if it
 * was already gone */
1761 mon = LW_MONITOR(parent->lock);
1762 if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
1763 LOGW("WARNING: child %p not found in parent %p\n", child, parent);
1765 assert(!expandObjHas(&mon->historyChildren, child));
1766 assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
/* remove the matching back link (child -> parent) */
1768 mon = LW_MONITOR(child->lock);
1769 if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
1770 LOGW("WARNING: parent %p not found in child %p\n", parent, child);
1772 assert(!expandObjHas(&mon->historyParents, parent));
1773 assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
1778 * Log the monitors held by the current thread. This is done as part of
1779 * flagging an error.
1781 static void logHeldMonitors(Thread* self)
1785 name = dvmGetThreadName(self);
1786 LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
1787 self->threadId, name);
1788 LOGW("(most-recently-acquired on top):\n");
/* walk the thread's locked-object list, newest entry first, logging
 * each object plus the stack trace captured when it was locked */
1791 LockedObjectData* lod = self->pLockedObjects;
1792 while (lod != NULL) {
1793 LOGW("--- object %p[%d] (%s)\n",
1794 lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
1795 dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
1802 * Recursively traverse the object hierarchy starting at "obj". We mark
1803 * ourselves on entry and clear the mark on exit. If we ever encounter
1804 * a marked object, we have a cycle.
1806 * Returns "true" if all is well, "false" if we found a cycle.
1808 static bool traverseTree(Thread* self, const Object* obj)
1810 assert(IS_LOCK_FAT(&obj->lock));
1811 Monitor* mon = LW_MONITOR(obj->lock);
1814 * Have we been here before?
/* revisiting a marked node means the history graph has a cycle:
 * report the illegal lock attempt and start unwinding */
1816 if (mon->historyMark) {
1820 LOGW("%s\n", kStartBanner);
1821 LOGW("Illegal lock attempt:\n");
1822 LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
/* capture and log where the offending attempt is happening now */
1824 rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
1825 dvmLogRawStackTrace(rawStackTrace, stackDepth);
1826 free(rawStackTrace);
1829 logHeldMonitors(self);
1832 LOGW("Earlier, the following lock order (from last to first) was\n");
1833 LOGW("established -- stack trace is from first successful lock):\n");
/* mark on entry; cleared again on every exit path below */
1836 mon->historyMark = true;
1839 * Examine the children. We do NOT hold these locks, so they might
1840 * very well transition from thin to fat or change ownership while
1843 * NOTE: we rely on the fact that they cannot revert from fat to thin
1844 * while we work. This is currently a safe assumption.
1846 * We can safely ignore thin-locked children, because by definition
1847 * they have no history and are leaf nodes. In the current
1848 * implementation we always fatten the locks to provide a place to
1849 * hang the stack trace.
1851 ExpandingObjectList* pList = &mon->historyChildren;
1853 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1854 const Object* child = expandBufGetEntry(pList, i);
1855 u4 lock = child->lock;
1856 if (!IS_LOCK_FAT(&lock))
/* recurse; on failure, log this link of the cycle as we unwind */
1858 if (!traverseTree(self, child)) {
1859 LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
1860 dvmLogRawStackTrace(mon->historyRawStackTrace,
1861 mon->historyStackDepth);
1862 mon->historyMark = false;
/* normal exit: clear our mark so future traversals start clean */
1867 mon->historyMark = false;
1873 * Update the deadlock prediction tree, based on the current thread
1874 * acquiring "acqObj". This must be called before the object is added to
1875 * the thread's list of held monitors.
1877 * If the thread already holds the lock (recursion), or this is a known
1878 * lock configuration, we return without doing anything. Otherwise, we add
1879 * a link from the most-recently-acquired lock in this thread to "acqObj"
1880 * after ensuring that the parent lock is "fat".
1882 * This MUST NOT be called while a GC is in progress in another thread,
1883 * because we assume exclusive access to history trees in owned monitors.
1885 static void updateDeadlockPrediction(Thread* self, Object* acqObj)
1887 LockedObjectData* lod;
1888 LockedObjectData* mrl;
1891 * Quick check for recursive access.
/* already holding acqObj => recursive lock, nothing to record */
1893 lod = dvmFindInMonitorList(self, acqObj);
1895 LOGV("+++ DP: recursive %p\n", acqObj);
1900 * Make the newly-acquired object's monitor "fat". In some ways this
1901 * isn't strictly necessary, but we need the GC to tell us when
1902 * "interesting" objects go away, and right now the only way to make
1903 * an object look interesting is to give it a monitor.
1905 * This also gives us a place to hang a stack trace.
1907 * Our thread holds the lock, so we're allowed to rewrite the lock
1908 * without worrying that something will change out from under us.
1910 if (!IS_LOCK_FAT(&acqObj->lock)) {
1911 LOGVV("fattening lockee %p (recur=%d)\n",
1912 acqObj, LW_LOCK_COUNT(acqObj->lock.thin));
1913 Monitor* newMon = dvmCreateMonitor(acqObj);
1914 lockMonitor(self, newMon); // can't stall, don't need VMWAIT
/* carry the thin lock's recursion count into the new monitor, and
 * preserve the hash state bits when building the fat lock word */
1915 newMon->lockCount += LW_LOCK_COUNT(acqObj->lock);
1916 u4 hashState = LW_HASH_STATE(acqObj->lock) << LW_HASH_STATE_SHIFT;
1917 acqObj->lock = (u4)newMon | hashState | LW_SHAPE_FAT;
1920 /* if we don't have a stack trace for this monitor, establish one */
1921 if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
1922 Monitor* mon = LW_MONITOR(acqObj->lock);
1923 mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
1924 &mon->historyStackDepth);
1928 * We need to examine and perhaps modify the most-recently-locked
1929 * monitor. We own that, so there's no risk of another thread
1932 * Retrieve the most-recently-locked entry from our thread.
1934 mrl = self->pLockedObjects;
1936 return; /* no other locks held */
1939 * Do a quick check to see if "acqObj" is a direct descendant. We can do
1940 * this without holding the global lock because of our assertion that
1941 * a GC is not running in parallel -- nobody except the GC can
1942 * modify a history list in a Monitor they don't own, and we own "mrl".
1943 * (There might be concurrent *reads*, but no concurrent *writes*.)
1945 * If we find it, this is a known good configuration, and we're done.
1947 if (objectInChildList(mrl->obj, acqObj))
1951 * "mrl" is going to need to have a history tree. If it's currently
1952 * a thin lock, we make it fat now. The thin lock might have a
1953 * nonzero recursive lock count, which we need to carry over.
1955 * Our thread holds the lock, so we're allowed to rewrite the lock
1956 * without worrying that something will change out from under us.
1958 if (!IS_LOCK_FAT(&mrl->obj->lock)) {
1959 LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
1960 mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
1961 Monitor* newMon = dvmCreateMonitor(mrl->obj);
1962 lockMonitor(self, newMon); // can't stall, don't need VMWAIT
1963 newMon->lockCount += LW_LOCK_COUNT(mrl->obj->lock);
1964 u4 hashState = LW_HASH_STATE(mrl->obj->lock) << LW_HASH_STATE_SHIFT;
1965 mrl->obj->lock = (u4)newMon | hashState | LW_SHAPE_FAT;
1969 * We haven't seen this configuration before. We need to scan down
1970 * acqObj's tree to see if any of the monitors in self->pLockedObjects
1971 * appear. We grab a global lock before traversing or updating the
1974 * If we find a match for any of our held locks, we know that the lock
1975 * has previously been acquired *after* acqObj, and we throw an error.
1977 * The easiest way to do this is to create a link from "mrl" to "acqObj"
1978 * and do a recursive traversal, marking nodes as we cross them. If
1979 * we cross one a second time, we have a cycle and can throw an error.
1980 * (We do the flag-clearing traversal before adding the new link, so
1981 * that we're guaranteed to terminate.)
1983 * If "acqObj" is a thin lock, it has no history, and we can create a
1984 * link to it without additional checks. [ We now guarantee that it's
1987 bool failed = false;
/* record the new edge, then search for a cycle under the global lock */
1988 dvmLockMutex(&gDvm.deadlockHistoryLock);
1989 linkParentToChild(mrl->obj, acqObj);
1990 if (!traverseTree(self, acqObj)) {
1991 LOGW("%s\n", kEndBanner);
1994 /* remove the entry so we're still okay when in "warning" mode */
1995 unlinkParentFromChild(mrl->obj, acqObj);
1997 dvmUnlockMutex(&gDvm.deadlockHistoryLock);
/* react per policy: throw, abort, or (elsewhere) just warn */
2000 switch (gDvm.deadlockPredictMode) {
2002 dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
2005 LOGE("Aborting due to potential deadlock\n");
2016 * We're removing "child" from existence. We want to pull all of
2017 * child's children into "parent", filtering out duplicates. This is
2018 * called during the GC.
2020 * This does not modify "child", which might have multiple parents.
2022 static void mergeChildren(Object* parent, const Object* child)
/* child must be fat or it would have no history to merge */
2027 assert(IS_LOCK_FAT(&child->lock));
2028 mon = LW_MONITOR(child->lock);
2029 ExpandingObjectList* pList = &mon->historyChildren;
/* move each grandchild link up to "parent", skipping ones it has */
2031 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
2032 Object* grandChild = expandBufGetEntry(pList, i);
2034 if (!objectInChildList(parent, grandChild)) {
2035 LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
2036 linkParentToChild(parent, grandChild);
2038 LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
2044 * An object with a fat lock is being collected during a GC pass. We
2045 * want to remove it from any lock history trees that it is a part of.
2047 * This may require updating the history trees in several monitors. The
2048 * monitor semantics guarantee that no other thread will be accessing
2049 * the history trees at the same time.
2051 static void removeCollectedObject(Object* obj)
2055 LOGVV("+++ collecting %p\n", obj);
2059 * We're currently running through the entire set of known monitors.
2060 * This can be somewhat slow. We may want to keep lists of parents
2061 * in each child to speed up GC.
/* pass 1 (slow path): scan every known monitor for a parent that
 * still links to "obj", unlink it, and hoist obj's children up */
2063 mon = gDvm.monitorList;
2064 while (mon != NULL) {
2065 Object* parent = mon->obj;
2066 if (parent != NULL) { /* value nulled for deleted entries */
2067 if (objectInChildList(parent, obj)) {
2068 LOGVV("removing child %p from parent %p\n", obj, parent);
2069 unlinkParentFromChild(parent, obj);
2070 mergeChildren(parent, obj);
2078 * For every parent of this object:
2079 * - merge all of our children into the parent's child list (creates
2080 * a two-way link between parent and child)
2081 * - remove ourselves from the parent's child list
2083 ExpandingObjectList* pList;
2086 assert(IS_LOCK_FAT(&obj->lock));
2087 mon = LW_MONITOR(obj->lock);
2088 pList = &mon->historyParents;
2089 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
2090 Object* parent = expandBufGetEntry(pList, i);
2091 Monitor* parentMon = LW_MONITOR(parent->lock);
/* drop only the forward (parent -> obj) link here; obj's own parent
 * list is being torn down wholesale anyway */
2093 if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
2094 LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
2096 assert(!expandObjHas(&parentMon->historyChildren, obj));
2098 mergeChildren(parent, obj);
2102 * For every child of this object:
2103 * - remove ourselves from the child's parent list
2105 pList = &mon->historyChildren;
2106 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
2107 Object* child = expandBufGetEntry(pList, i);
2108 Monitor* childMon = LW_MONITOR(child->lock);
2110 if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
2111 LOGW("WARNING: parent %p not found in child %p\n", obj, child);
2113 assert(!expandObjHas(&childMon->historyParents, obj));
2117 #endif /*WITH_DEADLOCK_PREDICTION*/