#define LOG_THIN LOGV
-#ifdef WITH_DEADLOCK_PREDICTION /* fwd */
-static const char* kStartBanner =
- "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
-static const char* kEndBanner =
- "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";
-
-/*
- * Unsorted, expanding list of objects.
- *
- * This is very similar to PointerSet (which came into existence after this),
- * but these are unsorted, uniqueness is not enforced by the "add" function,
- * and the base object isn't allocated on the heap.
- */
-typedef struct ExpandingObjectList {
- u2 alloc;
- u2 count;
- Object** list;
-} ExpandingObjectList;
-
-/* fwd */
-static void updateDeadlockPrediction(Thread* self, Object* obj);
-static void removeCollectedObject(Object* obj);
-static void expandObjClear(ExpandingObjectList* pList);
-#endif
-
/*
* Every Object has a monitor associated with it, but not every Object is
* actually locked. Even the ones that are locked do not need a
*/
char* ownerFileName;
u4 ownerLineNumber;
-
-#ifdef WITH_DEADLOCK_PREDICTION
- /*
- * Objects that have been locked immediately after this one in the
- * past. We use an expanding flat array, allocated on first use, to
- * minimize allocations. Deletions from the list, expected to be
- * infrequent, are crunched down.
- */
- ExpandingObjectList historyChildren;
-
- /*
- * We also track parents. This isn't strictly necessary, but it makes
- * the cleanup at GC time significantly faster.
- */
- ExpandingObjectList historyParents;
-
- /* used during cycle detection */
- bool historyMark;
-
- /* stack trace, established the first time we locked the object */
- int historyStackDepth;
- int* historyRawStackTrace;
-#endif
};
mon = gDvm.monitorList;
while (mon != NULL) {
nextMon = mon->next;
-
-#ifdef WITH_DEADLOCK_PREDICTION
- expandObjClear(&mon->historyChildren);
- expandObjClear(&mon->historyParents);
- free(mon->historyRawStackTrace);
-#endif
free(mon);
mon = nextMon;
}
assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);
-#ifdef WITH_DEADLOCK_PREDICTION
- if (gDvm.deadlockPredictMode != kDPOff)
- removeCollectedObject(obj);
-#endif
-
mon = LW_MONITOR(obj->lock);
obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;
assert(pthread_mutex_trylock(&mon->lock) == 0);
assert(pthread_mutex_unlock(&mon->lock) == 0);
dvmDestroyMutex(&mon->lock);
-#ifdef WITH_DEADLOCK_PREDICTION
- expandObjClear(&mon->historyChildren);
- expandObjClear(&mon->historyParents);
- free(mon->historyRawStackTrace);
-#endif
free(mon);
}
assert(mon != NULL);
assert(isUnmarkedObject != NULL);
-#ifdef WITH_DEADLOCK_PREDICTION
- dvmDumpMonitorInfo("before monitor sweep");
-#endif
prev = &handle;
prev->next = curr = *mon;
while (curr != NULL) {
}
}
*mon = handle.next;
-#ifdef WITH_DEADLOCK_PREDICTION
- dvmDumpMonitorInfo("after monitor sweep");
-#endif
}
static char *logWriteInt(char *dst, int value)
assert(LW_MONITOR(obj->lock) != NULL);
lockMonitor(self, LW_MONITOR(obj->lock));
}
-#ifdef WITH_DEADLOCK_PREDICTION
- /*
- * See if we were allowed to grab the lock at this time. We do it
- * *after* acquiring the lock, rather than before, so that we can
- * freely update the Monitor struct. This seems counter-intuitive,
- * but our goal is deadlock *prediction* not deadlock *prevention*.
- * (If we actually deadlock, the situation is easy to diagnose from
- * a thread dump, so there's no point making a special effort to do
- * the checks before the lock is held.)
- *
- * This needs to happen before we add the object to the thread's
- * monitor list, so we can tell the difference between first-lock and
- * re-lock.
- *
- * It's also important that we do this while in THREAD_RUNNING, so
- * that we don't interfere with cleanup operations in the GC.
- */
- if (gDvm.deadlockPredictMode != kDPOff) {
- if (self->status != THREAD_RUNNING) {
- LOGE("Bad thread status (%d) in DP\n", self->status);
- dvmDumpThread(self, false);
- dvmAbort();
- }
- assert(!dvmCheckException(self));
- updateDeadlockPrediction(self, obj);
- if (dvmCheckException(self)) {
- /*
- * If we're throwing an exception here, we need to free the
- * lock. We add the object to the thread's monitor list so the
- * "unlock" code can remove it.
- */
- dvmAddToMonitorList(self, obj, false);
- dvmUnlockObject(self, obj);
- LOGV("--- unlocked, pending is '%s'\n",
- dvmGetException(self)->clazz->descriptor);
- }
- }
-
- /*
- * Add the locked object, and the current stack trace, to the list
- * held by the Thread object. If deadlock prediction isn't on,
- * don't capture the stack trace.
- */
- dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
-#elif defined(WITH_MONITOR_TRACKING)
- /*
- * Add the locked object to the list held by the Thread object.
- */
- dvmAddToMonitorList(self, obj, false);
-#endif
}
/*
return false;
}
}
-
-#ifdef WITH_MONITOR_TRACKING
- /*
- * Remove the object from the Thread's list.
- */
- dvmRemoveFromMonitorList(self, obj);
-#endif
-
return true;
}
return 0; /* Quiet the compiler. */
}
#endif /* WITH_COPYING_GC */
-
-#ifdef WITH_DEADLOCK_PREDICTION
-/*
- * ===========================================================================
- * Deadlock prediction
- * ===========================================================================
- */
-/*
-The idea is to predict the possibility of deadlock by recording the order
-in which monitors are acquired. If we see an attempt to acquire a lock
-out of order, we can identify the locks and offending code.
-
-To make this work, we need to keep track of the locks held by each thread,
-and create history trees for each lock. When a thread tries to acquire
-a new lock, we walk through the "history children" of the lock, looking
-for a match with locks the thread already holds. If we find a match,
-it means the thread has made a request that could result in a deadlock.
-
-To support recursive locks, we always allow re-locking a currently-held
-lock, and maintain a recursion depth count.
-
-An ASCII-art example, where letters represent Objects:
-
- A
- /|\
- / | \
- B | D
- \ |
- \|
- C
-
-The above is the tree we'd have after handling Object synchronization
-sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
-a child of B. (The lines represent pointers between parent and child.
-Every node can have multiple parents and multiple children.)
-
-If we hold AC, and want to lock B, we recursively search through B's
-children to see if A or C appears. It does, so we reject the attempt.
-(A straightforward way to implement it: add a link from C to B, then
-determine whether the graph starting at B contains a cycle.)
-
-If we hold AC and want to lock D, we would succeed, creating a new link
-from C to D.
-
-The lock history and a stack trace is attached to the Object's Monitor
-struct, which means we need to fatten every Object we lock (thin locking
-is effectively disabled). If we don't need the stack trace we can
-avoid fattening the leaf nodes, only fattening objects that need to hold
-history trees.
-
-Updates to Monitor structs are only allowed for the thread that holds
-the Monitor, so we actually do most of our deadlock prediction work after
-the lock has been acquired.
-
-When an object with a monitor is GCed, we need to remove it from the
-history trees. There are two basic approaches:
- (1) Walk through the entire set of known monitors, search all child
- lists for the object in question. This is rather slow, resulting
- in GC passes that take upwards of 10 seconds to complete.
- (2) Maintain "parent" pointers in each node. Remove the entries as
- required. This requires additional storage and maintenance for
- every operation, but is significantly faster at GC time.
-For each GCed object, we merge all of the object's children into each of
-the object's parents.
-*/
-
-#if !defined(WITH_MONITOR_TRACKING)
-# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
-#endif
-
-/*
- * Clear out the contents of an ExpandingObjectList, freeing any
- * dynamic allocations.
- */
-static void expandObjClear(ExpandingObjectList* pList)
-{
- if (pList->list != NULL) {
- free(pList->list);
- pList->list = NULL;
- }
- pList->alloc = pList->count = 0;
-}
-
-/*
- * Get the number of objects currently stored in the list.
- */
-static inline int expandBufGetCount(const ExpandingObjectList* pList)
-{
- return pList->count;
-}
-
-/*
- * Get the Nth entry from the list.
- */
-static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
- int i)
-{
- return pList->list[i];
-}
-
-/*
- * Add a new entry to the list.
- *
- * We don't check for or try to enforce uniqueness. It's expected that
- * the higher-level code does this for us.
- */
-static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
-{
- if (pList->count == pList->alloc) {
- /* time to expand */
- Object** newList;
-
- if (pList->alloc == 0)
- pList->alloc = 4;
- else
- pList->alloc *= 2;
- LOGVV("expanding %p to %d\n", pList, pList->alloc);
- newList = realloc(pList->list, pList->alloc * sizeof(Object*));
- if (newList == NULL) {
- LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
- dvmAbort();
- }
- pList->list = newList;
- }
-
- pList->list[pList->count++] = obj;
-}
-
-/*
- * Returns "true" if the element was successfully removed.
- */
-static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
-{
- int i;
-
- for (i = pList->count-1; i >= 0; i--) {
- if (pList->list[i] == obj)
- break;
- }
- if (i < 0)
- return false;
-
- if (i != pList->count-1) {
- /*
- * The order of elements is not important, so we just copy the
- * last entry into the new slot.
- */
- //memmove(&pList->list[i], &pList->list[i+1],
- // (pList->count-1 - i) * sizeof(pList->list[0]));
- pList->list[i] = pList->list[pList->count-1];
- }
-
- pList->count--;
- pList->list[pList->count] = (Object*) 0xdecadead;
- return true;
-}
-
-/*
- * Returns "true" if "obj" appears in the list.
- */
-static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
-{
- int i;
-
- for (i = 0; i < pList->count; i++) {
- if (pList->list[i] == obj)
- return true;
- }
- return false;
-}
-
-/*
- * Print the list contents to stdout. For debugging.
- */
-static void expandObjDump(const ExpandingObjectList* pList)
-{
- int i;
- for (i = 0; i < pList->count; i++)
- printf(" %p", pList->list[i]);
-}
-
-/*
- * Check for duplicate entries. Returns the index of the first instance
- * of the duplicated value, or -1 if no duplicates were found.
- */
-static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
-{
- int i, j;
- for (i = 0; i < pList->count-1; i++) {
- for (j = i + 1; j < pList->count; j++) {
- if (pList->list[i] == pList->list[j]) {
- return i;
- }
- }
- }
-
- return -1;
-}
-
-
-/*
- * Determine whether "child" appears in the list of objects associated
- * with the Monitor in "parent". If "parent" is a thin lock, we return
- * false immediately.
- */
-static bool objectInChildList(const Object* parent, Object* child)
-{
- u4 lock = parent->lock;
- if (!IS_LOCK_FAT(&lock)) {
- //LOGI("on thin\n");
- return false;
- }
-
- return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
-}
-
-/*
- * Print the child list.
- */
-static void dumpKids(Object* parent)
-{
- Monitor* mon = LW_MONITOR(parent->lock);
-
- printf("Children of %p:", parent);
- expandObjDump(&mon->historyChildren);
- printf("\n");
-}
-
-/*
- * Add "child" to the list of children in "parent", and add "parent" to
- * the list of parents in "child".
- */
-static void linkParentToChild(Object* parent, Object* child)
-{
- //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for merge
- assert(IS_LOCK_FAT(&parent->lock));
- assert(IS_LOCK_FAT(&child->lock));
- assert(parent != child);
- Monitor* mon;
-
- mon = LW_MONITOR(parent->lock);
- assert(!expandObjHas(&mon->historyChildren, child));
- expandObjAddEntry(&mon->historyChildren, child);
-
- mon = LW_MONITOR(child->lock);
- assert(!expandObjHas(&mon->historyParents, parent));
- expandObjAddEntry(&mon->historyParents, parent);
-}
-
-
-/*
- * Remove "child" from the list of children in "parent".
- */
-static void unlinkParentFromChild(Object* parent, Object* child)
-{
- //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for GC
- assert(IS_LOCK_FAT(&parent->lock));
- assert(IS_LOCK_FAT(&child->lock));
- assert(parent != child);
- Monitor* mon;
-
- mon = LW_MONITOR(parent->lock);
- if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
- LOGW("WARNING: child %p not found in parent %p\n", child, parent);
- }
- assert(!expandObjHas(&mon->historyChildren, child));
- assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
-
- mon = LW_MONITOR(child->lock);
- if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
- LOGW("WARNING: parent %p not found in child %p\n", parent, child);
- }
- assert(!expandObjHas(&mon->historyParents, parent));
- assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
-}
-
-
-/*
- * Log the monitors held by the current thread. This is done as part of
- * flagging an error.
- */
-static void logHeldMonitors(Thread* self)
-{
- char* name = NULL;
-
- name = dvmGetThreadName(self);
- LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
- self->threadId, name);
- LOGW("(most-recently-acquired on top):\n");
- free(name);
-
- LockedObjectData* lod = self->pLockedObjects;
- while (lod != NULL) {
- LOGW("--- object %p[%d] (%s)\n",
- lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
- dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
-
- lod = lod->next;
- }
-}
-
-/*
- * Recursively traverse the object hierarchy starting at "obj". We mark
- * ourselves on entry and clear the mark on exit. If we ever encounter
- * a marked object, we have a cycle.
- *
- * Returns "true" if all is well, "false" if we found a cycle.
- */
-static bool traverseTree(Thread* self, const Object* obj)
-{
- assert(IS_LOCK_FAT(&obj->lock));
- Monitor* mon = LW_MONITOR(obj->lock);
-
- /*
- * Have we been here before?
- */
- if (mon->historyMark) {
- int* rawStackTrace;
- int stackDepth;
-
- LOGW("%s\n", kStartBanner);
- LOGW("Illegal lock attempt:\n");
- LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
-
- rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
- dvmLogRawStackTrace(rawStackTrace, stackDepth);
- free(rawStackTrace);
-
- LOGW(" ");
- logHeldMonitors(self);
-
- LOGW(" ");
- LOGW("Earlier, the following lock order (from last to first) was\n");
- LOGW("established -- stack trace is from first successful lock):\n");
- return false;
- }
- mon->historyMark = true;
-
- /*
- * Examine the children. We do NOT hold these locks, so they might
- * very well transition from thin to fat or change ownership while
- * we work.
- *
- * NOTE: we rely on the fact that they cannot revert from fat to thin
- * while we work. This is currently a safe assumption.
- *
- * We can safely ignore thin-locked children, because by definition
- * they have no history and are leaf nodes. In the current
- * implementation we always fatten the locks to provide a place to
- * hang the stack trace.
- */
- ExpandingObjectList* pList = &mon->historyChildren;
- int i;
- for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
- const Object* child = expandBufGetEntry(pList, i);
- u4 lock = child->lock;
- if (!IS_LOCK_FAT(&lock))
- continue;
- if (!traverseTree(self, child)) {
- LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
- dvmLogRawStackTrace(mon->historyRawStackTrace,
- mon->historyStackDepth);
- mon->historyMark = false;
- return false;
- }
- }
-
- mon->historyMark = false;
-
- return true;
-}
-
-/*
- * Update the deadlock prediction tree, based on the current thread
- * acquiring "acqObj". This must be called before the object is added to
- * the thread's list of held monitors.
- *
- * If the thread already holds the lock (recursion), or this is a known
- * lock configuration, we return without doing anything. Otherwise, we add
- * a link from the most-recently-acquired lock in this thread to "acqObj"
- * after ensuring that the parent lock is "fat".
- *
- * This MUST NOT be called while a GC is in progress in another thread,
- * because we assume exclusive access to history trees in owned monitors.
- */
-static void updateDeadlockPrediction(Thread* self, Object* acqObj)
-{
- LockedObjectData* lod;
- LockedObjectData* mrl;
-
- /*
- * Quick check for recursive access.
- */
- lod = dvmFindInMonitorList(self, acqObj);
- if (lod != NULL) {
- LOGV("+++ DP: recursive %p\n", acqObj);
- return;
- }
-
- /*
- * Make the newly-acquired object's monitor "fat". In some ways this
- * isn't strictly necessary, but we need the GC to tell us when
- * "interesting" objects go away, and right now the only way to make
- * an object look interesting is to give it a monitor.
- *
- * This also gives us a place to hang a stack trace.
- *
- * Our thread holds the lock, so we're allowed to rewrite the lock
- * without worrying that something will change out from under us.
- */
- if (!IS_LOCK_FAT(&acqObj->lock)) {
- LOGVV("fattening lockee %p (recur=%d)\n",
- acqObj, LW_LOCK_COUNT(acqObj->lock.thin));
- inflateMonitor(self, acqObj);
- }
-
- /* if we don't have a stack trace for this monitor, establish one */
- if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
- Monitor* mon = LW_MONITOR(acqObj->lock);
- mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
- &mon->historyStackDepth);
- }
-
- /*
- * We need to examine and perhaps modify the most-recently-locked
- * monitor. We own that, so there's no risk of another thread
- * stepping on us.
- *
- * Retrieve the most-recently-locked entry from our thread.
- */
- mrl = self->pLockedObjects;
- if (mrl == NULL)
- return; /* no other locks held */
-
- /*
- * Do a quick check to see if "acqObj" is a direct descendant. We can do
- * this without holding the global lock because of our assertion that
- * a GC is not running in parallel -- nobody except the GC can
- * modify a history list in a Monitor they don't own, and we own "mrl".
- * (There might be concurrent *reads*, but no concurrent *writes*.)
- *
- * If we find it, this is a known good configuration, and we're done.
- */
- if (objectInChildList(mrl->obj, acqObj))
- return;
-
- /*
- * "mrl" is going to need to have a history tree. If it's currently
- * a thin lock, we make it fat now. The thin lock might have a
- * nonzero recursive lock count, which we need to carry over.
- *
- * Our thread holds the lock, so we're allowed to rewrite the lock
- * without worrying that something will change out from under us.
- */
- if (!IS_LOCK_FAT(&mrl->obj->lock)) {
- LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
- mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
- inflateMonitor(self, mrl->obj);
- }
-
- /*
- * We haven't seen this configuration before. We need to scan down
- * acqObj's tree to see if any of the monitors in self->pLockedObjects
- * appear. We grab a global lock before traversing or updating the
- * history list.
- *
- * If we find a match for any of our held locks, we know that the lock
- * has previously been acquired *after* acqObj, and we throw an error.
- *
- * The easiest way to do this is to create a link from "mrl" to "acqObj"
- * and do a recursive traversal, marking nodes as we cross them. If
- * we cross one a second time, we have a cycle and can throw an error.
- * (We do the flag-clearing traversal before adding the new link, so
- * that we're guaranteed to terminate.)
- *
- * If "acqObj" is a thin lock, it has no history, and we can create a
- * link to it without additional checks. [ We now guarantee that it's
- * always fat. ]
- */
- bool failed = false;
- dvmLockMutex(&gDvm.deadlockHistoryLock);
- linkParentToChild(mrl->obj, acqObj);
- if (!traverseTree(self, acqObj)) {
- LOGW("%s\n", kEndBanner);
- failed = true;
-
- /* remove the entry so we're still okay when in "warning" mode */
- unlinkParentFromChild(mrl->obj, acqObj);
- }
- dvmUnlockMutex(&gDvm.deadlockHistoryLock);
-
- if (failed) {
- switch (gDvm.deadlockPredictMode) {
- case kDPErr:
- dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
- break;
- case kDPAbort:
- LOGE("Aborting due to potential deadlock\n");
- dvmAbort();
- break;
- default:
- /* warn only */
- break;
- }
- }
-}
-
-/*
- * We're removing "child" from existence. We want to pull all of
- * child's children into "parent", filtering out duplicates. This is
- * called during the GC.
- *
- * This does not modify "child", which might have multiple parents.
- */
-static void mergeChildren(Object* parent, const Object* child)
-{
- Monitor* mon;
- int i;
-
- assert(IS_LOCK_FAT(&child->lock));
- mon = LW_MONITOR(child->lock);
- ExpandingObjectList* pList = &mon->historyChildren;
-
- for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
- Object* grandChild = expandBufGetEntry(pList, i);
-
- if (!objectInChildList(parent, grandChild)) {
- LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
- linkParentToChild(parent, grandChild);
- } else {
- LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
- }
- }
-}
-
-/*
- * An object with a fat lock is being collected during a GC pass. We
- * want to remove it from any lock history trees that it is a part of.
- *
- * This may require updating the history trees in several monitors. The
- * monitor semantics guarantee that no other thread will be accessing
- * the history trees at the same time.
- */
-static void removeCollectedObject(Object* obj)
-{
- Monitor* mon;
-
- LOGVV("+++ collecting %p\n", obj);
-
- /*
- * For every parent of this object:
- * - merge all of our children into the parent's child list (creates
- * a two-way link between parent and child)
- * - remove ourselves from the parent's child list
- */
- ExpandingObjectList* pList;
- int i;
-
- assert(IS_LOCK_FAT(&obj->lock));
- mon = LW_MONITOR(obj->lock);
- pList = &mon->historyParents;
- for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
- Object* parent = expandBufGetEntry(pList, i);
- Monitor* parentMon = LW_MONITOR(parent->lock);
-
- if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
- LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
- }
- assert(!expandObjHas(&parentMon->historyChildren, obj));
-
- mergeChildren(parent, obj);
- }
-
- /*
- * For every child of this object:
- * - remove ourselves from the child's parent list
- */
- pList = &mon->historyChildren;
- for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
- Object* child = expandBufGetEntry(pList, i);
- Monitor* childMon = LW_MONITOR(child->lock);
-
- if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
- LOGW("WARNING: parent %p not found in child %p\n", obj, child);
- }
- assert(!expandObjHas(&childMon->historyParents, obj));
- }
-}
-
-#endif /*WITH_DEADLOCK_PREDICTION*/