OSDN Git Service

Allow allocation during a concurrent GC.
authorCarl Shapiro <cshapiro@google.com>
Fri, 2 Jul 2010 00:44:46 +0000 (17:44 -0700)
committerCarl Shapiro <cshapiro@google.com>
Fri, 2 Jul 2010 00:44:46 +0000 (17:44 -0700)
Previously, any thread performing a GC held the heap lock for the
entire GC.  If the GC performed was a concurrent GC, mutator threads
that allocate during the GC would be blocked until the GC completed.

With this change, if the GC performed is a concurrent GC, the heap
lock is released while the roots are being traced.  If a mutator
thread allocates an object from available storage, the allocation
proceeds.  If a mutator thread attempts to allocate an object larger
than available storage, the thread will block until the GC completes.

Change-Id: I91a04179c6f583f878b685405a6fdd16b9995017

vm/Globals.h
vm/alloc/Heap.c
vm/alloc/Heap.h
vm/alloc/HeapSource.c

index 30df209..e9b6ef3 100644 (file)
@@ -444,6 +444,12 @@ struct DvmGlobals {
      */
     pthread_mutex_t gcHeapLock;
 
+    /*
+     * Condition variable to queue threads waiting to retry an
+     * allocation.  Signaled after a concurrent GC is completed.
+     */
+    pthread_cond_t gcHeapCond;
+
     /* Opaque pointer representing the heap. */
     GcHeap*     gcHeap;
 
index 80b5030..cc3fd72 100644 (file)
@@ -285,9 +285,25 @@ static void *tryMalloc(size_t size)
         return ptr;
     }
 
-    /* The allocation failed.  Free up some space by doing
-     * a full garbage collection.  This may grow the heap
-     * if the live set is sufficiently large.
+    /*
+     * The allocation failed.  If the GC is running, block until it
+     * completes and retry.
+     */
+    if (gDvm.gcHeap->gcRunning) {
+        /*
+         * The GC is concurrently tracing the heap.  Release the heap
+         * lock, wait for the GC to complete, and retry the allocation.
+         */
+        dvmWaitForConcurrentGcToComplete();
+        ptr = dvmHeapSourceAlloc(size);
+        if (ptr != NULL) {
+            return ptr;
+        }
+    }
+    /*
+     * Another failure.  Our thread was starved or there may be too
+     * many live objects.  Try a foreground GC.  This will have no
+     * effect if the concurrent GC is already running.
      */
     gcForMalloc(false);
     ptr = dvmHeapSourceAlloc(size);
@@ -758,9 +774,10 @@ void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
 
     if (reason == GC_CONCURRENT) {
         /*
-         * We are performing a concurrent collection.  Resume all
-         * threads for the duration of the recursive mark.
+         * Resume threads while tracing from the roots.  We unlock the
+         * heap to allow mutator threads to allocate from free space.
          */
+        dvmUnlockHeap();
         dvmResumeAllThreads(SUSPEND_FOR_GC);
     }
 
@@ -773,9 +790,10 @@ void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
 
     if (reason == GC_CONCURRENT) {
         /*
-         * We are performing a concurrent collection.  Perform the
-         * final thread suspension.
+         * Re-acquire the heap lock and perform the final thread
+         * suspension.
          */
+        dvmLockHeap();
         dvmSuspendAllThreads(SUSPEND_FOR_GC);
         /*
          * As no barrier intercepts root updates, we conservatively
@@ -892,6 +910,15 @@ void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
 #endif
 
     dvmResumeAllThreads(SUSPEND_FOR_GC);
+
+    if (reason == GC_CONCURRENT) {
+        /*
+         * Wake up any threads that blocked after a failed allocation
+         * request.
+         */
+        dvmBroadcastCond(&gDvm.gcHeapCond);
+    }
+
     if (reason != GC_CONCURRENT) {
         if (oldThreadPriority != kInvalidPriority) {
             if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
@@ -925,6 +952,16 @@ void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
     }
 }
 
+void dvmWaitForConcurrentGcToComplete(void)
+{
+    Thread *self = dvmThreadSelf();
+    ThreadStatus oldStatus;
+    assert(self != NULL);
+    oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
+    dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
+    dvmChangeStatus(self, oldStatus);
+}
+
 #if WITH_HPROF
 /*
  * Perform garbage collection, writing heap information to the specified file.
index 95740c1..ea0510f 100644 (file)
@@ -83,4 +83,10 @@ typedef enum {
  */
 void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason);
 
+/*
+ * Blocks until the GC thread signals the completion of a
+ * concurrent GC.
+ */
+void dvmWaitForConcurrentGcToComplete(void);
+
 #endif  // _DALVIK_ALLOC_HEAP
index 544230d..86f9207 100644 (file)
@@ -285,8 +285,7 @@ ptr2heap(const HeapSource *hs, const void *ptr)
  *
  * These aren't exact, and should not be treated as such.
  */
-static inline void
-countAllocation(Heap *heap, const void *ptr, bool isObj)
+static void countAllocation(Heap *heap, const void *ptr, bool isObj)
 {
     HeapSource *hs;
 
@@ -789,28 +788,41 @@ dvmHeapSourceAlloc(size_t n)
     HeapSource *hs = gHs;
     Heap *heap;
     void *ptr;
+    size_t allocated;
 
     HS_BOILERPLATE();
     heap = hs2heap(hs);
-
-    if (heap->bytesAllocated + n <= hs->softLimit) {
-        ptr = mspace_calloc(heap->msp, 1, n);
-        if (ptr != NULL) {
-            countAllocation(heap, ptr, true);
-            size_t allocated = heap->bytesAllocated - heap->prevBytesAllocated;
-            if (allocated > OCCUPANCY_THRESHOLD) {
-                if (hs->hasGcThread == true) {
-                    dvmSignalCond(&gHs->gcThreadCond);
-                }
-            }
-        }
-    } else {
-        /* This allocation would push us over the soft limit;
-         * act as if the heap is full.
+    if (heap->bytesAllocated + n > hs->softLimit) {
+        /*
+         * This allocation would push us over the soft limit; act as
+         * if the heap is full.
          */
         LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
-                FRACTIONAL_MB(hs->softLimit), n);
-        ptr = NULL;
+                  FRACTIONAL_MB(hs->softLimit), n);
+        return NULL;
+    }
+    ptr = mspace_calloc(heap->msp, 1, n);
+    if (ptr == NULL) {
+        return NULL;
+    }
+    countAllocation(heap, ptr, true);
+    /*
+     * Check to see if a concurrent GC should be initiated.
+     */
+    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
+        /*
+         * The garbage collector thread is already running or has yet
+         * to be started.  Do nothing.
+         */
+        return ptr;
+    }
+    allocated = heap->bytesAllocated - heap->prevBytesAllocated;
+    if (allocated > OCCUPANCY_THRESHOLD) {
+        /*
+         * We have exceeded the occupancy threshold.  Wake up the
+         * garbage collector.
+         */
+        dvmSignalCond(&gHs->gcThreadCond);
     }
     return ptr;
 }
@@ -1653,6 +1665,18 @@ dvmTrackExternalAllocation(size_t n)
         goto out;
     }
 
+    if (gDvm.gcHeap->gcRunning) {
+        /*
+         * The GC is concurrently tracing the heap.  Release the heap
+         * lock, wait for the GC to complete, and try again.
+         */
+        dvmWaitForConcurrentGcToComplete();
+        if (externalAlloc(hs, n, false)) {
+            ret = true;
+            goto out;
+        }
+    }
+
     /* The "allocation" failed.  Free up some space by doing
      * a full garbage collection.  This may grow the heap source
      * if the live set is sufficiently large.