/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/HeapTable.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"

#include "utils/threads.h"      // need Android thread priorities
#define kInvalidPriority        10000
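/* kInvalidPriority is a sentinel meaning "no saved priority"; see
 * oldThreadPriority in dvmCollectGarbageInternal(), which only restores
 * the thread priority when the value differs from this sentinel.
 */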

#include <cutils/sched_policy.h>

#include <sys/resource.h>
#include <errno.h>              /* errno is tested around getpriority() below */
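
/* Table mapping GcReason values (via designated initializers) to the names
 * used in the GC log lines emitted at the end of dvmCollectGarbageInternal().
 */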
static const char* GcReasonStr[] = {
    [GC_FOR_MALLOC] = "GC_FOR_MALLOC",
    [GC_CONCURRENT] = "GC_CONCURRENT",
    [GC_EXPLICIT] = "GC_EXPLICIT"
};

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    /* Initialize the HeapWorker locks and other state.
     */
    dvmInitializeHeapWorkerState();

    return true;
}
bool dvmHeapStartupAfterZygote(void)
{
    return dvmHeapSourceStartupAfterZygote();
}
void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();

        /* Tables are allocated on the native heap; they need to be
         * cleaned up explicitly.  The process may stick around, so we
         * don't want to leak any native memory.
         */
        dvmHeapFreeLargeTable(gDvm.gcHeap->finalizableRefs);
        gDvm.gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gDvm.gcHeap->pendingFinalizationRefs);
        gDvm.gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gDvm.gcHeap->referenceOperations);
        gDvm.gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shutdown any threads internal to the heap.
 */
void dvmHeapThreadShutdown(void)
{
    dvmHeapSourceThreadShutdown();
}

/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.
 *
 * _The Java Programming Language_, 4th edition, says, "you can be sure
 * that all SoftReferences to softly reachable objects will be cleared
 * before an OutOfMemoryError is thrown."
 *
 * It's unclear whether that holds for all situations where an OOM can
 * be thrown, or just in the context of an allocation that fails due
 * to lack of heap space.  For simplicity we just throw the exception.
 *
 * (OOM due to actually running out of space is handled elsewhere.)
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/OutOfMemoryError;", msg);
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}
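
/* Illustrative usage sketch (not in the original source): heap operations
 * are bracketed by this lock pair, exactly as dvmMalloc() does below:
 *
 *     dvmLockHeap();
 *     ptr = tryMalloc(size);
 *     ...
 *     dvmUnlockHeap();
 */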

/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    dvmLockMutex(&gDvm.heapWorkerListLock);

    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        *op = WORKER_ENQUEUE;
    } else {
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}
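
/* Illustrative caller sketch (assumed; mirrors the HeapWorker loop that
 * consumes this queue):
 *
 *     HeapWorkerOperation op;
 *     Object *obj;
 *     while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
 *         // run the finalizer or reference enqueue indicated by op
 *         dvmReleaseTrackedAlloc(obj, NULL);
 *     }
 */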

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s",
            clearSoftReferences ? "(clear SoftReferences)" : "");
    dvmCollectGarbageInternal(clearSoftReferences, GC_FOR_MALLOC);
}
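
/* Note: GC_FOR_MALLOC collections run in GC_PARTIAL mode; see the gcMode
 * selection at the top of dvmCollectGarbageInternal().
 */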

/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapGrowthLimit) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//    (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//    (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry allocating.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }

    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}
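
/* Summary of the fallback ladder above: plain alloc; wait for a running
 * concurrent GC and retry; foreground GC and retry; grow the heap; GC with
 * SoftReference clearing plus grow; finally give up and return NULL so the
 * caller can throw OutOfMemoryError.
 */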

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }

        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if ((flags & ALLOC_FINALIZABLE) != 0) {
            /* This object is an instance of a class that
             * overrides finalize().  Add it to the finalizable list.
             */
            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
                                           (Object *)ptr)) {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "finalizable objects\n");
                dvmAbort();
            }
        }

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If the caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc(ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}
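
/* Illustrative usage sketch (assumes ALLOC_DEFAULT, the "track this
 * allocation" flag value from Alloc.h):
 *
 *     Object *obj = (Object *)dvmMalloc(size, ALLOC_DEFAULT);
 *     if (obj != NULL) {
 *         // ... make obj reachable from the root set ...
 *         dvmReleaseTrackedAlloc(obj, NULL);
 *     }
 */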

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}
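
/* The (8-1) alignment mask above matches the 8-byte alignment guarantee
 * documented at dvmMalloc().
 */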

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

static void verifyRootsAndHeap(void)
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 rootSuspend, rootSuspendTime, rootStart, rootEnd;
    u4 dirtySuspend, dirtyStart, dirtyEnd;
    u4 totalTime;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    GcMode gcMode;
    int oldThreadPriority = kInvalidPriority;
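
    /* Timing notes (see the log statements at the end of this function):
     * rootSuspend/rootStart bracket the initial suspend-all pause; for
     * concurrent GCs, dirtySuspend/dirtyStart/dirtyEnd bracket the second
     * pause used to re-mark roots and scan dirty cards.
     */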

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }

    gcMode = (reason == GC_FOR_MALLOC) ? GC_PARTIAL : GC_FULL;
    gcHeap->gcRunning = true;

    /*
     * Grab the heapWorkerLock to prevent the HeapWorker thread from
     * doing work.  If it's executing a finalizer or an enqueue operation
     * it won't be holding the lock, so this should return quickly.
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    rootSuspend = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);
    rootStart = dvmGetRelativeTimeMsec();
    rootSuspendTime = rootStart - rootSuspend;

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (reason != GC_CONCURRENT) {
        /* Get the priority (the "nice" value) of the current thread.  The
         * getpriority() call can legitimately return -1, so we have to
         * explicitly test errno.
         */
        errno = 0;
        int priorityResult = getpriority(PRIO_PROCESS, 0);
        if (errno != 0) {
            LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
        } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
            /* Current value is numerically greater than "normal", which
             * in backward UNIX terms means lower priority.
             */
            if (priorityResult >= ANDROID_PRIORITY_BACKGROUND) {
                set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
            }

            if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
                LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                        priorityResult, ANDROID_PRIORITY_NORMAL);
            } else {
                /* priority elevated; save value so we can restore it later */
                LOGD_HEAP("Elevating priority from %d to %d\n",
                        priorityResult, ANDROID_PRIORITY_NORMAL);
                oldThreadPriority = priorityResult;
            }
        }
    }

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(gcMode)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    if (reason == GC_CONCURRENT) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        rootEnd = dvmGetRelativeTimeMsec();
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (reason == GC_CONCURRENT) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dvmLockHeap();
        dirtySuspend = dvmGetRelativeTimeMsec();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        dirtyStart = dvmGetRelativeTimeMsec();
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences, clearSoftRefs,
                             &gcHeap->weakReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words.  It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, so swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (reason == GC_CONCURRENT) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }
    dvmHeapSweepUnmarkedObjects(gcMode, reason == GC_CONCURRENT,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (reason == GC_CONCURRENT) {
        dvmLockHeap();
    }

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though; if the process is still allocating a bunch of
     * memory, we'll be taking a ton of page faults that we don't
     * necessarily need to.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");
    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (reason == GC_CONCURRENT) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }
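
    /* The broadcast above pairs with the dvmWaitCond() loop in
     * dvmWaitForConcurrentGcToComplete() below.
     */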

    if (reason != GC_CONCURRENT) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        if (oldThreadPriority != kInvalidPriority) {
            if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
                LOGW_HEAP("Unable to reset priority to %d: %s\n",
                        oldThreadPriority, strerror(errno));
            } else {
                LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
            }

            if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
                set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
            }
        }
    }

    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (reason != GC_CONCURRENT) {
        u4 markSweepTime = dirtyEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + markSweepTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums",
             GcReasonStr[reason],
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtySuspendTime = dirtyStart - dirtySuspend;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + rootTime + dirtySuspendTime + dirtyTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums",
             GcReasonStr[reason],
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime);
    }
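
    /* Example of the resulting logcat line (illustrative values only):
     *   GC_CONCURRENT freed 1024K, 24% free 3200K/4224K, paused 2ms+5ms
     */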

    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
void dvmWaitForConcurrentGcToComplete(void)
{
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
}