2 * Copyright (C) 2008 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <cutils/mspace.h>
18 #include <limits.h> // for UINT_MAX
23 #include "alloc/Heap.h"
24 #include "alloc/HeapInternal.h"
25 #include "alloc/HeapSource.h"
26 #include "alloc/HeapBitmap.h"
// TODO: find a real header file for these.
extern int dlmalloc_trim(size_t);
extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint(void);
static void setIdealFootprint(size_t max);

/* Round a pointer/size up or down to a SYSTEM_PAGE_SIZE boundary.
 * SYSTEM_PAGE_SIZE is assumed to be a power of two.
 */
#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
    ((size_t)(p) & ~(SYSTEM_PAGE_SIZE - 1))

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))

/* Sanity-checks the global heap-source state; valid in any function
 * that runs after dvmHeapSourceStartup() has succeeded.
 * Wrapped in do/while(0) so it behaves as a single statement.
 */
#define HS_BOILERPLATE() \
    do { \
        assert(gHs != NULL); \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif
70 =======================================================
71 =======================================================
72 =======================================================
74 How will this be used?
75 allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
76 - if allocating in large doesn't work, try allocating from small
77 Heap.c will use HeapSource.h; HeapSource.c will do the right thing
78 between small and large
79 - some operations should be abstracted; put in a structure
81 How do we manage the size trade-offs?
82 - keep mspace max footprint clamped to actual footprint
83 - if small-alloc returns null, adjust large vs. small ratio
84 - give small all available slack and retry
85 - success or fail, snap back to actual footprint and give rest to large
87 managed as "small actual" + "large actual" + "delta to allowed total footprint"
88 - when allocating from one source or the other, give the delta to the
89 active source, but snap back afterwards
90 - that may not work so great for a gc heap, because small will always consume.
91 - but we need to use the memory, and the current max is the amount we
92 need to fill before a GC.
94 Find a way to permanently steal pages from the middle of the heap
97 Allocate String and char[] in a separate heap?
99 Maybe avoid growing small heap, even if there's slack? Look at
100 live ratio of small heap after a gc; scale it based on that.
102 =======================================================
103 =======================================================
104 =======================================================
108 /* The mspace to allocate from.
112 /* The largest size that this heap is allowed to grow to.
114 size_t absoluteMaxSize;
116 /* Number of bytes allocated from this mspace for objects,
117 * including any overhead. This value is NOT exact, and
118 * should only be used as an input for certain heuristics.
120 size_t bytesAllocated;
122 /* Number of bytes allocated from this mspace at which a
123 * concurrent garbage collection will be started.
125 size_t concurrentStartBytes;
127 /* Number of objects currently allocated from this mspace.
129 size_t objectsAllocated;
132 * The lowest address of this heap, inclusive.
137 * The highest address of this heap, exclusive.
143 /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
145 size_t targetUtilization;
147 /* Requested minimum heap size, or zero if there is no minimum.
151 /* The starting heap size.
155 /* The largest that the heap source as a whole is allowed to grow.
157 size_t absoluteMaxSize;
159 /* The desired max size of the heap source as a whole.
163 /* The maximum number of bytes allowed to be allocated from the
164 * active heap before a GC is forced. This is used to "shrink" the
165 * heap in lieu of actual compaction.
169 /* The heaps; heaps[0] is always the active heap,
170 * which new objects should be allocated from.
172 Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
174 /* The current number of heaps.
178 /* External allocation count.
180 size_t externalBytesAllocated;
182 /* The maximum number of external bytes that may be allocated.
184 size_t externalLimit;
186 /* True if zygote mode was active when the HeapSource was created.
191 * The base address of the virtual memory reservation.
196 * The length in bytes of the virtual memory reservation.
201 * The live object bitmap.
211 * State for the GC daemon.
215 bool gcThreadShutdown;
216 pthread_mutex_t gcThreadMutex;
217 pthread_cond_t gcThreadCond;
220 #define hs2heap(hs_) (&((hs_)->heaps[0]))
223 * Returns true iff a soft limit is in effect for the active heap.
226 softLimited(const HeapSource *hs)
228 /* softLimit will be either UINT_MAX or the limit for the
229 * active mspace. idealSize can be greater than softLimit
230 * if there is more than one heap. If there is only one
231 * heap, a non-UINT_MAX softLimit should always be the same
234 return hs->softLimit <= hs->idealSize;
238 * Returns approximately the maximum number of bytes allowed to be
239 * allocated from the active heap before a GC is forced.
242 getAllocLimit(const HeapSource *hs)
244 if (softLimited(hs)) {
245 return hs->softLimit;
247 return mspace_max_allowed_footprint(hs2heap(hs)->msp);
252 * Returns the current footprint of all heaps. If includeActive
253 * is false, don't count the heap at index 0.
256 oldHeapOverhead(const HeapSource *hs, bool includeActive)
258 size_t footprint = 0;
266 for (/* i = i */; i < hs->numHeaps; i++) {
267 //TODO: include size of bitmaps? If so, don't use bitsLen, listen to .max
268 footprint += mspace_footprint(hs->heaps[i].msp);
274 * Returns the heap that <ptr> could have come from, or NULL
275 * if it could not have come from any heap.
278 ptr2heap(const HeapSource *hs, const void *ptr)
280 const size_t numHeaps = hs->numHeaps;
283 //TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
285 for (i = 0; i < numHeaps; i++) {
286 const Heap *const heap = &hs->heaps[i];
288 if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
297 * Functions to update heapSource->bytesAllocated when an object
298 * is allocated or freed. mspace_usable_size() will give
299 * us a much more accurate picture of heap utilization than
300 * the requested byte sizes would.
302 * These aren't exact, and should not be treated as such.
304 static void countAllocation(Heap *heap, const void *ptr, bool isObj)
308 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
310 heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
311 HEAP_SOURCE_CHUNK_OVERHEAD;
313 heap->objectsAllocated++;
314 hs = gDvm.gcHeap->heapSource;
315 dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);
318 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
321 static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
326 delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
328 if (delta < heap->bytesAllocated) {
329 heap->bytesAllocated -= delta;
331 heap->bytesAllocated = 0;
333 hs = gDvm.gcHeap->heapSource;
334 dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
335 if (heap->objectsAllocated > 0) {
336 heap->objectsAllocated--;
341 static HeapSource *gHs = NULL;
344 createMspace(void *base, size_t startSize, size_t absoluteMaxSize)
348 /* Create an unlocked dlmalloc mspace to use as
349 * a small-object heap source.
351 * We start off reserving heapSizeStart/2 bytes but
352 * letting the heap grow to heapSizeStart. This saves
353 * memory in the case where a process uses even less
354 * than the starting size.
356 LOGV_HEAP("Creating VM heap of size %u\n", startSize);
358 msp = create_contiguous_mspace_with_base(startSize/2,
359 absoluteMaxSize, /*locked=*/false, base);
361 /* Don't let the heap grow past the starting size without
364 mspace_set_max_allowed_footprint(msp, startSize);
366 /* There's no guarantee that errno has meaning when the call
367 * fails, but it often does.
369 LOGE_HEAP("Can't create VM heap of size (%u,%u): %s\n",
370 startSize/2, absoluteMaxSize, strerror(errno));
/* Adds a new heap to <hs>, making it the active heap (heaps[0]).
 * If <msp> is NULL, carves a fresh mspace out of the space left
 * above the current active heap's break; otherwise adopts <msp>.
 * NOTE(review): this listing is a lossy sample — declarations,
 * braces, and the return paths are missing between the numbered
 * lines below; numbers are original file line numbers.
 */
377 addNewHeap(HeapSource *hs, mspace msp, size_t mspAbsoluteMaxSize)
382 if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
383 LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
384 hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
389 memset(&heap, 0, sizeof(heap));
/* Adopting a caller-supplied mspace: it spans the whole reservation. */
393 heap.absoluteMaxSize = mspAbsoluteMaxSize;
394 heap.concurrentStartBytes = UINT_MAX;
395 heap.base = hs->heapBase;
396 heap.limit = hs->heapBase + heap.absoluteMaxSize;
/* Otherwise: shrink the old active heap to its current (page-aligned)
 * footprint and create the new heap in the remaining address space.
 */
400 overhead = ALIGN_UP_TO_PAGE_SIZE(oldHeapOverhead(hs, true));
401 if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
402 LOGE_HEAP("No room to create any more heaps "
403 "(%zd overhead, %zd max)\n",
404 overhead, hs->absoluteMaxSize);
407 hs->heaps[0].absoluteMaxSize = overhead;
408 heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
409 base = contiguous_mspace_sbrk0(hs->heaps[0].msp);
410 hs->heaps[0].limit = base;
411 base = (void *)ALIGN_UP_TO_PAGE_SIZE(base);
412 heap.msp = createMspace(base, HEAP_MIN_FREE, heap.absoluteMaxSize);
413 heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
415 heap.limit = heap.base + heap.absoluteMaxSize;
416 if (heap.msp == NULL) {
421 /* Don't let the soon-to-be-old heap grow any further.
423 if (hs->numHeaps > 0) {
424 mspace msp = hs->heaps[0].msp;
425 mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
428 /* Put the new heap in the list, at heaps[0].
429 * Shift existing heaps down.
431 memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
439 * The garbage collection daemon. Initiates a concurrent collection
442 static void *gcDaemonThread(void* arg)
444 dvmChangeStatus(NULL, THREAD_VMWAIT);
445 dvmLockMutex(&gHs->gcThreadMutex);
446 while (gHs->gcThreadShutdown != true) {
447 dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
449 dvmChangeStatus(NULL, THREAD_RUNNING);
450 dvmCollectGarbageInternal(false, GC_CONCURRENT);
451 dvmChangeStatus(NULL, THREAD_VMWAIT);
454 dvmChangeStatus(NULL, THREAD_RUNNING);
458 static bool gcDaemonStartup(void)
460 dvmInitMutex(&gHs->gcThreadMutex);
461 pthread_cond_init(&gHs->gcThreadCond, NULL);
462 gHs->gcThreadShutdown = false;
463 gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
464 gcDaemonThread, NULL);
465 return gHs->hasGcThread;
468 static void gcDaemonShutdown(void)
470 if (gHs->hasGcThread) {
471 dvmLockMutex(&gHs->gcThreadMutex);
472 gHs->gcThreadShutdown = true;
473 dvmSignalCond(&gHs->gcThreadCond);
474 dvmUnlockMutex(&gHs->gcThreadMutex);
475 pthread_join(gHs->gcThread, NULL);
/*
480 * Initializes the heap source; must be called before any other
481 * dvmHeapSource*() functions.  Returns a GcHeap structure
482 * allocated from the heap source.
 *
 * NOTE(review): lossy listing — declarations, error-exit labels, and
 * closing braces are missing between the numbered lines; numbers are
 * original file line numbers.  The gcHeap/hs descriptors are carved
 * out of the mspace itself and counted via countAllocation() below.
 */
485 dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
495 if (startSize > absoluteMaxSize) {
496 LOGE("Bad heap parameters (start=%d, max=%d)\n",
497 startSize, absoluteMaxSize);
502 * Allocate a contiguous region of virtual memory to subdivided
503 * among the heaps managed by the garbage collector.
505 length = ALIGN_UP_TO_PAGE_SIZE(absoluteMaxSize);
506 base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
511 /* Create an unlocked dlmalloc mspace to use as
512 * the small object heap source.
514 msp = createMspace(base, startSize, absoluteMaxSize);
519 /* Allocate a descriptor from the heap we just created.
521 gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
522 if (gcHeap == NULL) {
523 LOGE_HEAP("Can't allocate heap descriptor\n");
526 memset(gcHeap, 0, sizeof(*gcHeap));
528 hs = mspace_malloc(msp, sizeof(*hs));
530 LOGE_HEAP("Can't allocate heap source\n");
533 memset(hs, 0, sizeof(*hs));
535 hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
537 hs->startSize = startSize;
538 hs->absoluteMaxSize = absoluteMaxSize;
539 hs->idealSize = startSize;
540 hs->softLimit = UINT_MAX;    // no soft limit at first
542 hs->sawZygote = gDvm.zygote;
543 hs->hasGcThread = false;
545 hs->heapLength = length;
546 if (!addNewHeap(hs, msp, absoluteMaxSize)) {
547 LOGE_HEAP("Can't add initial heap\n");
/* Both bitmaps cover the entire reservation. */
550 if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
551 LOGE_HEAP("Can't create liveBits\n");
554 if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
555 LOGE_HEAP("Can't create markBits\n");
556 dvmHeapBitmapDelete(&hs->liveBits);
560 gcHeap->markContext.bitmap = &hs->markBits;
561 gcHeap->heapSource = hs;
/* Account for the two descriptors allocated above. */
563 countAllocation(hs2heap(hs), gcHeap, false);
564 countAllocation(hs2heap(hs), hs, false);
/* Error path: release the whole reservation. */
570 munmap(base, length);
574 bool dvmHeapSourceStartupAfterZygote(void)
576 return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
580 * This is called while in zygote mode, right before we fork() for the
581 * first time. We create a heap for all future zygote process allocations,
582 * in an attempt to avoid touching pages in the zygote heap. (This would
583 * probably be unnecessary if we had a compacting GC -- the source of our
584 * troubles is small allocations filling in the gaps from larger ones.)
587 dvmHeapSourceStartupBeforeFork()
589 HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"
595 if (!gDvm.newZygoteHeapAllocated) {
596 /* Create a new heap for post-fork zygote allocations. We only
597 * try once, even if it fails.
599 LOGV("Splitting out new zygote heap\n");
600 gDvm.newZygoteHeapAllocated = true;
601 return addNewHeap(hs, NULL, 0);
606 void dvmHeapSourceThreadShutdown(void)
608 if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
614 * Tears down the entire GcHeap structure and all of the substructures
615 * attached to it. This call has the side effect of setting the given
616 * gcHeap pointer and gHs to NULL.
619 dvmHeapSourceShutdown(GcHeap **gcHeap)
621 if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
624 hs = (*gcHeap)->heapSource;
626 assert((char *)*gcHeap >= hs->heapBase);
627 assert((char *)*gcHeap < hs->heapBase + hs->heapLength);
629 dvmHeapBitmapDelete(&hs->liveBits);
630 dvmHeapBitmapDelete(&hs->markBits);
632 munmap(hs->heapBase, hs->heapLength);
639 * Gets the begining of the allocation for the HeapSource.
641 void *dvmHeapSourceGetBase(void)
643 return gHs->heapBase;
/*
647 * Returns the requested value. If the per-heap stats are requested, fill
 * them in as well.
650 * Caller must hold the heap lock.
 *
 * NOTE(review): lossy listing — the external-stat fast path, switch
 * scaffolding (case labels/breaks), and the summing of per-heap values
 * into the return total are missing between the numbered lines.
 */
653 dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
656 HeapSource *hs = gHs;
/* External stats are global, not per-heap; answered directly. */
664 case HS_EXTERNAL_BYTES_ALLOCATED:
665 return hs->externalBytesAllocated;
666 case HS_EXTERNAL_LIMIT:
667 return hs->externalLimit;
669 // look at all heaps.
673 assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
674 for (i = 0; i < hs->numHeaps; i++) {
675 Heap *const heap = &hs->heaps[i];
/* Pick the requested per-heap metric. */
679 value = mspace_footprint(heap->msp);
681 case HS_ALLOWED_FOOTPRINT:
682 value = mspace_max_allowed_footprint(heap->msp);
684 case HS_BYTES_ALLOCATED:
685 value = heap->bytesAllocated;
687 case HS_OBJECTS_ALLOCATED:
688 value = heap->objectsAllocated;
695 perHeapStats[i] = value;
/* Points <dst> at the slice of <src>'s storage that covers
 * [base, max] without copying any bits; used to hand the sweep code
 * one per-heap view of the shared live/mark bitmaps.
 * NOTE(review): lossy listing — the assignments of dst->base/dst->max
 * and the offset declaration are missing between the numbered lines.
 */
702 static void aliasBitmap(HeapBitmap *dst, HeapBitmap *src,
703 uintptr_t base, uintptr_t max) {
708 dst->bitsLen = HB_OFFSET_TO_BYTE_INDEX(max - base) + sizeof(dst->bits);
709 /* The exclusive limit from bitsLen is greater than the inclusive max. */
710 assert(base + HB_MAX_OFFSET(dst) > max);
711 /* The exclusive limit is at most one word of bits beyond max. */
712 assert((base + HB_MAX_OFFSET(dst)) - max <=
713 HB_OBJECT_ALIGNMENT * HB_BITS_PER_WORD);
714 dst->allocLen = dst->bitsLen;
/* Heap bases are word-aligned in the bitmap, so the alias can start
 * exactly on a word boundary of src->bits.
 */
715 offset = base - src->base;
716 assert(HB_OFFSET_TO_MASK(offset) == 1 << 31);
717 dst->bits = &src->bits[HB_OFFSET_TO_INDEX(offset)];
721 * Initializes a vector of object and mark bits to the object and mark
722 * bits of each heap. The bits are aliased to the heapsource
723 * object and mark bitmaps. This routine is used by the sweep code
724 * which needs to free each object in the correct heap.
726 void dvmHeapSourceGetObjectBitmaps(HeapBitmap liveBits[], HeapBitmap markBits[],
729 HeapSource *hs = gHs;
735 assert(numHeaps == hs->numHeaps);
736 for (i = 0; i < hs->numHeaps; ++i) {
737 base = (uintptr_t)hs->heaps[i].base;
738 /* -1 because limit is exclusive but max is inclusive. */
739 max = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
740 aliasBitmap(&liveBits[i], &hs->liveBits, base, max);
741 aliasBitmap(&markBits[i], &hs->markBits, base, max);
746 * Get the bitmap representing all live objects.
748 HeapBitmap *dvmHeapSourceGetLiveBits(void)
752 return &gHs->liveBits;
755 void dvmHeapSourceSwapBitmaps(void)
760 gHs->liveBits = gHs->markBits;
764 void dvmHeapSourceZeroMarkBitmap(void)
768 dvmHeapBitmapZero(&gHs->markBits);
/* Marks every object in the heaps below <immuneLimit> by copying the
 * corresponding words of the live bitmap into the mark bitmap, so the
 * collector treats those (zygote) heaps as fully live.
 * NOTE(review): lossy listing — declarations of src/dst, loop braces,
 * and closing braces are missing between the numbered lines.
 */
771 void dvmMarkImmuneObjects(const char *immuneLimit)
774 size_t i, index, length;
777 * Copy the contents of the live bit vector for immune object
778 * range into the mark bit vector.
780 /* The only values generated by dvmHeapSourceGetImmuneLimit() */
781 assert(immuneLimit == gHs->heaps[0].base ||
782 immuneLimit == NULL);
783 assert(gHs->liveBits.base == gHs->markBits.base);
784 assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
785 /* heap[0] is never immune */
786 assert(gHs->heaps[0].base >= immuneLimit);
787 assert(gHs->heaps[0].limit > immuneLimit);
/* Heaps are ordered newest-first, so everything from index 1 up that
 * sits below immuneLimit is an old (zygote) heap.
 */
789 for (i = 1; i < gHs->numHeaps; ++i) {
790 if (gHs->heaps[i].base < immuneLimit) {
791 assert(gHs->heaps[i].limit <= immuneLimit);
792 /* Compute the number of words to copy in the bitmap. */
793 index = HB_OFFSET_TO_INDEX(
794 (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
795 /* Compute the starting offset in the live and mark bits. */
796 src = (char *)(gHs->liveBits.bits + index);
797 dst = (char *)(gHs->markBits.bits + index);
798 /* Compute the number of bytes of the live bitmap to copy. */
799 length = HB_OFFSET_TO_BYTE_INDEX(
800 gHs->heaps[i].limit - gHs->heaps[i].base);
802 memcpy(dst, src, length);
803 /* Make sure max points to the address of the highest set bit. */
804 if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
805 gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
812 * Allocates <n> bytes of zeroed data.
815 dvmHeapSourceAlloc(size_t n)
817 HeapSource *hs = gHs;
823 if (heap->bytesAllocated + n > hs->softLimit) {
825 * This allocation would push us over the soft limit; act as
826 * if the heap is full.
828 LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
829 FRACTIONAL_MB(hs->softLimit), n);
832 ptr = mspace_calloc(heap->msp, 1, n);
836 countAllocation(heap, ptr, true);
838 * Check to see if a concurrent GC should be initiated.
840 if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
842 * The garbage collector thread is already running or has yet
843 * to be started. Do nothing.
847 if (heap->bytesAllocated > heap->concurrentStartBytes) {
849 * We have exceeded the allocation threshold. Wake up the
852 dvmSignalCond(&gHs->gcThreadCond);
857 /* Remove any hard limits, try to allocate, and shrink back down.
858 * Last resort when trying to allocate an object.
861 heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
866 /* Grow as much as possible, but don't let the real footprint
867 * plus external allocations go over the absolute max.
869 max = heap->absoluteMaxSize;
870 if (max > hs->externalBytesAllocated) {
871 max -= hs->externalBytesAllocated;
873 mspace_set_max_allowed_footprint(heap->msp, max);
874 ptr = dvmHeapSourceAlloc(n);
876 /* Shrink back down as small as possible. Our caller may
877 * readjust max_allowed to a more appropriate value.
879 mspace_set_max_allowed_footprint(heap->msp,
880 mspace_footprint(heap->msp));
889 * Allocates <n> bytes of zeroed data, growing as much as possible
893 dvmHeapSourceAllocAndGrow(size_t n)
895 HeapSource *hs = gHs;
903 ptr = dvmHeapSourceAlloc(n);
908 oldIdealSize = hs->idealSize;
909 if (softLimited(hs)) {
910 /* We're soft-limited. Try removing the soft limit to
911 * see if we can allocate without actually growing.
913 hs->softLimit = UINT_MAX;
914 ptr = dvmHeapSourceAlloc(n);
916 /* Removing the soft limit worked; fix things up to
917 * reflect the new effective ideal size.
919 snapIdealFootprint();
922 // softLimit intentionally left at UINT_MAX.
925 /* We're not soft-limited. Grow the heap to satisfy the request.
926 * If this call fails, no footprints will have changed.
928 ptr = heapAllocAndGrow(hs, heap, n);
930 /* The allocation succeeded. Fix up the ideal size to
931 * reflect any footprint modifications that had to happen.
933 snapIdealFootprint();
935 /* We just couldn't do it. Restore the original ideal size,
936 * fixing up softLimit if necessary.
938 setIdealFootprint(oldIdealSize);
/*
944 * Frees the first numPtrs objects in the ptrs list and returns the
945 * amount of reclaimed storage. The list must contain addresses all in
946 * the same mspace, and must be in increasing order. This implies that
947 * there are no duplicates, and no entries are NULL.
 *
 * NOTE(review): lossy listing — declarations of numBytes/i, the NULL
 * check on heap, loop/branch closing braces, and the return statement
 * are missing between the numbered lines.
 */
949 size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
960 assert(ptrs != NULL);
961 assert(*ptrs != NULL);
962 heap = ptr2heap(gHs, *ptrs);
965 mspace *msp = heap->msp;
966 // Calling mspace_free on shared heaps disrupts sharing too
967 // much. For heap[0] -- the 'active heap' -- we call
968 // mspace_free, but on the other heaps we only do some
/* ...accounting (see the else branch at line 1009). */
970 if (heap == gHs->heaps) {
971 // mspace_merge_objects takes two allocated objects, and
972 // if the second immediately follows the first, will merge
973 // them, returning a larger object occupying the same
974 // memory. This is a local operation, and doesn't require
975 // dlmalloc to manipulate any freelists. It's pretty
976 // inexpensive compared to free().
978 // ptrs is an array of objects all in memory order, and if
979 // client code has been allocating lots of short-lived
980 // objects, this is likely to contain runs of objects all
981 // now garbage, and thus highly amenable to this optimization.
983 // Unroll the 0th iteration around the loop below,
984 // countFree ptrs[0] and initializing merged.
985 assert(ptrs[0] != NULL);
986 assert(ptr2heap(gHs, ptrs[0]) == heap);
987 countFree(heap, ptrs[0], &numBytes);
988 void *merged = ptrs[0];
991 for (i = 1; i < numPtrs; i++) {
992 assert(merged != NULL);
993 assert(ptrs[i] != NULL);
994 assert((intptr_t)merged < (intptr_t)ptrs[i]);
995 assert(ptr2heap(gHs, ptrs[i]) == heap);
996 countFree(heap, ptrs[i], &numBytes);
997 // Try to merge. If it works, merged now includes the
998 // memory of ptrs[i]. If it doesn't, free merged, and
999 // see if ptrs[i] starts a new run of adjacent
1000 // objects to merge.
1001 if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
1002 mspace_free(msp, merged);
/* After the loop, release whatever run is still pending. */
1006 assert(merged != NULL);
1007 mspace_free(msp, merged);
1009 // This is not an 'active heap'. Only do the accounting.
1011 for (i = 0; i < numPtrs; i++) {
1012 assert(ptrs[i] != NULL);
1013 assert(ptr2heap(gHs, ptrs[i]) == heap);
1014 countFree(heap, ptrs[i], &numBytes);
1022 * Returns true iff <ptr> is in the heap source.
1025 dvmHeapSourceContainsAddress(const void *ptr)
1029 return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
1033 * Returns true iff <ptr> was allocated from the heap source.
1036 dvmHeapSourceContains(const void *ptr)
1040 if (dvmHeapSourceContainsAddress(ptr)) {
1041 return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
1047 * Returns the value of the requested flag.
1050 dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
1056 if (flag == HS_CONTAINS) {
1057 return dvmHeapSourceContains(ptr);
1058 } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
1059 HeapSource *hs = gHs;
1063 if (hs->sawZygote) {
1066 heap = ptr2heap(hs, ptr);
1068 /* If the object is not in the active heap, we assume that
1069 * it was allocated as part of zygote.
1071 return heap != hs->heaps;
1074 /* The pointer is outside of any known heap, or we are not
1075 * running in zygote mode.
1084 * Returns the number of usable bytes in an allocated chunk; the size
1085 * may be larger than the size passed to dvmHeapSourceAlloc().
1088 dvmHeapSourceChunkSize(const void *ptr)
1094 heap = ptr2heap(gHs, ptr);
1096 return mspace_usable_size(heap->msp, ptr);
1102 * Returns the number of bytes that the heap source has allocated
1103 * from the system using sbrk/mmap, etc.
1105 * Caller must hold the heap lock.
1108 dvmHeapSourceFootprint()
1112 //TODO: include size of bitmaps?
1113 return oldHeapOverhead(gHs, true);
1117 * Return the real bytes used by old heaps and external memory
1118 * plus the soft usage of the current heap. When a soft limit
1119 * is in effect, this is effectively what it's compared against
1120 * (though, in practice, it only looks at the current heap).
1123 getSoftFootprint(bool includeActive)
1125 HeapSource *hs = gHs;
1130 ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
1131 if (includeActive) {
1132 ret += hs->heaps[0].bytesAllocated;
1139 * Gets the maximum number of bytes that the heap source is allowed
1140 * to allocate from the system.
1143 dvmHeapSourceGetIdealFootprint()
1145 HeapSource *hs = gHs;
1149 return hs->idealSize;
1153 * Sets the soft limit, handling any necessary changes to the allowed
1154 * footprint of the active heap.
1157 setSoftLimit(HeapSource *hs, size_t softLimit)
1159 /* Compare against the actual footprint, rather than the
1160 * max_allowed, because the heap may not have grown all the
1161 * way to the allowed size yet.
1163 mspace msp = hs->heaps[0].msp;
1164 size_t currentHeapSize = mspace_footprint(msp);
1165 if (softLimit < currentHeapSize) {
1166 /* Don't let the heap grow any more, and impose a soft limit.
1168 mspace_set_max_allowed_footprint(msp, currentHeapSize);
1169 hs->softLimit = softLimit;
1171 /* Let the heap grow to the requested max, and remove any
1172 * soft limit, if set.
1174 mspace_set_max_allowed_footprint(msp, softLimit);
1175 hs->softLimit = UINT_MAX;
/*
1180 * Sets the maximum number of bytes that the heap source is allowed
1181 * to allocate from the system.  Clamps to the appropriate maximum
 * and minimum, then applies the remainder to the active heap as a
 * soft limit.
 *
 * NOTE(review): lossy listing — the function's return type line,
 * the #endif for the debug block, and the closing braces are missing
 * between the numbered lines.
 */
1185 setIdealFootprint(size_t max)
1187 HeapSource *hs = gHs;
1188 #if DEBUG_HEAP_SOURCE
1189 HeapSource oldHs = *hs;
1190 mspace msp = hs->heaps[0].msp;
1191 size_t oldAllowedFootprint =
1192 mspace_max_allowed_footprint(msp);
/* Clamp the request into [minimumSize, absoluteMaxSize]. */
1197 if (max > hs->absoluteMaxSize) {
1198 LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB\n",
1200 FRACTIONAL_MB(hs->absoluteMaxSize));
1201 max = hs->absoluteMaxSize;
1202 } else if (max < hs->minimumSize) {
1203 max = hs->minimumSize;
1206 /* Convert max into a size that applies to the active heap.
1207 * Old heaps and external allocations will count against the ideal size.
1209 size_t overhead = getSoftFootprint(false);
1211 if (overhead < max) {
1212 activeMax = max - overhead;
/* else activeMax is 0 — the overhead alone exceeds the budget. */
1217 setSoftLimit(hs, activeMax);
1218 hs->idealSize = max;
1220 HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
1222 oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
1223 oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
1224 oldAllowedFootprint, mspace_max_allowed_footprint(msp),
1225 mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
1226 hs->externalBytesAllocated);
1231 * Make the ideal footprint equal to the current footprint.
1234 snapIdealFootprint()
1238 setIdealFootprint(getSoftFootprint(true));
1242 * Gets the current ideal heap utilization, represented as a number
1243 * between zero and one.
1245 float dvmGetTargetHeapUtilization()
1247 HeapSource *hs = gHs;
1251 return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
1255 * Sets the new ideal heap utilization, represented as a number
1256 * between zero and one.
1258 void dvmSetTargetHeapUtilization(float newTarget)
1260 HeapSource *hs = gHs;
1264 /* Clamp it to a reasonable range.
1266 // TODO: This may need some tuning.
1267 if (newTarget < 0.2) {
1269 } else if (newTarget > 0.8) {
1273 hs->targetUtilization =
1274 (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
1275 LOGV("Set heap target utilization to %zd/%d (%f)\n",
1276 hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
1280 * If set is true, sets the new minimum heap size to size; always
1281 * returns the current (or previous) size. If size is negative,
1282 * removes the current minimum constraint (if present).
1285 dvmMinimumHeapSize(size_t size, bool set)
1287 HeapSource *hs = gHs;
1288 size_t oldMinimumSize;
1290 /* gHs caches an entry in gDvm.gcHeap; we need to hold the
1291 * heap lock if we're going to look at it. We also need the
1292 * lock for the call to setIdealFootprint().
1298 oldMinimumSize = hs->minimumSize;
1301 /* Don't worry about external allocations right now.
1302 * setIdealFootprint() will take them into account when
1303 * minimumSize is used, and it's better to hold onto the
1304 * intended minimumSize than to clamp it arbitrarily based
1305 * on the current allocations.
1307 if (size > hs->absoluteMaxSize) {
1308 size = hs->absoluteMaxSize;
1310 hs->minimumSize = size;
1311 if (size > hs->idealSize) {
1312 /* Force a snap to the minimum value, which we just set
1313 * and which setIdealFootprint() will take into consideration.
1315 setIdealFootprint(hs->idealSize);
1317 /* Otherwise we'll just keep it in mind the next time
1318 * setIdealFootprint() is called.
1324 return oldMinimumSize;
1328 * Given the size of a live set, returns the ideal heap size given
1329 * the current target utilization and MIN/MAX values.
1331 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
1334 getUtilizationTarget(size_t liveSize, size_t targetUtilization)
1338 /* Use the current target utilization ratio to determine the
1339 * ideal heap size based on the size of the live set.
1341 targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;
1343 /* Cap the amount of free space, though, so we don't end up
1344 * with, e.g., 8MB of free space when the live set size hits 8MB.
1346 if (targetSize > liveSize + HEAP_IDEAL_FREE) {
1347 targetSize = liveSize + HEAP_IDEAL_FREE;
1348 } else if (targetSize < liveSize + HEAP_MIN_FREE) {
1349 targetSize = liveSize + HEAP_MIN_FREE;
/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio. This
 * should only be called immediately after a full mark/sweep.
 *
 * NOTE(review): this listing appears to have lines elided (several
 * declarations, `} else {` arms, and closing braces are not visible).
 * The comments below annotate only the code that is visible.
 */
void dvmHeapSourceGrowForUtilization()
    /* Cached heap-source pointer; gHs caches a field of gDvm.gcHeap,
     * so the caller is expected to hold the heap lock.
     */
    HeapSource *hs = gHs;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set. Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap. Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps. When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly. This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
        getUtilizationTarget(currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* Undo the external-bytes hack so that the values used and logged
     * below reflect only the Dalvik-managed heap itself.
     */
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = UINT_MAX;
        /* Trigger the concurrent GC when the remaining allocation
         * headroom falls to CONCURRENT_START bytes.
         */
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;

    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        /* Report usage against the soft limit when one is active. */
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
        /* Otherwise report against the real (mspace) maximum. */
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
1447 * Return free pages to the system.
1448 * TODO: move this somewhere else, especially the native heap part.
1451 static void releasePagesInRange(void *start, void *end, void *nbytes)
1453 /* Linux requires that the madvise() start address is page-aligned.
1454 * We also align the end address.
1456 start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
1457 end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
1459 size_t length = (char *)end - (char *)start;
1460 madvise(start, length, MADV_DONTNEED);
1461 *(size_t *)nbytes += length;
/*
 * Return unused memory to the system if possible.
 *
 * bytesTrimmed[i] receives the number of bytes released from heap i;
 * arrayLen must be at least the number of heaps.
 *
 * NOTE(review): this listing appears truncated (return type, loop
 * closing brace, and the heapBytes initialization are not visible).
 */
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;

    /* One output slot per heap is required. */
    assert(arrayLen >= hs->numHeaps);

    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
        heapBytes += bytesTrimmed[i];

    /* Same for the native heap.
     */
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 *
 * NOTE(review): the tail of the parameter list (presumably a
 * `void *arg` passed through to the callback) is elided in this
 * listing — confirm against the header declaration.
 */
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                  const void *userptr, size_t userlen,
    HeapSource *hs = gHs;

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (i = hs->numHeaps; i > 0; --i) {
        /* i-1 because the loop counts down from numHeaps to 1. */
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
1529 * Gets the number of heaps available in the heap source.
1531 * Caller must hold the heap lock, because gHs caches a field
1535 dvmHeapSourceGetNumHeaps()
1537 HeapSource *hs = gHs;
1541 return hs->numHeaps;
/*
 * External allocation tracking
 *
 * In some situations, memory outside of the heap is tied to the
 * lifetime of objects in the heap. Since that memory is kept alive
 * by heap objects, it should provide memory pressure that can influence
 * GCs.
 */

/*
 * Returns whether an external allocation of n bytes can fit without
 * pushing (heap footprint + external bytes) past the absolute max.
 *
 * NOTE(review): this listing appears truncated — the `heap`
 * declaration/assignment and the return statements are not visible.
 */
externalAllocPossible(const HeapSource *hs, size_t n)
    size_t currentHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max. This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    if (currentHeapSize + hs->externalBytesAllocated + n <=
            heap->absoluteMaxSize)
    /* Log the failure, including how much space actually remains. */
    HSTRACE("externalAllocPossible(): "
        "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
        currentHeapSize, hs->externalBytesAllocated, n,
        heap->absoluteMaxSize,
        heap->absoluteMaxSize -
        (currentHeapSize + hs->externalBytesAllocated));
#define EXTERNAL_TARGET_UTILIZATION 820 // 80%

/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true. If not, returns
 * false and does not update the count.
 *
 * When grow == true, the external limit may be raised (via
 * getUtilizationTarget) to accommodate the request.
 *
 * The caller must ensure externalAllocPossible(hs, n) == true.
 *
 * NOTE(review): this listing appears truncated — the else branches,
 * return statements, and closing braces are not visible.
 */
externalAlloc(HeapSource *hs, size_t n, bool grow)
    /* Invariant: allocated external bytes never exceed the limit. */
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalAllocPossible(hs, n)); // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            /* NOTE(review): self may be NULL for a detached thread;
             * a NULL guard was likely elided from this listing.
             */
            self->allocProf.externalAllocCount++;
            self->allocProf.externalAllocSize += n;
        /* Grow path: take the bytes and recompute the external limit
         * from the new allocated total at the external target ratio.
         */
        hs->externalBytesAllocated += n;
        hs->externalLimit = getUtilizationTarget(
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
        HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
1633 gcForExternalAlloc(bool collectSoftReferences)
1635 if (gDvm.allocProf.enabled) {
1636 Thread* self = dvmThreadSelf();
1637 gDvm.allocProf.gcCount++;
1639 self->allocProf.gcCount++;
1642 dvmCollectGarbageInternal(collectSoftReferences, GC_EXTERNAL_ALLOC);
/*
 * Updates the internal count of externally-allocated memory. If there's
 * enough room for that memory, returns true. If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 *
 * Escalation strategy: try the existing free space, wait out any
 * concurrent GC, run a full GC, grow the limit, and finally run a GC
 * that also collects SoftReferences before giving up.
 *
 * NOTE(review): this listing appears truncated — the lock/unlock
 * calls, success returns, and closing braces are not visible.
 */
dvmTrackExternalAllocation(size_t n)
    HeapSource *hs = gHs;

    /* gHs caches an entry in gDvm.gcHeap; we need to hold the
     * heap lock if we're going to look at it.
     */
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    if (!externalAllocPossible(hs, n)) {
        /* The request can never fit under the absolute max; fail fast. */
        LOGE_HEAP("%zd-byte external allocation "
                "too large for this process.\n", n);

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {

    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap. Release the heap
         * lock, wait for the GC to complete, and try again.
         */
        dvmWaitForConcurrentGcToComplete();
        if (externalAlloc(hs, n, false)) {

    /* The "allocation" failed. Free up some space by doing
     * a full garbage collection. This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false); // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true); // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);

#if PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        /* NOTE(review): self may be NULL for a detached thread;
         * a NULL guard was likely elided from this listing.
         */
        self->allocProf.failedExternalAllocCount++;
        self->allocProf.failedExternalAllocSize += n;
/*
 * Reduces the internal count of externally-allocated memory.
 *
 * Also shrinks the external limit toward the target utilization,
 * while keeping at least n bytes of headroom so that an immediate
 * re-allocation of the freed size will not force a GC.
 *
 * NOTE(review): this listing appears truncated — the lock/unlock
 * calls, else arms, and closing braces are not visible.
 */
dvmTrackExternalFree(size_t n)
    HeapSource *hs = gHs;
    size_t newExternalLimit;
    size_t oldExternalBytesAllocated;

    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);

    /* gHs caches an entry in gDvm.gcHeap; we need to hold the
     * heap lock if we're going to look at it.
     */
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    oldExternalBytesAllocated = hs->externalBytesAllocated;
    if (n <= hs->externalBytesAllocated) {
        hs->externalBytesAllocated -= n;
        /* Freeing more than is tracked: clamp to zero and record
         * only the bytes that were actually tracked.
         */
        n = hs->externalBytesAllocated;
        hs->externalBytesAllocated = 0;

#if PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.externalFreeCount++;
        gDvm.allocProf.externalFreeSize += n;
        /* NOTE(review): self may be NULL for a detached thread;
         * a NULL guard was likely elided from this listing.
         */
        self->allocProf.externalFreeCount++;
        self->allocProf.externalFreeSize += n;

    /* Shrink as quickly as we can.
     */
    newExternalLimit = getUtilizationTarget(
        hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    if (newExternalLimit < oldExternalBytesAllocated) {
        /* Make sure that the remaining free space is at least
         * big enough to allocate something of the size that was
         * just freed. This makes it more likely that
         *   externalFree(N); externalAlloc(N);
         * will work without causing a GC.
         */
        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
                oldExternalBytesAllocated - newExternalLimit);
        newExternalLimit = oldExternalBytesAllocated;
    /* The limit only ever shrinks here; growth happens in
     * externalAlloc().
     */
    if (newExternalLimit < hs->externalLimit) {
        hs->externalLimit = newExternalLimit;
1805 * Returns the number of externally-allocated bytes being tracked by
1806 * dvmTrackExternalAllocation/Free().
1809 dvmGetExternalBytesAllocated()
1811 const HeapSource *hs = gHs;
1814 /* gHs caches an entry in gDvm.gcHeap; we need to hold the
1815 * heap lock if we're going to look at it. We also need the
1816 * lock for the call to setIdealFootprint().
1820 ret = hs->externalBytesAllocated;
1826 void *dvmHeapSourceGetImmuneLimit(GcMode mode)
1828 if (mode == GC_PARTIAL) {
1829 return hs2heap(gHs)->base;