#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
-#include "alloc/DlMalloc.h"
#include "alloc/HeapSource.h"
#define DEFAULT_HEAP_ID 1
#define HPSG_STATE(solidity, kind) ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
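+/* For reference: HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT) packs the kind into
+ * bits 3..5 and the solidity into bits 0..2 of the single chunk-state byte. */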
struct HeapChunkContext {
- void* startOfNextMemoryChunk;
u1 *buf;
u1 *p;
u1 *pieceLenField;
ctx->pieceLenField = NULL;
}
-static void append_chunk(HeapChunkContext *ctx, u1 state, void* ptr, size_t length) {
+static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
+ const void *userptr, size_t userlen, void *arg)
+{
+ HeapChunkContext *ctx = (HeapChunkContext *)arg;
+ u1 state;
+
+ UNUSED_PARAMETER(userlen);
+
+ assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);
+
/* Make sure there's enough room left in the buffer.
* We need to use two bytes for every fractional 256
- * allocation units used by the chunk and 17 bytes for
- * any header.
+ * allocation units used by the chunk.
*/
{
- size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
+ size_t needed = (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
if (bytesLeft < needed) {
flush_hpsg_chunk(ctx);
}
+
bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
if (bytesLeft < needed) {
- ALOGW("chunk is too big to transmit (length=%zd, %zd bytes)",
- length, needed);
+ ALOGW("chunk is too big to transmit (chunklen=%zd, %zd bytes)",
+ chunklen, needed);
return;
}
}
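+    /* Worked example of the sizing above (assuming ALLOCATION_UNIT_SIZE is 8,
+     * matching the "[u1]: size of allocation unit" header field below): a
+     * 40960-byte chunk is 5120 units, needing ceil(5120/256) = 20 two-byte
+     * records, i.e. needed = 40.
+     */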
+
+//TODO: notice when there's a gap and start a new heap, or at least a new range.
if (ctx->needHeader) {
/*
 * Start a new HPSx chunk.
 */

/* [u1]: size of allocation unit, in bytes */
*ctx->p++ = 8;
/* [u4]: virtual address of segment start */
- set4BE(ctx->p, (uintptr_t)ptr); ctx->p += 4;
+ set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;
/* [u4]: offset of this piece (relative to the virtual address) */
set4BE(ctx->p, 0); ctx->p += 4;
ctx->needHeader = false;
}
- /* Write out the chunk description.
- */
- length /= ALLOCATION_UNIT_SIZE; // convert to allocation units
- ctx->totalAllocationUnits += length;
- while (length > 256) {
- *ctx->p++ = state | HPSG_PARTIAL;
- *ctx->p++ = 255; // length - 1
- length -= 256;
- }
- *ctx->p++ = state;
- *ctx->p++ = length - 1;
-}
-
-/*
- * Called by dlmalloc_inspect_all. If used_bytes != 0 then start is
- * the start of a malloc-ed piece of memory of size used_bytes. If
- * start is 0 then start is the beginning of any free space not
- * including dlmalloc's book keeping and end the start of the next
- * dlmalloc chunk. Regions purely containing book keeping don't
- * callback.
- */
-static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
- void* arg)
-{
- u1 state;
- HeapChunkContext *ctx = (HeapChunkContext *)arg;
- UNUSED_PARAMETER(end);
- if (used_bytes == 0) {
- if (start == NULL) {
- // Reset for start of new heap.
- ctx->startOfNextMemoryChunk = NULL;
- flush_hpsg_chunk(ctx);
- }
- // Only process in use memory so that free region information
- // also includes dlmalloc book keeping.
- return;
- }
-
- /* If we're looking at the native heap, we'll just return
- * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
+ /* Determine the type of this chunk.
*/
- bool native = ctx->type == CHUNK_TYPE("NHSG");
-
- if (ctx->startOfNextMemoryChunk != NULL) {
- // Transmit any pending free memory. Native free memory of
- // over kMaxFreeLen could be because of the use of mmaps, so
- // don't report. If not free memory then start a new segment.
- bool flush = true;
- if (start > ctx->startOfNextMemoryChunk) {
- const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
- void* freeStart = ctx->startOfNextMemoryChunk;
- void* freeEnd = start;
- size_t freeLen = (char*)freeEnd - (char*)freeStart;
- if (!native || freeLen < kMaxFreeLen) {
- append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
- freeStart, freeLen);
- flush = false;
- }
- }
- if (flush) {
- ctx->startOfNextMemoryChunk = NULL;
- flush_hpsg_chunk(ctx);
- }
- }
- const Object *obj = (const Object *)start;
+ if (userptr == NULL) {
+ /* It's a free chunk.
+ */
+ state = HPSG_STATE(SOLIDITY_FREE, 0);
+ } else {
+ const Object *obj = (const Object *)userptr;
+ /* If we're looking at the native heap, we'll just return
+ * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
+ */
+ bool native = ctx->type == CHUNK_TYPE("NHSG");
- /* It's an allocated chunk. Figure out what it is.
- */
+ /* It's an allocated chunk. Figure out what it is.
+ */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
- if (!native && dvmIsValidObject(obj)) {
- ClassObject *clazz = obj->clazz;
- if (clazz == NULL) {
- /* The object was probably just created
- * but hasn't been initialized yet.
- */
- state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
- } else if (dvmIsTheClassClass(clazz)) {
- state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
- } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
- if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
- } else {
- switch (clazz->elementClass->primitiveType) {
- case PRIM_BOOLEAN:
- case PRIM_BYTE:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
- break;
- case PRIM_CHAR:
- case PRIM_SHORT:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
- break;
- case PRIM_INT:
- case PRIM_FLOAT:
+ if (!native && dvmIsValidObject(obj)) {
+ ClassObject *clazz = obj->clazz;
+ if (clazz == NULL) {
+ /* The object was probably just created
+ * but hasn't been initialized yet.
+ */
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
+ } else if (dvmIsTheClassClass(clazz)) {
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
+ } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
+ if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
- break;
- case PRIM_DOUBLE:
- case PRIM_LONG:
- state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
- break;
- default:
- assert(!"Unknown GC heap object type");
- state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
- break;
+ } else {
+ switch (clazz->elementClass->primitiveType) {
+ case PRIM_BOOLEAN:
+ case PRIM_BYTE:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
+ break;
+ case PRIM_CHAR:
+ case PRIM_SHORT:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
+ break;
+ case PRIM_INT:
+ case PRIM_FLOAT:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
+ break;
+ case PRIM_DOUBLE:
+ case PRIM_LONG:
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
+ break;
+ default:
+ assert(!"Unknown GC heap object type");
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
+ break;
+ }
}
+ } else {
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
} else {
- state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
+ obj = NULL; // it's not actually an object
+ state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
- } else {
- obj = NULL; // it's not actually an object
- state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
- append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
- ctx->startOfNextMemoryChunk =
- (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
+
+ /* Write out the chunk description.
+ */
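+    /* A sketch of the encoding: each record is a state byte plus a count-1
+     * byte, covering at most 256 units per record; e.g. a 600-unit chunk is
+     * emitted as (state|HPSG_PARTIAL, 255), (state|HPSG_PARTIAL, 255),
+     * (state, 87).
+     */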
+ chunklen /= ALLOCATION_UNIT_SIZE; // convert to allocation units
+ ctx->totalAllocationUnits += chunklen;
+ while (chunklen > 256) {
+ *ctx->p++ = state | HPSG_PARTIAL;
+ *ctx->p++ = 255; // length - 1
+ chunklen -= 256;
+ }
+ *ctx->p++ = state;
+ *ctx->p++ = chunklen - 1;
}
enum HpsgWhen {
*/
#define HPSx_CHUNK_SIZE (16384 - 16)
+extern "C" void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*),void*);
+
static void walkHeap(bool merge, bool native)
{
HeapChunkContext ctx;
ctx.p = ctx.buf;
ctx.needHeader = true;
if (native) {
- dlmalloc_inspect_all(heap_chunk_callback, (void*)&ctx);
+ dlmalloc_walk_heap(heap_chunk_callback, (void *)&ctx);
} else {
dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
}
* limitations under the License.
*/
+#include <cutils/mspace.h>
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>
#define SIZE_MAX UINT_MAX // TODO: get SIZE_MAX from stdint.h
#include "Dalvik.h"
-#include "alloc/DlMalloc.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"
+// TODO: find a real header file for these.
+extern "C" int dlmalloc_trim(size_t);
+extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
+
static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
* The highest address of this heap, exclusive.
*/
char *limit;
-
- /*
- * If the heap has an mspace, the current high water mark in
- * allocations requested via dvmHeapSourceMorecore.
- */
- char *brk;
};
struct HeapSource {
if (isSoftLimited(hs)) {
return hs->softLimit;
} else {
- return mspace_footprint_limit(hs2heap(hs)->msp);
+ return mspace_max_allowed_footprint(hs2heap(hs)->msp);
}
}
{
assert(heap->bytesAllocated < mspace_footprint(heap->msp));
- heap->bytesAllocated += mspace_usable_size(ptr) +
+ heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
HEAP_SOURCE_CHUNK_OVERHEAD;
heap->objectsAllocated++;
HeapSource* hs = gDvm.gcHeap->heapSource;
static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
- size_t delta = mspace_usable_size(ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
+ size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
assert(delta > 0);
if (delta < heap->bytesAllocated) {
heap->bytesAllocated -= delta;
static HeapSource *gHs = NULL;
-static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize)
+static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
{
- // Clear errno to allow strerror on error.
+ /* Create an unlocked dlmalloc mspace to use as
+ * a heap source.
+ *
+ * We start off reserving startSize / 2 bytes but
+ * letting the heap grow to startSize. This saves
+ * memory in the case where a process uses even less
+ * than the starting size.
+ */
+ LOGV_HEAP("Creating VM heap of size %zu", startSize);
errno = 0;
- // Allow access to inital pages that will hold mspace.
- mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE);
- // Create mspace using our backing storage starting at begin and with a footprint of
- // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of memory are
- // exhausted morecore will be called.
- mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
+
+ mspace msp = create_contiguous_mspace_with_base(startSize/2,
+ maximumSize, /*locked=*/false, base);
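+    /* Sizing sketch: with startSize = 4 MB this asks for a 2 MB contiguous
+     * space at `base` that can grow toward maximumSize; the footprint cap
+     * set below then holds it at startSize until we deliberately raise it.
+     */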
if (msp != NULL) {
- // Do not allow morecore requests to succeed beyond the starting size of the heap.
- mspace_set_footprint_limit(msp, startingSize);
+ /* Don't let the heap grow past the starting size without
+ * our intervention.
+ */
+ mspace_set_max_allowed_footprint(msp, startSize);
} else {
- ALOGE("create_mspace_with_base failed %s", strerror(errno));
+ /* There's no guarantee that errno has meaning when the call
+ * fails, but it often does.
+ */
+ LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
+ startSize/2, maximumSize, strerror(errno));
}
- return msp;
-}
-/*
- * Service request from DlMalloc to increase heap size.
- */
-void* dvmHeapSourceMorecore(void* mspace, intptr_t increment)
-{
- Heap* heap = NULL;
- for (size_t i = 0; i < gHs->numHeaps; i++) {
- if (gHs->heaps[i].msp == mspace) {
- heap = &gHs->heaps[i];
- break;
- }
- }
- if (heap == NULL) {
- ALOGE("Failed to find heap for mspace %p", mspace);
- dvmAbort();
- }
- char* original_brk = heap->brk;
- if (increment != 0) {
- char* new_brk = original_brk + increment;
- if (increment > 0) {
- // Should never be asked to increase the allocation beyond the capacity of the space.
- // Enforced by mspace_set_footprint_limit.
- assert(new_brk <= heap->limit);
- mprotect(original_brk, increment, PROT_READ | PROT_WRITE);
- } else {
- // Should never be asked for negative footprint (ie before base).
- assert(original_brk + increment > heap->base);
- // Advise we don't need the pages and protect them.
- size_t size = -increment;
- madvise(new_brk, size, MADV_DONTNEED);
- mprotect(new_brk, size, PROT_NONE);
- }
- // Update brk.
- heap->brk = new_brk;
- }
- return original_brk;
+ return msp;
}
-const size_t kInitialMorecoreStart = SYSTEM_PAGE_SIZE;
/*
* Add the initial heap. Returns false if the initial heap was
* already added to the heap source.
hs->heaps[0].maximumSize = maximumSize;
hs->heaps[0].concurrentStartBytes = SIZE_MAX;
hs->heaps[0].base = hs->heapBase;
- hs->heaps[0].limit = hs->heapBase + maximumSize;
- hs->heaps[0].brk = hs->heapBase + kInitialMorecoreStart;
+ hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
hs->numHeaps = 1;
return true;
}
* Heap storage comes from a common virtual memory reservation.
* The new heap will start on the page after the old heap.
*/
- char *base = hs->heaps[0].brk;
+ void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
+ char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
size_t overhead = base - hs->heaps[0].base;
assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);
overhead, hs->maximumSize);
return false;
}
- size_t morecoreStart = SYSTEM_PAGE_SIZE;
+
heap.maximumSize = hs->growthLimit - overhead;
heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
heap.base = base;
heap.limit = heap.base + heap.maximumSize;
- heap.brk = heap.base + morecoreStart;
- heap.msp = createMspace(base, morecoreStart, HEAP_MIN_FREE);
+ heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
if (heap.msp == NULL) {
return false;
}
*/
hs->heaps[0].maximumSize = overhead;
hs->heaps[0].limit = base;
- mspace_set_footprint_limit(hs->heaps[0].msp, overhead);
+ mspace msp = hs->heaps[0].msp;
+ mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));
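+    /* That is, freeze the old heap at whatever it has already mapped; future
+     * allocations come from the new heap installed at heaps[0] below.
+     */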
/* Put the new heap in the list, at heaps[0].
* Shift existing heaps down.
/* Create an unlocked dlmalloc mspace to use as
* a heap source.
*/
- msp = createMspace(base, kInitialMorecoreStart, startSize);
+ msp = createMspace(base, startSize, maximumSize);
if (msp == NULL) {
goto fail;
}
value = mspace_footprint(heap->msp);
break;
case HS_ALLOWED_FOOTPRINT:
- value = mspace_footprint_limit(heap->msp);
+ value = mspace_max_allowed_footprint(heap->msp);
break;
case HS_BYTES_ALLOCATED:
value = heap->bytesAllocated;
*/
size_t max = heap->maximumSize;
- mspace_set_footprint_limit(heap->msp, max);
+ mspace_set_max_allowed_footprint(heap->msp, max);
void* ptr = dvmHeapSourceAlloc(n);
/* Shrink back down as small as possible. Our caller may
* readjust max_allowed to a more appropriate value.
*/
- mspace_set_footprint_limit(heap->msp,
- mspace_footprint(heap->msp));
+ mspace_set_max_allowed_footprint(heap->msp,
+ mspace_footprint(heap->msp));
return ptr;
}
// mspace_free, but on the other heaps we only do some
// accounting.
if (heap == gHs->heaps) {
- // Count freed objects.
- for (size_t i = 0; i < numPtrs; i++) {
+ // mspace_merge_objects takes two allocated objects, and
+ // if the second immediately follows the first, will merge
+ // them, returning a larger object occupying the same
+ // memory. This is a local operation, and doesn't require
+ // dlmalloc to manipulate any freelists. It's pretty
+ // inexpensive compared to free().
+
+ // ptrs is an array of objects all in memory order, and if
+ // client code has been allocating lots of short-lived
+ // objects, this is likely to contain runs of objects all
+ // now garbage, and thus highly amenable to this optimization.
+
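+        // Illustration (hypothetical addresses, assuming each chunk
+        // immediately follows the previous one): for ptrs = {0x100, 0x140,
+        // 0x1c0}, both merges succeed, `merged` grows to cover the whole
+        // run, and one mspace_free() releases it instead of three.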
+ // Unroll the 0th iteration around the loop below,
+ // countFree ptrs[0] and initializing merged.
+ assert(ptrs[0] != NULL);
+ assert(ptr2heap(gHs, ptrs[0]) == heap);
+ countFree(heap, ptrs[0], &numBytes);
+ void *merged = ptrs[0];
+ for (size_t i = 1; i < numPtrs; i++) {
+ assert(merged != NULL);
assert(ptrs[i] != NULL);
+ assert((intptr_t)merged < (intptr_t)ptrs[i]);
assert(ptr2heap(gHs, ptrs[i]) == heap);
countFree(heap, ptrs[i], &numBytes);
+ // Try to merge. If it works, merged now includes the
+ // memory of ptrs[i]. If it doesn't, free merged, and
+ // see if ptrs[i] starts a new run of adjacent
+ // objects to merge.
+ if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
+ mspace_free(msp, merged);
+ merged = ptrs[i];
+ }
}
- // Bulk free ptrs.
- mspace_bulk_free(msp, ptrs, numPtrs);
+ assert(merged != NULL);
+ mspace_free(msp, merged);
} else {
// This is not an 'active heap'. Only do the accounting.
for (size_t i = 0; i < numPtrs; i++) {
Heap* heap = ptr2heap(gHs, ptr);
if (heap != NULL) {
- return mspace_usable_size(ptr);
+ return mspace_usable_size(heap->msp, ptr);
}
return 0;
}
if (softLimit < currentHeapSize) {
/* Don't let the heap grow any more, and impose a soft limit.
*/
- mspace_set_footprint_limit(msp, currentHeapSize);
+ mspace_set_max_allowed_footprint(msp, currentHeapSize);
hs->softLimit = softLimit;
} else {
/* Let the heap grow to the requested max, and remove any
* soft limit, if set.
*/
- mspace_set_footprint_limit(msp, softLimit);
+ mspace_set_max_allowed_footprint(msp, softLimit);
hs->softLimit = SIZE_MAX;
}
}
* Return free pages to the system.
* TODO: move this somewhere else, especially the native heap part.
*/
-static void releasePagesInRange(void* start, void* end, size_t used_bytes,
- void* releasedBytes)
+static void releasePagesInRange(void *start, void *end, void *nbytes)
{
- if (used_bytes == 0) {
- /*
- * We have a range of memory we can try to madvise()
- * back. Linux requires that the madvise() start address is
- * page-aligned. We also align the end address.
- */
- start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
- end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
- if (end > start) {
- size_t length = (char *)end - (char *)start;
- madvise(start, length, MADV_DONTNEED);
- *(size_t *)releasedBytes += length;
- }
+ /* Linux requires that the madvise() start address is page-aligned.
+ * We also align the end address.
+ */
+ start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
+ end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
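+    /* e.g. with 4 KiB pages, a free range [0x10000010, 0x10003ff0) shrinks
+     * to [0x10001000, 0x10003000), so the two whole pages inside it are
+     * madvised away.
+     */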
+ if (start < end) {
+ size_t length = (char *)end - (char *)start;
+ madvise(start, length, MADV_DONTNEED);
+ *(size_t *)nbytes += length;
}
}
for (size_t i = 0; i < hs->numHeaps; i++) {
Heap *heap = &hs->heaps[i];
- /* Return the wilderness chunk to the system. */
+ /* Return the wilderness chunk to the system.
+ */
mspace_trim(heap->msp, 0);
- /* Return any whole free pages to the system. */
- mspace_inspect_all(heap->msp, releasePagesInRange, &heapBytes);
+ /* Return any whole free pages to the system.
+ */
+ mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
}
- /* Same for the native heap. */
+ /* Same for the native heap.
+ */
dlmalloc_trim(0);
size_t nativeBytes = 0;
- dlmalloc_inspect_all(releasePagesInRange, &nativeBytes);
+ dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);
LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
heapBytes, nativeBytes, heapBytes + nativeBytes);
* Walks over the heap source and passes every allocated and
* free chunk to the callback.
*/
-void dvmHeapSourceWalk(void(*callback)(void* start, void* end,
- size_t used_bytes, void* arg),
+void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
+ const void *userptr, size_t userlen,
+ void *arg),
void *arg)
{
HS_BOILERPLATE();
//TODO: do this in address order
HeapSource *hs = gHs;
for (size_t i = hs->numHeaps; i > 0; --i) {
- mspace_inspect_all(hs->heaps[i-1].msp, callback, arg);
- callback(NULL, NULL, 0, arg); // Indicate end of a heap.
+ mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
}
}