/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Linear memory allocation, tied to class loaders.
 */
#include "Dalvik.h"

#include <sys/mman.h>
#include <errno.h>

//#define DISABLE_LINEAR_ALLOC

// Use ashmem to name the LinearAlloc section
#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif /* USE_ASHMEM */

/*
Overview

This is intended to be a simple, fast allocator for "write-once" storage.
The expectation is that this will hold small allocations that don't change,
such as parts of classes (vtables, fields, methods, interfaces). Because
the lifetime of these items is tied to classes, which in turn are tied
to class loaders, we associate the storage with a ClassLoader object.

[ We don't yet support class unloading, and our ClassLoader implementation
is in flux, so for now we just have a single global region and the
"classLoader" argument is ignored. ]

By storing the data here, rather than on the system heap, we reduce heap
clutter, speed class loading, reduce the memory footprint (reduced heap
structure overhead), and most importantly we increase the number of pages
that remain shared between processes launched in "Zygote mode".

The 4 bytes preceding each block contain the block length. This allows us
to support "free" and "realloc" calls in a limited way. We don't free
storage once it has been allocated, but in some circumstances it could be
useful to erase storage to garbage values after a "free" or "realloc".
(Bad idea if we're trying to share pages.) We need to align to 8-byte
boundaries for some architectures, so we have a 50-50 chance of getting
this for free in a given block.

A NULL value for the "classLoader" argument refers to the bootstrap class
loader, which is never unloaded (until the VM shuts down).

Because the memory is not expected to be updated, we can use mprotect to
guard the pages on debug builds. Handy when tracking down corruption.
*/
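
/*
 * To make the layout concrete, here is a small worked example (a sketch
 * derived from the formulas below, assuming BLOCK_ALIGN=8 and
 * HEADER_EXTRA=4): a request for 5 bytes whose header lands at offset 4
 * returns a user pointer at offset 8. The next header must land at
 * offset 20, so the stored length is rounded up to 12 (5 data bytes plus
 * 7 pad bytes), and the next user pointer falls at offset 24.
 *
 *   offset:  4        8            13..19   20       24
 *            [len=12] [user data ] [pad...] [len...] [user data...]
 */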

/* alignment for allocations; must be power of 2, and currently >= hdr_xtra */
#define BLOCK_ALIGN         8

/* default length of memory segment (worst case is probably "dexopt") */
#define DEFAULT_MAX_LENGTH  (16*1024*1024)

/* leave enough space for a length word */
#define HEADER_EXTRA        4

/* overload the length word */
#define LENGTHFLAG_FREE     0x80000000
#define LENGTHFLAG_RW       0x40000000
#define LENGTHFLAG_MASK     (~(LENGTHFLAG_FREE|LENGTHFLAG_RW))
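
/*
 * Sketch of how the length word decodes (illustrative only; "raw" and
 * "mem" are hypothetical locals):
 *
 *   u4 raw = *getBlockHeader(mem);
 *   u4 len = raw & LENGTHFLAG_MASK;              // 30-bit block length
 *   bool isFree = (raw & LENGTHFLAG_FREE) != 0;  // set by dvmLinearFree
 *   bool isRW   = (raw & LENGTHFLAG_RW) != 0;    // ENFORCE_READ_ONLY only
 */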

static void checkAllFree(Object* classLoader);

/*
 * Someday, retrieve the linear alloc struct associated with a particular
 * class loader. For now, always use the bootstrap loader's instance.
 */
static inline LinearAllocHdr* getHeader(Object* classLoader)
{
    return gDvm.pBootLoaderAlloc;
}

/*
 * Convert a pointer to memory to a pointer to the block header (which is
 * currently just a length word).
 */
static inline u4* getBlockHeader(void* mem)
{
    return ((u4*) mem) - 1;
}
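
/*
 * Equivalently, since HEADER_EXTRA == sizeof(u4), the header lives
 * HEADER_EXTRA bytes below the user pointer:
 *   getBlockHeader(mem) == (u4*) ((u1*) mem - HEADER_EXTRA)
 */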

/*
 * Create a new linear allocation block.
 */
LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return (LinearAllocHdr*) 0x12345;
#endif
    LinearAllocHdr* pHdr;

    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));

    /*
     * "curOffset" points to the location of the next pre-block header,
     * which means we have to advance to the next BLOCK_ALIGN address and
     * back up by HEADER_EXTRA.
     *
     * Note we leave the first page empty (see below), and start the
     * first entry on the second page at an offset that ensures the next
     * chunk of data will be properly aligned.
     */
    assert(BLOCK_ALIGN >= HEADER_EXTRA);
    pHdr->curOffset = pHdr->firstOffset =
        (BLOCK_ALIGN-HEADER_EXTRA) + SYSTEM_PAGE_SIZE;
    pHdr->mapLength = DEFAULT_MAX_LENGTH;
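
    /*
     * Worked example (assuming SYSTEM_PAGE_SIZE == 4096): firstOffset is
     * (8-4) + 4096 = 4100, so the first header is written at offset 4100
     * and the first user pointer is 4104, which is 8-byte aligned.
     */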

#ifdef USE_ASHMEM
    int fd;

    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
    if (fd < 0) {
        ALOGE("ashmem LinearAlloc failed %s", strerror(errno));
        free(pHdr);
        return NULL;
    }

    pHdr->mapAddr = (char*)mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE, fd, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        ALOGE("LinearAlloc mmap(%d) failed: %s", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        close(fd);
        return NULL;
    }

    close(fd);
#else /*USE_ASHMEM*/
    // MAP_ANON is listed as "deprecated" on Linux,
    // but MAP_ANONYMOUS is not defined under Mac OS X.
    pHdr->mapAddr = (char*)mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        ALOGE("LinearAlloc mmap(%d) failed: %s", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        return NULL;
    }
#endif /*USE_ASHMEM*/

    /* region expected to begin on a page boundary */
    assert(((int) pHdr->mapAddr & (SYSTEM_PAGE_SIZE-1)) == 0);

    /* the system should initialize newly-mapped memory to zero */
    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);

    /*
     * Disable access to all except the starting page. We will enable pages
     * as we use them. This helps prevent bad pointers from working. The
     * pages start out PROT_NONE, become read/write while we access them,
     * then go to read-only after we finish our changes.
     *
     * We have to make the first page readable because we have 4 pad bytes,
     * followed by 4 length bytes, giving an initial offset of 8. The
     * generic code below assumes that there could have been a previous
     * allocation that wrote into those 4 pad bytes, therefore the page
     * must have been marked readable by the previous allocation.
     *
     * We insert an extra page in here to force a break in the memory map
     * so we can see ourselves more easily in "showmap". Otherwise this
     * stuff blends into the neighboring pages. [TODO: do we still need
     * the extra page now that we have ashmem?]
     */
    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
        ALOGW("LinearAlloc init mprotect failed: %s", strerror(errno));
        free(pHdr);
        return NULL;
    }
    if (mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE, SYSTEM_PAGE_SIZE,
            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
    {
        ALOGW("LinearAlloc init mprotect #2 failed: %s", strerror(errno));
        free(pHdr);
        return NULL;
    }

    if (ENFORCE_READ_ONLY) {
        /* allocate the per-page ref count */
        int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
        pHdr->writeRefCount = (short*)calloc(numPages, sizeof(short));
        if (pHdr->writeRefCount == NULL) {
            free(pHdr);
            return NULL;
        }
    }

    dvmInitMutex(&pHdr->lock);

    ALOGV("LinearAlloc: created region at %p-%p",
        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);

    return pHdr;
}

/*
 * Destroy a linear allocation area.
 *
 * We do a trivial "has everything been freed?" check before unmapping the
 * memory and freeing the LinearAllocHdr.
 */
void dvmLinearAllocDestroy(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);
    if (pHdr == NULL)
        return;

    checkAllFree(classLoader);

    //dvmLinearAllocDump(classLoader);

    if (gDvm.verboseShutdown) {
        ALOGV("Unmapping linear allocator base=%p", pHdr->mapAddr);
        ALOGD("LinearAlloc %p used %d of %d (%d%%)",
            classLoader, pHdr->curOffset, pHdr->mapLength,
            (pHdr->curOffset * 100) / pHdr->mapLength);
    }

    if (munmap(pHdr->mapAddr, pHdr->mapLength) != 0) {
        ALOGW("LinearAlloc munmap(%p, %d) failed: %s",
            pHdr->mapAddr, pHdr->mapLength, strerror(errno));
    }
    free(pHdr);
}

/*
 * Allocate "size" bytes of storage, associated with a particular class
 * loader.
 *
 * It's okay for size to be zero.
 *
 * We always leave "curOffset" pointing at the next place where we will
 * store the header that precedes the returned storage.
 *
 * This aborts the VM on failure, so it's not necessary to check for a
 * NULL return value.
 */
void* dvmLinearAlloc(Object* classLoader, size_t size)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    int startOffset, nextOffset;
    int lastGoodOff, firstWriteOff, lastWriteOff;

#ifdef DISABLE_LINEAR_ALLOC
    return calloc(1, size);
#endif

    LOGVV("--- LinearAlloc(%p, %d)", classLoader, size);

    /*
     * What we'd like to do is just determine the new end-of-alloc size
     * and atomic-swap the updated value in. The trouble is that the
     * first time we reach a new page, we need to call mprotect() to
     * make the page available, and we don't want to call mprotect() on
     * every allocation. The troubled situation is:
     *  - thread A allocs across a page boundary, but gets preempted
     *    before mprotect() completes
     *  - thread B allocs within the new page, and doesn't call mprotect()
     */
    dvmLockMutex(&pHdr->lock);

    startOffset = pHdr->curOffset;
    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);

    /*
     * Compute the new offset. The old offset points at the address where
     * we will store the hidden block header, so we advance past that,
     * add the size of data they want, add another header's worth so we
     * know we have room for that, and round up to BLOCK_ALIGN. That's
     * the next location where we'll put user data. We then subtract the
     * chunk header size off so we're back to the header pointer.
     *
     * Examples:
     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
     */
    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
    LOGVV("--- old=%d size=%d new=%d", startOffset, size, nextOffset);

    if (nextOffset > pHdr->mapLength) {
        /*
         * We don't have to abort here. We could fall back on the system
         * malloc(), and have our "free" call figure out what to do. Only
         * works if the users of these functions actually free everything
         * they allocate.
         */
        ALOGE("LinearAlloc exceeded capacity (%d), last=%d",
            pHdr->mapLength, (int) size);
        dvmAbort();
    }

    /*
     * Round up "size" to encompass the entire region, including the 0-7
     * pad bytes before the next chunk header. This way we get maximum
     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
     * stuff we always treat the full extent.
     */
    size = nextOffset - (startOffset + HEADER_EXTRA);
    LOGVV("--- (size now %d)", size);

    /*
     * See if we are starting on or have crossed into a new page. If so,
     * call mprotect on the page(s) we're about to write to. We have to
     * page-align the start address, but don't have to make the length a
     * SYSTEM_PAGE_SIZE multiple (but we do it anyway).
     *
     * Note that "startOffset" is not the last *allocated* byte, but rather
     * the offset of the first *unallocated* byte (which we are about to
     * write the chunk header to). "nextOffset" is similar.
     *
     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
     * we've written to this page before, because it might be read-only.
     */
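    /*
     * Worked example (assuming SYSTEM_PAGE_SIZE == 4096): if
     * startOffset=4100 and nextOffset=8204, then lastGoodOff=4096,
     * firstWriteOff=4096, and lastWriteOff=8192. lastGoodOff !=
     * lastWriteOff, so we mprotect() 8192 bytes starting at offset 4096.
     */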
    lastGoodOff = (startOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    firstWriteOff = startOffset & ~(SYSTEM_PAGE_SIZE-1);
    lastWriteOff = (nextOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    LOGVV("--- lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x",
        lastGoodOff, firstWriteOff, lastWriteOff);
    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
        int start, len, cc;

        start = firstWriteOff;
        assert(start <= nextOffset);
        len = (lastWriteOff - firstWriteOff) + SYSTEM_PAGE_SIZE;

        LOGVV("--- calling mprotect(start=%d len=%d RW)", start, len);
        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
        if (cc != 0) {
            ALOGE("LinearAlloc mprotect (+%d %d) failed: %s",
                start, len, strerror(errno));
            /* we're going to fail soon, might as well do it now */
            dvmAbort();
        }
    }

    /* update the ref counts on the now-writable pages */
    if (ENFORCE_READ_ONLY) {
        int i, start, end;

        start = firstWriteOff / SYSTEM_PAGE_SIZE;
        end = lastWriteOff / SYSTEM_PAGE_SIZE;

        LOGVV("--- marking pages %d-%d RW (alloc %d at %p)",
            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
        for (i = start; i <= end; i++)
            pHdr->writeRefCount[i]++;
    }

    /* stow the size in the header */
    if (ENFORCE_READ_ONLY)
        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
    else
        *(u4*)(pHdr->mapAddr + startOffset) = size;

    /*
     * Update data structure.
     */
    pHdr->curOffset = nextOffset;

    dvmUnlockMutex(&pHdr->lock);
    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
}

/*
 * Helper function, replaces strdup().
 */
char* dvmLinearStrdup(Object* classLoader, const char* str)
{
#ifdef DISABLE_LINEAR_ALLOC
    return strdup(str);
#endif
    int len = strlen(str);
    void* mem = dvmLinearAlloc(classLoader, len+1);
    memcpy(mem, str, len+1);
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
    return (char*) mem;
}

/*
 * "Reallocate" a piece of memory.
 *
 * If the new size is <= the old size, we return the original pointer
 * without doing anything.
 *
 * If the new size is > the old size, we allocate new storage, copy the
 * old stuff over, and mark the old storage as free.
 */
void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
{
#ifdef DISABLE_LINEAR_ALLOC
    return realloc(mem, newSize);
#endif
    /* make sure we have the right region (and mem != NULL) */
    assert(mem != NULL);
    assert(mem >= (void*) getHeader(classLoader)->mapAddr &&
           mem < (void*) (getHeader(classLoader)->mapAddr +
                          getHeader(classLoader)->curOffset));

    const u4* pLen = getBlockHeader(mem);
    ALOGV("--- LinearRealloc(%d) old=%d", newSize, *pLen);

    /* handle size reduction case */
    if (*pLen >= newSize) {
        if (ENFORCE_READ_ONLY)
            dvmLinearSetReadWrite(classLoader, mem);
        return mem;
    }

    void* newMem;

    newMem = dvmLinearAlloc(classLoader, newSize);
    assert(newMem != NULL);
    memcpy(newMem, mem, *pLen);
    dvmLinearFree(classLoader, mem);

    return newMem;
}

/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / SYSTEM_PAGE_SIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / SYSTEM_PAGE_SIZE;
    LOGVV("--- updating pages %d-%d (%d)", firstPage, lastPage, direction);
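
    /*
     * Worked example (assuming SYSTEM_PAGE_SIZE == 4096): for a block
     * whose header sits at offset 4100 (user pointer 4104) with len=8200,
     * firstPage = 4100/4096 = 1 and lastPage = (4104+8199)/4096 = 3, so
     * pages 1 through 3 are updated.
     */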

    int i, cc;

    /*
     * Update individual pages. We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    ALOGW("Double RO on %p", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                ALOGE("Can't make page %d any less writable", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("--- prot page %d RO", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
502 * Trying to mark writable.
504 if (pHdr->writeRefCount[i] >= 32767) {
505 ALOGE("Can't make page %d any more writable", i);
508 if (pHdr->writeRefCount[i] == 0) {
509 LOGVV("--- prot page %d RW", i);
510 cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
511 SYSTEM_PAGE_SIZE, PROT_READ | PROT_WRITE);
514 pHdr->writeRefCount[i]++;
516 if (i == firstPage) {
517 if ((*pLen & LENGTHFLAG_RW) != 0) {
518 ALOGW("Double RW on %p", mem);
521 *pLen |= LENGTHFLAG_RW;
526 dvmUnlockMutex(&pHdr->lock);

/*
 * Try to mark the pages in which a chunk of memory lives as read-only.
 * Whether or not the pages actually change state depends on how many
 * other chunks on those pages are still marked read-write.
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadOnly(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, -1);
}

/*
 * Make the pages on which "mem" sits read-write.
 *
 * This covers the header as well as the data itself. (We could add a
 * "header-only" mode for dvmLinearFree.)
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadWrite(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, 1);
}

/*
 * Mark an allocation as free.
 */
void dvmLinearFree(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    free(mem);
    return;
#endif
    if (mem == NULL)
        return;

    /* make sure we have the right region */
    assert(mem >= (void*) getHeader(classLoader)->mapAddr &&
           mem < (void*) (getHeader(classLoader)->mapAddr +
                          getHeader(classLoader)->curOffset));

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadWrite(classLoader, mem);

    u4* pLen = getBlockHeader(mem);
    *pLen |= LENGTHFLAG_FREE;

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
}

/*
 * For debugging, dump the contents of a linear alloc area.
 *
 * We grab the lock so that the header contents and list output are
 * consistent.
 */
void dvmLinearAllocDump(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    ALOGI("LinearAlloc classLoader=%p", classLoader);
    ALOGI("  mapAddr=%p mapLength=%d firstOffset=%d",
        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
    ALOGI("  curOffset=%d", pHdr->curOffset);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        ALOGI("  %p (%3d): %clen=%d%s", pHdr->mapAddr + off + HEADER_EXTRA,
            (int) ((off + HEADER_EXTRA) / SYSTEM_PAGE_SIZE),
            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
            rawLen & LENGTHFLAG_MASK,
            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");

        off += fullLen;
    }

    if (ENFORCE_READ_ONLY) {
        ALOGI("writeRefCount map:");

        int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
        int zstart = 0;
        int i;

        for (i = 0; i < numPages; i++) {
            int count = pHdr->writeRefCount[i];

            if (count != 0) {
                if (zstart < i-1)
                    printf(" %d-%d: zero\n", zstart, i-1);
                else if (zstart == i-1)
                    printf(" %d: zero\n", zstart);
                zstart = i+1;
                printf(" %d: %d\n", i, count);
            }
        }
        if (zstart < i)
            printf(" %d-%d: zero\n", zstart, i-1);
    }

    ALOGD("LinearAlloc %p using %d of %d (%d%%)",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Verify that all blocks are freed.
 *
 * This should only be done as we're shutting down, but there could be a
 * daemon thread that's still trying to do something, so we grab the locks.
 */
static void checkAllFree(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        if ((rawLen & LENGTHFLAG_FREE) == 0) {
            ALOGW("LinearAlloc %p not freed: %p len=%d", classLoader,
                pHdr->mapAddr + off + HEADER_EXTRA, rawLen & LENGTHFLAG_MASK);
        }

        off += fullLen;
    }

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Determine if [start, start+length) is contained in the in-use area of
 * a single LinearAlloc. The full set of linear allocators is scanned.
 *
 * [ Since we currently only have one region, this is pretty simple. In
 *   the future we'll need to traverse a table of class loaders. ]
 */
bool dvmLinearAllocContains(const void* start, size_t length)
{
    LinearAllocHdr* pHdr = getHeader(NULL);

    if (pHdr == NULL)
        return false;

    return (char*) start >= pHdr->mapAddr &&
        ((char*)start + length) <= (pHdr->mapAddr + pHdr->curOffset);
}
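
/*
 * Illustrative usage sketch (not part of the original code; "addr" and
 * "len" are hypothetical): a caller such as a verifier or GC might check
 * whether a pointer refers to write-once class data before treating it
 * as immutable:
 *
 *   if (dvmLinearAllocContains(addr, len)) {
 *       // "addr" lies inside the LinearAlloc region
 *   }
 */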