/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #define LOG_TAG "MemoryDealer"
19 #include <binder/MemoryDealer.h>
20 #include <binder/IPCThreadState.h>
21 #include <binder/MemoryBase.h>
23 #include <utils/Log.h>
24 #include <utils/SortedVector.h>
25 #include <utils/String8.h>
26 #include <utils/threads.h>
37 #include <sys/types.h>
42 // ----------------------------------------------------------------------------
45 * A simple templatized doubly linked-list implementation
48 template <typename NODE>
55 LinkedList() : mFirst(0), mLast(0) { }
56 bool isEmpty() const { return mFirst == 0; }
57 NODE const* head() const { return mFirst; }
58 NODE* head() { return mFirst; }
59 NODE const* tail() const { return mLast; }
60 NODE* tail() { return mLast; }
62 void insertAfter(NODE* node, NODE* newNode) {
64 newNode->next = node->next;
65 if (node->next == 0) mLast = newNode;
66 else node->next->prev = newNode;
70 void insertBefore(NODE* node, NODE* newNode) {
71 newNode->prev = node->prev;
73 if (node->prev == 0) mFirst = newNode;
74 else node->prev->next = newNode;
78 void insertHead(NODE* newNode) {
80 mFirst = mLast = newNode;
81 newNode->prev = newNode->next = 0;
84 newNode->next = mFirst;
85 mFirst->prev = newNode;
90 void insertTail(NODE* newNode) {
94 newNode->prev = mLast;
96 mLast->next = newNode;
101 NODE* remove(NODE* node) {
102 if (node->prev == 0) mFirst = node->next;
103 else node->prev->next = node->next;
104 if (node->next == 0) mLast = node->prev;
105 else node->next->prev = node->prev;
110 // ----------------------------------------------------------------------------
112 class Allocation : public MemoryBase {
114 Allocation(const sp<MemoryDealer>& dealer,
115 const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
116 virtual ~Allocation();
118 sp<MemoryDealer> mDealer;
121 // ----------------------------------------------------------------------------
123 class SimpleBestFitAllocator
126 PAGE_ALIGNED = 0x00000001
129 SimpleBestFitAllocator(size_t size);
130 ~SimpleBestFitAllocator();
132 size_t allocate(size_t size, uint32_t flags = 0);
133 status_t deallocate(size_t offset);
135 void dump(const char* what) const;
136 void dump(String8& res, const char* what) const;
141 chunk_t(size_t start, size_t size)
142 : start(start), size(size), free(1), prev(0), next(0) {
147 mutable chunk_t* prev;
148 mutable chunk_t* next;
151 ssize_t alloc(size_t size, uint32_t flags);
152 chunk_t* dealloc(size_t start);
153 void dump_l(const char* what) const;
154 void dump_l(String8& res, const char* what) const;
156 static const int kMemoryAlign;
158 LinkedList<chunk_t> mList;
162 // ----------------------------------------------------------------------------
164 Allocation::Allocation(
165 const sp<MemoryDealer>& dealer,
166 const sp<IMemoryHeap>& heap, ssize_t offset, size_t size)
167 : MemoryBase(heap, offset, size), mDealer(dealer)
170 void* const start_ptr = (void*)(intptr_t(heap->base()) + offset);
171 memset(start_ptr, 0xda, size);
175 Allocation::~Allocation()
177 size_t freedOffset = getOffset();
178 size_t freedSize = getSize();
180 /* NOTE: it's VERY important to not free allocations of size 0 because
181 * they're special as they don't have any record in the allocator
182 * and could alias some real allocation (their offset is zero). */
184 // keep the size to unmap in excess
185 size_t pagesize = getpagesize();
186 size_t start = freedOffset;
187 size_t end = start + freedSize;
188 start &= ~(pagesize-1);
189 end = (end + pagesize-1) & ~(pagesize-1);
191 // give back to the kernel the pages we don't need
192 size_t free_start = freedOffset;
193 size_t free_end = free_start + freedSize;
194 if (start < free_start)
198 start = (start + pagesize-1) & ~(pagesize-1);
199 end &= ~(pagesize-1);
202 void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
203 size_t size = end-start;
206 memset(start_ptr, 0xdf, size);
209 // MADV_REMOVE is not defined on Dapper based Goobuntu
212 int err = madvise(start_ptr, size, MADV_REMOVE);
213 ALOGW_IF(err, "madvise(%p, %u, MADV_REMOVE) returned %s",
214 start_ptr, size, err<0 ? strerror(errno) : "Ok");
219 // This should be done after madvise(MADV_REMOVE), otherwise madvise()
220 // might kick out the memory region that's allocated and/or written
221 // right after the deallocation.
222 mDealer->deallocate(freedOffset);
226 // ----------------------------------------------------------------------------
228 MemoryDealer::MemoryDealer(size_t size, const char* name)
229 : mHeap(new MemoryHeapBase(size, 0, name)),
230 mAllocator(new SimpleBestFitAllocator(size))
234 MemoryDealer::~MemoryDealer()
239 sp<IMemory> MemoryDealer::allocate(size_t size)
242 const ssize_t offset = allocator()->allocate(size);
244 memory = new Allocation(this, heap(), offset, size);
249 void MemoryDealer::deallocate(size_t offset)
251 allocator()->deallocate(offset);
254 void MemoryDealer::dump(const char* what) const
256 allocator()->dump(what);
259 const sp<IMemoryHeap>& MemoryDealer::heap() const {
263 SimpleBestFitAllocator* MemoryDealer::allocator() const {
267 // ----------------------------------------------------------------------------
269 // align all the memory blocks on a cache-line boundary
270 const int SimpleBestFitAllocator::kMemoryAlign = 32;
272 SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
274 size_t pagesize = getpagesize();
275 mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
277 chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
278 mList.insertHead(node);
281 SimpleBestFitAllocator::~SimpleBestFitAllocator()
283 while(!mList.isEmpty()) {
284 delete mList.remove(mList.head());
288 size_t SimpleBestFitAllocator::size() const
293 size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
295 Mutex::Autolock _l(mLock);
296 ssize_t offset = alloc(size, flags);
300 status_t SimpleBestFitAllocator::deallocate(size_t offset)
302 Mutex::Autolock _l(mLock);
303 chunk_t const * const freed = dealloc(offset);
307 return NAME_NOT_FOUND;
310 ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
315 size = (size + kMemoryAlign-1) / kMemoryAlign;
316 chunk_t* free_chunk = 0;
317 chunk_t* cur = mList.head();
319 size_t pagesize = getpagesize();
322 if (flags & PAGE_ALIGNED)
323 extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;
326 if (cur->free && (cur->size >= (size+extra))) {
327 if ((!free_chunk) || (cur->size < free_chunk->size)) {
330 if (cur->size == size) {
338 const size_t free_size = free_chunk->size;
339 free_chunk->free = 0;
340 free_chunk->size = size;
341 if (free_size > size) {
343 if (flags & PAGE_ALIGNED)
344 extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
346 chunk_t* split = new chunk_t(free_chunk->start, extra);
347 free_chunk->start += extra;
348 mList.insertBefore(free_chunk, split);
351 ALOGE_IF((flags&PAGE_ALIGNED) &&
352 ((free_chunk->start*kMemoryAlign)&(pagesize-1)),
353 "PAGE_ALIGNED requested, but page is not aligned!!!");
355 const ssize_t tail_free = free_size - (size+extra);
357 chunk_t* split = new chunk_t(
358 free_chunk->start + free_chunk->size, tail_free);
359 mList.insertAfter(free_chunk, split);
362 return (free_chunk->start)*kMemoryAlign;
367 SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
369 start = start / kMemoryAlign;
370 chunk_t* cur = mList.head();
372 if (cur->start == start) {
373 LOG_FATAL_IF(cur->free,
374 "block at offset 0x%08lX of size 0x%08lX already freed",
375 cur->start*kMemoryAlign, cur->size*kMemoryAlign);
377 // merge freed blocks together
378 chunk_t* freed = cur;
381 chunk_t* const p = cur->prev;
382 chunk_t* const n = cur->next;
383 if (p && (p->free || !cur->size)) {
385 p->size += cur->size;
390 } while (cur && cur->free);
394 dump_l("dealloc (!freed->free)");
397 LOG_FATAL_IF(!freed->free,
398 "freed block at offset 0x%08lX of size 0x%08lX is not free!",
399 freed->start * kMemoryAlign, freed->size * kMemoryAlign);
408 void SimpleBestFitAllocator::dump(const char* what) const
410 Mutex::Autolock _l(mLock);
414 void SimpleBestFitAllocator::dump_l(const char* what) const
417 dump_l(result, what);
418 ALOGD("%s", result.string());
421 void SimpleBestFitAllocator::dump(String8& result,
422 const char* what) const
424 Mutex::Autolock _l(mLock);
425 dump_l(result, what);
428 void SimpleBestFitAllocator::dump_l(String8& result,
429 const char* what) const
433 chunk_t const* cur = mList.head();
435 const size_t SIZE = 256;
437 snprintf(buffer, SIZE, " %s (%p, size=%u)\n",
438 what, this, (unsigned int)mHeapSize);
440 result.append(buffer);
443 const char* errs[] = {"", "| link bogus NP",
444 "| link bogus PN", "| link bogus NP+PN" };
445 int np = ((cur->next) && cur->next->prev != cur) ? 1 : 0;
446 int pn = ((cur->prev) && cur->prev->next != cur) ? 2 : 0;
448 snprintf(buffer, SIZE, " %3u: %08x | 0x%08X | 0x%08X | %s %s\n",
449 i, int(cur), int(cur->start*kMemoryAlign),
450 int(cur->size*kMemoryAlign),
451 int(cur->free) ? "F" : "A",
454 result.append(buffer);
457 size += cur->size*kMemoryAlign;
462 snprintf(buffer, SIZE,
463 " size allocated: %u (%u KB)\n", int(size), int(size/1024));
464 result.append(buffer);
468 }; // namespace android