/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
          ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/
#include "malloc.h"

libc_hidden_proto(munmap)
/* ------------------------- __malloc_trim -------------------------

  __malloc_trim is an inverse of sorts to __malloc_alloc.  It gives memory
  back to the system (via negative arguments to sbrk) if there is unused
  memory at the `high' end of the malloc pool. It is called automatically by
  free() when top space exceeds the trim threshold. It is also called by the
  public malloc_trim routine.  It returns 1 if it actually released any
  memory, else 0.
*/
static int __malloc_trim(size_t pad, mstate av)
{
    long  top_size;        /* Amount of top-most memory */
    long  extra;           /* Amount to release */
    long  released;        /* Amount actually released */
    char* current_brk;     /* address returned by pre-check sbrk call */
    char* new_brk;         /* address returned by post-check sbrk call */
    size_t pagesz;

    pagesz = av->pagesize;
    top_size = chunksize(av->top);
    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
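
    /* Worked example (hypothetical numbers): with pagesz = 4096, pad = 0,
       MINSIZE = 16 and top_size = 20480 (five pages), the +(pagesz-1) term
       makes the division a ceiling: (20480 - 0 - 16 + 4095) / 4096 = 5.
       Subtracting the one page that is always kept leaves
       extra = 4 * 4096 = 16384 bytes to offer back to the system. */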
    if (extra > 0) {

        /*
           Only proceed if end of memory is where we last set it.
           This avoids problems if there were foreign sbrk calls.
        */
        current_brk = (char*)(MORECORE(0));
        if (current_brk == (char*)(av->top) + top_size) {
            /*
               Attempt to release memory. We ignore the MORECORE return value,
               and instead call again to find out where the new end of memory is.
               This avoids problems if the first call releases less than we asked,
               or if failure somehow altered the brk value. (We could still
               encounter problems if it altered brk in some very bad way,
               but the only thing we can do is adjust anyway, which will cause
               some downstream failure.)
            */

            MORECORE(-extra);
            new_brk = (char*)(MORECORE(0));

            if (new_brk != (char*)MORECORE_FAILURE) {
                released = (long)(current_brk - new_brk);

                if (released != 0) {
                    /* Success. Adjust top. */
                    av->sbrked_mem -= released;
                    set_head(av->top, (top_size - released) | PREV_INUSE);
                    return 1;
                }
            }
        }
    }
    return 0;
}
/* ------------------------- malloc_trim -------------------------

  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int malloc_trim(size_t pad)
{
    mstate av = get_malloc_state();
    __malloc_consolidate(av);
    return __malloc_trim(pad, av);
}
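
/*
  A minimal usage sketch, illustration only (not part of this file):
  after a program frees a burst of large allocations, an explicit
  malloc_trim() call can return the unused tail of the heap to the
  system.  The buffer count, sizes, and pad value are arbitrary.
*/
#if 0   /* example only, never compiled */
#include <stdlib.h>

static void trim_example(void)
{
    char *bufs[64];
    int i;

    for (i = 0; i < 64; i++)
        bufs[i] = malloc(32 * 1024);
    for (i = 0; i < 64; i++)
        free(bufs[i]);

    /* Keep 64K of slack for expected future allocations,
       give the rest back via a negative sbrk. */
    malloc_trim(64 * 1024);
}
#endif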
/*
  Initialize a malloc_state struct.

  This is called only from within __malloc_consolidate, which needs to
  be called in the same contexts anyway.  It is never called directly
  outside of __malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in __malloc_consolidate is fine though.)
*/
static void malloc_init_state(mstate av)
{
    int     i;
    mbinptr bin;
    /* Establish circular links for normal bins */
    for (i = 1; i < NBINS; ++i) {
        bin = bin_at(av, i);
        bin->fd = bin->bk = bin;
    }
    av->top_pad        = DEFAULT_TOP_PAD;
    av->n_mmaps_max    = DEFAULT_MMAP_MAX;
    av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    set_contiguous(av);
#else
    set_noncontiguous(av);
#endif

    set_max_fast(av, DEFAULT_MXFAST);

    av->top      = initial_top(av);
    av->pagesize = malloc_getpagesize;
}
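
/*
  Sketch, illustration only: the invariant malloc_init_state establishes
  for normal bins.  An empty bin is a circular list whose header points
  to itself, so emptiness needs no separate counter.  This helper is
  hypothetical and not part of the allocator.
*/
#if 0   /* example only, never compiled */
static int bin_is_empty(mbinptr bin)
{
    /* After init, bin->fd == bin->bk == bin for every normal bin. */
    return bin->fd == bin;
}
#endif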
/* ----------------------------------------------------------------------
 * ----------------------------------------------------------------------*/
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */

    /* These have same use as in free() */
    size_t          size;
    mchunkptr       nextchunk;
    size_t          nextsize;
    int             nextinuse;
    size_t          prevsize;
    mchunkptr       bck;
    mchunkptr       fwd;
    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
    */

    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);
        /*
           Remove each chunk from fast bin and consolidate it, placing it
           then in unsorted bin. Among other reasons for doing this,
           placing in unsorted bin avoids needing to calculate actual bins
           until malloc is sure that chunks aren't immediately going to be
           reused anyway.
        */

        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ( (p = *fb) != 0) {
                *fb = 0;

                do {
                    check_inuse_chunk(p);
                    nextp = p->fd;
                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }
                    if (nextchunk != av->top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    }
                    else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        av->top = p;
                    }

                } while ( (p = nextp) != 0);
            }
        } while (fb++ != maxfb);
    }
    else {
        malloc_init_state(av);
        check_malloc_state();
    }
}
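
/*
  Behavioral sketch, illustration only (assumes the default tuning of
  this malloc: 16-byte requests fall under max_fast, and a ~96K chunk
  is sbrk-backed yet at or above FASTBIN_CONSOLIDATION_THRESHOLD).
  Small frees park chunks in fastbins uncoalesced; a sufficiently large
  free later sweeps them through __malloc_consolidate into the unsorted
  bin, merging neighbors along the way.
*/
#if 0   /* example only, never compiled */
#include <stdlib.h>

static void consolidate_example(void)
{
    void *small[32];
    void *big;
    int i;

    big = malloc(96 * 1024);        /* large, sbrk-backed chunk */
    for (i = 0; i < 32; i++)
        small[i] = malloc(16);      /* fastbin-sized chunks */

    for (i = 0; i < 32; i++)
        free(small[i]);             /* parked in fastbins, no merging */

    free(big);                      /* large free: triggers consolidation */
}
#endif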
/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t          size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t          nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t          prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */
    /* free(0) has no effect */
    if (mem == NULL)
        return;

    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);
    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
    */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
            /* If TRIM_FASTBINS set, don't place chunks
               bordering top into fastbins */
            && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

        set_fastchunks(av);
        fb = &(av->fastbins[fastbin_index(size)]);
        p->fd = *fb;
        *fb = p;
    }
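
    /* Sketch, illustration only: the push above treats each fastbin as a
       LIFO singly-linked stack.  The matching pop on the malloc side
       would look like the hypothetical helper below (not part of this
       file):

           static mchunkptr fastbin_pop(mfastbinptr* fb)
           {
               mchunkptr victim = *fb;
               if (victim != 0)
                   *fb = victim->fd;   // unlink most recently freed chunk
               return victim;
           }
    */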
    /*
       Consolidate other non-mmapped chunks as they arrive.
    */

    else if (!chunk_is_mmapped(p)) {
        set_anychunks(av);

        nextchunk = chunk_at_offset(p, size);
        nextsize = chunksize(nextchunk);

        /* consolidate backward */
        if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
        }
        if (nextchunk != av->top) {
            /* get and clear inuse bit */
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            /* consolidate forward */
            if (!nextinuse) {
                unlink(nextchunk, bck, fwd);
                size += nextsize;
            }

            /*
               Place the chunk in unsorted chunk list. Chunks are
               not placed into regular bins until after they have
               been given one chance to be used in malloc.
            */

            bck = unsorted_chunks(av);
            fwd = bck->fd;
            p->bk = bck;
            p->fd = fwd;
            bck->fd = p;
            fwd->bk = p;

            set_head(p, size | PREV_INUSE);
            set_foot(p, size);
        }
        /*
           If the chunk borders the current high end of memory,
           consolidate into top
        */

        else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
        }
        /*
           If freeing a large space, consolidate possibly-surrounding
           chunks. Then, if the total unused topmost memory exceeds trim
           threshold, ask malloc_trim to reduce top.

           Unless max_fast is 0, we don't know if there are fastbins
           bordering top, so we cannot tell for sure whether threshold
           has been reached unless fastbins are consolidated.  But we
           don't want to consolidate on each free.  As a compromise,
           consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
           is reached.
        */

        if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
            if (have_fastchunks(av))
                __malloc_consolidate(av);

            if ((unsigned long)(chunksize(av->top)) >=
                    (unsigned long)(av->trim_threshold))
                __malloc_trim(av->top_pad, av);
        }
    }
    /*
       If the chunk was allocated via mmap, release via munmap().
       Note that if HAVE_MMAP is false but chunk_is_mmapped is
       true, then user must have overwritten memory. There's nothing
       we can do to catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered error.
    */

    else {
        int ret;
        size_t offset = p->prev_size;
        av->n_mmaps--;
        av->mmapped_mem -= (size + offset);
        ret = munmap((char*)p - offset, size + offset);
        /* munmap returns non-zero on failure */
        assert(ret == 0);
    }
}
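
/* Worked example (hypothetical numbers): if the mmap path aligned the
   chunk so that p sits 16 bytes past the start of the mapping, then
   p->prev_size == 16 records that offset, and the call above unmaps
   (char*)p - 16 with length size + 16, i.e. exactly the region that
   mmap originally returned. */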