2 This is a version (aka dlmalloc) of malloc/free/realloc written by
3 Doug Lea and released to the public domain. Use, modify, and
4 redistribute this code without permission or acknowledgement in any
5 way you wish. Send questions, comments, complaints, performance
6 data, etc to dl@cs.oswego.edu
8 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
10 Note: There may be an updated version of this malloc obtainable at
11 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
12 Check before installing!
14 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
26 libc_hidden_proto(mmap)
27 libc_hidden_proto(sysconf)
28 libc_hidden_proto(sbrk)
29 libc_hidden_proto(abort)
31 #ifdef __UCLIBC_HAS_THREADS__
33 extern pthread_mutex_t __malloc_lock;
35 #define LOCK __pthread_mutex_lock(&__malloc_lock)
36 #define UNLOCK __pthread_mutex_unlock(&__malloc_lock)
#else
#define LOCK
#define UNLOCK
#endif
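For illustration only, a minimal sketch of how a public entry point is expected to
bracket its work with these macros; the function names here are hypothetical and
not part of this file:

    static void *__hypothetical_alloc(size_t bytes);   /* stand-in for the real worker */

    void *example_malloc(size_t bytes)                 /* illustrative wrapper only */
    {
        void *mem;
        LOCK;       /* __pthread_mutex_lock(&__malloc_lock) when threads are enabled */
        mem = __hypothetical_alloc(bytes);
        UNLOCK;     /* both expand to nothing in single-threaded builds */
        return mem;
    }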
41 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
42 It must be a power of two at least 2 * (sizeof(size_t)), even on machines
43 for which smaller alignments would suffice. It may be defined as
44 larger than this though. Note however that code and data structures
45 are optimized for the case of 8-byte alignment.
47 #ifndef MALLOC_ALIGNMENT
48 #define MALLOC_ALIGNMENT (2 * (sizeof(size_t)))
51 /* The corresponding bit mask value */
52 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
55 TRIM_FASTBINS controls whether free() of a very small chunk can
56 immediately lead to trimming. Setting to true (1) can reduce memory
57 footprint, but will almost always slow down programs that use a lot
of small chunks.
60 Define this only if you are willing to give up some speed to more
61 aggressively reduce system-level memory footprint when releasing
62 memory in programs that use many small chunks. You can get
63 essentially the same effect by setting MXFAST to 0, but this can
64 lead to even greater slowdowns in programs using many small chunks.
65 TRIM_FASTBINS is an in-between compile-time option, that disables
66 only those chunks bordering topmost memory from being placed in
fastbins.
70 #define TRIM_FASTBINS 0
75 MORECORE-related declarations. By default, rely on sbrk
80 MORECORE is the name of the routine to call to obtain more memory
81 from the system. See below for general guidance on writing
82 alternative MORECORE functions, as well as a version for WIN32 and a
83 sample version for pre-OSX macos.
90 MORECORE_FAILURE is the value returned upon failure of MORECORE
91 as well as mmap. Since it cannot be an otherwise valid memory address,
92 and must reflect values of standard sys calls, you probably ought not
try to redefine it.
95 #ifndef MORECORE_FAILURE
96 #define MORECORE_FAILURE (-1)
100 If MORECORE_CONTIGUOUS is true, take advantage of fact that
101 consecutive calls to MORECORE with positive arguments always return
102 contiguous increasing addresses. This is true of unix sbrk. Even
103 if not defined, when regions happen to be contiguous, malloc will
104 permit allocations spanning regions obtained from different
105 calls. But defining this when applicable enables some stronger
106 consistency checks and space efficiencies.
108 #ifndef MORECORE_CONTIGUOUS
109 #define MORECORE_CONTIGUOUS 1
113 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
114 sbrk fails, and mmap is used as a backup (which is done only if
115 HAVE_MMAP). The value must be a multiple of page size. This
116 backup strategy generally applies only when systems have "holes" in
117 address space, so sbrk cannot perform contiguous expansion, but
118 there is still space available on system. On systems for which
119 this is known to be useful (i.e. most linux kernels), this occurs
120 only when programs allocate huge amounts of memory. Between this,
121 and the fact that mmap regions tend to be limited, the size should
122 be large, to avoid too many mmap calls and thus avoid running out
of kernel resources.
125 #ifndef MMAP_AS_MORECORE_SIZE
126 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
130 The system page size. To the extent possible, this malloc manages
131 memory from the system in page-size units. Note that this value is
132 cached during initialization into a field of malloc_state. So even
133 if malloc_getpagesize is a function, it is only called once.
135 The following mechanics for getpagesize were adapted from bsd/gnu
136 getpagesize.h. If none of the system-probes here apply, a value of
137 4096 is used, which should be OK: If they don't apply, then using
138 the actual value probably doesn't impact performance.
140 #ifndef malloc_getpagesize
142 # define malloc_getpagesize sysconf(_SC_PAGESIZE)
143 #else /* just guess */
144 # define malloc_getpagesize (4096)
148 /* mallopt tuning options */
151 M_MXFAST is the maximum request size used for "fastbins", special bins
152 that hold returned chunks without consolidating their spaces. This
153 enables future requests for chunks of the same size to be handled
154 very quickly, but can increase fragmentation, and thus increase the
155 overall memory footprint of a program.
157 This malloc manages fastbins very conservatively yet still
158 efficiently, so fragmentation is rarely a problem for values less
159 than or equal to the default. The maximum supported value of MXFAST
160 is 80. You wouldn't want it any higher than this anyway. Fastbins
161 are designed especially for use with many small structs, objects or
162 strings -- the default handles structs/objects/arrays with sizes up
163 to 16 4byte fields, or small strings representing words, tokens,
164 etc. Using fastbins for larger objects normally worsens
165 fragmentation without improving speed.
167 M_MXFAST is set in REQUEST size units. It is internally used in
168 chunksize units, which adds padding and alignment. You can reduce
169 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
170 algorithm to be a closer approximation of fifo-best-fit in all cases,
171 not just for larger requests, but will generally cause it to be
slower.
176 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
181 #ifndef DEFAULT_MXFAST
182 #define DEFAULT_MXFAST 64
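As a rough illustration of the tuning described above, an application can adjust
M_MXFAST through the standard mallopt interface; this is application-side example
code, assuming M_MXFAST is exposed by <malloc.h> as it is on glibc/uClibc:

    #include <malloc.h>

    int main(void)
    {
        mallopt(M_MXFAST, 0);    /* disable fastbins: closer to fifo-best-fit, usually slower */
        mallopt(M_MXFAST, 80);   /* or raise the limit toward its supported maximum */
        return 0;
    }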
187 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
188 to keep before releasing via malloc_trim in free().
190 Automatic trimming is mainly useful in long-lived programs.
191 Because trimming via sbrk can be slow on some systems, and can
192 sometimes be wasteful (in cases where programs immediately
193 afterward allocate more large chunks) the value should be high
194 enough so that your overall system performance would improve by
195 releasing this much memory.
197 The trim threshold and the mmap control parameters (see below)
198 can be traded off with one another. Trimming and mmapping are
199 two different ways of releasing unused memory back to the
200 system. Between these two, it is often possible to keep
201 system-level demands of a long-lived program down to a bare
202 minimum. For example, in one test suite of sessions measuring
203 the XF86 X server on Linux, using a trim threshold of 128K and a
204 mmap threshold of 192K led to near-minimal long term resource
consumption.
207 If you are using this malloc in a long-lived program, it should
208 pay to experiment with these values. As a rough guide, you
209 might set to a value close to the average size of a process
210 (program) running on your system. Releasing this much memory
211 would allow such a process to run in memory. Generally, it's
212 worth it to tune for trimming rather than memory mapping when a
213 program undergoes phases where several large chunks are
214 allocated and released in ways that can reuse each other's
215 storage, perhaps mixed with phases where there are no such
216 chunks at all. And in well-behaved long-lived programs,
217 controlling release of large blocks via trimming versus mapping
to mmap is usually faster.
220 However, in most programs, these parameters serve mainly as
221 protection against the system-level effects of carrying around
222 massive amounts of unneeded memory. Since frequent calls to
223 sbrk, mmap, and munmap otherwise degrade performance, the default
224 parameters are set to relatively high values that serve only as
safeguards.
227 The trim value must be greater than page size to have any useful
228 effect. To disable trimming completely, you can set to
(unsigned long)(-1).
231 Trim settings interact with fastbin (MXFAST) settings: Unless
232 TRIM_FASTBINS is defined, automatic trimming never takes place upon
233 freeing a chunk with size less than or equal to MXFAST. Trimming is
234 instead delayed until subsequent freeing of larger chunks. However,
235 you can still force an attempted trim by calling malloc_trim.
237 Also, trimming is not generally possible in cases where
238 the main arena is obtained via mmap.
240 Note that the trick some people use of mallocing a huge space and
241 then freeing it at program startup, in an attempt to reserve system
242 memory, doesn't have the intended effect under automatic trimming,
243 since that memory will immediately be returned to the system.
245 #define M_TRIM_THRESHOLD -1
247 #ifndef DEFAULT_TRIM_THRESHOLD
248 #define DEFAULT_TRIM_THRESHOLD (256 * 1024)
252 M_TOP_PAD is the amount of extra `padding' space to allocate or
253 retain whenever sbrk is called. It is used in two ways internally:
255 * When sbrk is called to extend the top of the arena to satisfy
256 a new malloc request, this much padding is added to the sbrk
request.
259 * When malloc_trim is called automatically from free(),
260 it is used as the `pad' argument.
262 In both cases, the actual amount of padding is rounded
263 so that the end of the arena is always a system page boundary.
265 The main reason for using padding is to avoid calling sbrk so
266 often. Having even a small pad greatly reduces the likelihood
267 that nearly every malloc request during program start-up (or
268 after trimming) will invoke sbrk, which needlessly wastes
time.
271 Automatic rounding-up to page-size units is normally sufficient
272 to avoid measurable overhead, so the default is 0. However, in
273 systems where sbrk is relatively slow, it can pay to increase
274 this value, at the expense of carrying around more memory than
the program needs.
279 #ifndef DEFAULT_TOP_PAD
280 #define DEFAULT_TOP_PAD (0)
284 M_MMAP_THRESHOLD is the request size threshold for using mmap()
285 to service a request. Requests of at least this size that cannot
286 be allocated using already-existing space will be serviced via mmap.
287 (If enough normal freed space already exists it is used instead.)
289 Using mmap segregates relatively large chunks of memory so that
290 they can be individually obtained and released from the host
291 system. A request serviced through mmap is never reused by any
292 other request (at least not directly; the system may just so
293 happen to remap successive requests to the same locations).
295 Segregating space in this way has the benefits that:
297 1. Mmapped space can ALWAYS be individually released back
298 to the system, which helps keep the system level memory
299 demands of a long-lived program low.
300 2. Mapped memory can never become `locked' between
301 other chunks, as can happen with normally allocated chunks, which
302 means that even trimming via malloc_trim would not release them.
303 3. On some systems with "holes" in address spaces, mmap can obtain
304 memory that sbrk cannot.
306 However, it has the disadvantages that:
308 1. The space cannot be reclaimed, consolidated, and then
309 used to service later requests, as happens with normal chunks.
310 2. It can lead to more wastage because of mmap page alignment
requirements.
312 3. It causes malloc performance to be more dependent on host
313 system memory management support routines which may vary in
314 implementation quality and may impose arbitrary
315 limitations. Generally, servicing a request via normal
316 malloc steps is faster than going through a system's mmap.
318 The advantages of mmap nearly always outweigh disadvantages for
319 "large" chunks, but the value of "large" varies across systems. The
320 default is an empirically derived value that works well in most
systems.
323 #define M_MMAP_THRESHOLD -3
325 #ifndef DEFAULT_MMAP_THRESHOLD
326 #define DEFAULT_MMAP_THRESHOLD (256 * 1024)
330 M_MMAP_MAX is the maximum number of requests to simultaneously
331 service using mmap. This parameter exists because
332 . Some systems have a limited number of internal tables for
333 use by mmap, and using more than a few of them may degrade
performance.
336 The default is set to a value that serves only as a safeguard.
337 Setting to 0 disables use of mmap for servicing large requests. If
338 HAVE_MMAP is not set, the default value is 0, and attempts to set it
339 to non-zero values in mallopt will fail.
341 #define M_MMAP_MAX -4
343 #ifndef DEFAULT_MMAP_MAX
344 #define DEFAULT_MMAP_MAX (65536)
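A sketch of how a long-lived application might combine the trim and mmap parameters
described above (the 128K/192K pair is the X server example from the M_TRIM_THRESHOLD
comment); application-side code, assuming the usual <malloc.h> interfaces:

    #include <malloc.h>

    static void tune_for_long_lived_process(void)    /* hypothetical helper */
    {
        mallopt(M_TRIM_THRESHOLD, 128 * 1024);   /* give back top-of-heap memory beyond 128K */
        mallopt(M_MMAP_THRESHOLD, 192 * 1024);   /* service requests of 192K and up via mmap */
        mallopt(M_MMAP_MAX, 65536);              /* keep the default cap on mmapped chunks */
        malloc_trim(0);                          /* force one immediate trim attempt */
    }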
348 /* ------------------ MMAP support ------------------ */
350 #include <sys/mman.h>
352 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
353 #define MAP_ANONYMOUS MAP_ANON
356 #ifdef __ARCH_HAS_MMU__
358 #define MMAP(addr, size, prot) \
359 (mmap((addr), (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, 0, 0))
#else
363 #define MMAP(addr, size, prot) \
364 (mmap((addr), (size), (prot), MAP_SHARED|MAP_ANONYMOUS, 0, 0))
#endif
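A minimal usage sketch of the MMAP wrapper; the helper name is hypothetical, and on
success the returned region is anonymous and zero-filled:

    static void *get_anon_region(size_t size)    /* illustration only */
    {
        void *block = MMAP(0, size, PROT_READ | PROT_WRITE);
        return (block == MAP_FAILED) ? 0 : block;
    }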
369 /* ----------------------- Chunk representations ----------------------- */
373 This struct declaration is misleading (but accurate and necessary).
374 It declares a "view" into memory allowing access to necessary
375 fields at known offsets from a given base. See explanation below.
378 struct malloc_chunk {
380 size_t prev_size; /* Size of previous chunk (if free). */
381 size_t size; /* Size in bytes, including overhead. */
383 struct malloc_chunk* fd; /* double links -- used only if free. */
384 struct malloc_chunk* bk;
};
388 typedef struct malloc_chunk* mchunkptr;
391 malloc_chunk details:
393 (The following includes lightly edited explanations by Colin Plumb.)
395 Chunks of memory are maintained using a `boundary tag' method as
396 described in e.g., Knuth or Standish. (See the paper by Paul
397 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
398 survey of such techniques.) Sizes of free chunks are stored both
399 in the front of each chunk and at the end. This makes
400 consolidating fragmented chunks into bigger chunks very fast. The
401 size fields also hold bits representing whether chunks are free or
in use.
404 An allocated chunk looks like this:
407 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
408 | Size of previous chunk, if allocated | |
409 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
410 | Size of chunk, in bytes |P|
411 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
412 | User data starts here... .
414 . (malloc_usable_space() bytes) .
416 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
418 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
421 Where "chunk" is the front of the chunk for the purpose of most of
422 the malloc code, but "mem" is the pointer that is returned to the
423 user. "Nextchunk" is the beginning of the next contiguous chunk.
425 Chunks always begin on even word boundaries, so the mem portion
426 (which is returned to the user) is also on an even word boundary, and
427 thus at least double-word aligned.
429 Free chunks are stored in circular doubly-linked lists, and look like this:
431 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
432 | Size of previous chunk |
433 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
434 `head:' | Size of chunk, in bytes |P|
435 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
436 | Forward pointer to next chunk in list |
437 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
438 | Back pointer to previous chunk in list |
439 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
440 | Unused space (may be 0 bytes long) .
443 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
444 `foot:' | Size of chunk, in bytes |
445 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
447 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
448 chunk size (which is always a multiple of two words), is an in-use
449 bit for the *previous* chunk. If that bit is *clear*, then the
450 word before the current chunk size contains the previous chunk
451 size, and can be used to find the front of the previous chunk.
452 The very first chunk allocated always has this bit set,
453 preventing access to non-existent (or non-owned) memory. If
454 prev_inuse is set for any given chunk, then you CANNOT determine
455 the size of the previous chunk, and might even get a memory
456 addressing fault when trying to do so.
458 Note that the `foot' of the current chunk is actually represented
459 as the prev_size of the NEXT chunk. This makes it easier to
460 deal with alignments etc but can be very confusing when trying
461 to extend or adapt this code.
463 The two exceptions to all this are
465 1. The special chunk `top' doesn't bother using the
466 trailing size field since there is no next contiguous chunk
467 that would have to index off it. After initialization, `top'
468 is forced to always exist. If it would become less than
469 MINSIZE bytes long, it is replenished.
471 2. Chunks allocated via mmap, which have the second-lowest-order
472 bit (IS_MMAPPED) set in their size fields. Because they are
473 allocated one-by-one, each must contain its own trailing size field.
478 ---------- Size and alignment checks and conversions ----------
481 /* conversion from malloc headers to user pointers, and back */
483 #define chunk2mem(p) ((void*)((char*)(p) + 2*(sizeof(size_t))))
484 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*(sizeof(size_t))))
486 /* The smallest possible chunk */
487 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
489 /* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
492 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
494 /* Check if m has acceptable alignment */
496 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
499 /* Check if a request is so large that it would wrap around zero when
500 padded and aligned. To simplify some other code, the bound is made
501 low enough so that adding MINSIZE will also not wrap around zero.
504 #define REQUEST_OUT_OF_RANGE(req) \
505 ((unsigned long)(req) >= \
506 (unsigned long)(size_t)(-2 * MINSIZE))
508 /* pad request bytes into a usable size -- internal version */
510 #define request2size(req) \
511 (((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK < MINSIZE) ? \
MINSIZE : \
513 ((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
515 /* Same, except also perform argument check */
517 #define checked_request2size(req, sz) \
518 if (REQUEST_OUT_OF_RANGE(req)) { \
errno = ENOMEM; \
return 0; \
} \
522 (sz) = request2size(req);
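A worked example of the conversion, assuming a 32-bit target where sizeof(size_t) is 4,
so MALLOC_ALIGNMENT is 8 and MINSIZE is 16, followed by a hypothetical caller showing
where checked_request2size fits:

    /* request2size(1)  : 1 + 4 + 7 = 12, below MINSIZE, so the result is 16 bytes */
    /* request2size(20) : (20 + 4 + 7) & ~7 = 24 bytes                             */
    /* request2size(64) : (64 + 4 + 7) & ~7 = 72 bytes                             */

    static void *toy_malloc(size_t bytes)     /* illustration only */
    {
        size_t nb;
        checked_request2size(bytes, nb);  /* bails out early if the request is out of range */
        /* ... find or create a chunk of at least nb bytes ... */
        return 0;
    }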
525 --------------- Physical chunk operations ---------------
529 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
530 #define PREV_INUSE 0x1
532 /* extract inuse bit of previous chunk */
533 #define prev_inuse(p) ((p)->size & PREV_INUSE)
536 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
537 #define IS_MMAPPED 0x2
539 /* check for mmap()'ed chunk */
540 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
542 /* Bits to mask off when extracting size
544 Note: IS_MMAPPED is intentionally not masked off from size field in
545 macros for which mmapped chunks should never be seen. This should
546 cause helpful core dumps to occur if it is tried by accident by
547 people extending or adapting this malloc.
549 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
551 /* Get size, ignoring use bits */
552 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
555 /* Ptr to next physical malloc_chunk. */
556 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
558 /* Ptr to previous physical malloc_chunk */
559 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
561 /* Treat space at ptr + offset as a chunk */
562 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
564 /* extract p's inuse bit */
#define inuse(p)\
566 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
568 /* set/clear chunk as being inuse without otherwise disturbing */
569 #define set_inuse(p)\
570 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
572 #define clear_inuse(p)\
573 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
576 /* check/set/clear inuse bits in known places */
577 #define inuse_bit_at_offset(p, s)\
578 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
580 #define set_inuse_bit_at_offset(p, s)\
581 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
583 #define clear_inuse_bit_at_offset(p, s)\
584 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
587 /* Set size at head, without disturbing its use bit */
588 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
590 /* Set size/use field */
591 #define set_head(p, s) ((p)->size = (s))
593 /* Set size at footer (only when chunk is not in use) */
594 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
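To make the interplay of these macros concrete, here is a sketch (mirroring the
splitting pattern used by the allocation code, but simplified; the real code also
links the remainder into a bin) of carving an allocated piece of nb bytes out of a
chunk of 'size' bytes:

    static mchunkptr split_chunk_sketch(mchunkptr victim, size_t size, size_t nb)
    {
        size_t    remainder_size = size - nb;
        mchunkptr remainder      = chunk_at_offset(victim, nb);

        set_head(victim, nb | PREV_INUSE);                 /* piece handed to the caller */
        set_head(remainder, remainder_size | PREV_INUSE);  /* free piece; previous chunk is in use */
        set_foot(remainder, remainder_size);               /* trailing size, used for coalescing */
        return remainder;
    }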
597 /* -------------------- Internal data structures -------------------- */
602 An array of bin headers for free chunks. Each bin is doubly
603 linked. The bins are approximately proportionally (log) spaced.
604 There are a lot of these bins (128). This may look excessive, but
605 works very well in practice. Most bins hold sizes that are
606 unusual as malloc request sizes, but are more usual for fragments
607 and consolidated sets of chunks, which is what these bins hold, so
608 they can be found quickly. All procedures maintain the invariant
609 that no consolidated chunk physically borders another one, so each
610 chunk in a list is known to be preceded and followed by either
611 inuse chunks or the ends of memory.
613 Chunks in bins are kept in size order, with ties going to the
614 approximately least recently used chunk. Ordering isn't needed
615 for the small bins, which all contain the same-sized chunks, but
616 facilitates best-fit allocation for larger chunks. These lists
617 are just sequential. Keeping them in order almost never requires
618 enough traversal to warrant using fancier ordered data
structures.
621 Chunks of the same size are linked with the most
622 recently freed at the front, and allocations are taken from the
623 back. This results in LRU (FIFO) allocation order, which tends
624 to give each chunk an equal opportunity to be consolidated with
625 adjacent freed chunks, resulting in larger free chunks and less
fragmentation.
628 To simplify use in double-linked lists, each bin header acts
629 as a malloc_chunk. This avoids special-casing for headers.
630 But to conserve space and improve locality, we allocate
631 only the fd/bk pointers of bins, and then use repositioning tricks
632 to treat these as the fields of a malloc_chunk*.
635 typedef struct malloc_chunk* mbinptr;
637 /* addressing -- note that bin_at(0) does not exist */
638 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - ((sizeof(size_t))<<1)))
640 /* analog of ++bin */
641 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
643 /* Reminders about list directionality within bins */
644 #define first(b) ((b)->fd)
645 #define last(b) ((b)->bk)
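The repositioning trick can be read directly from bin_at: subtracting two size_t
widths makes the fd/bk fields of the fictitious chunk header land exactly on the
bin's two array slots. A sketch, assuming pointers and size_t have the same width
(the mstate type is defined further below):

    static void bin_peek_sketch(mstate av)    /* illustration only */
    {
        mbinptr bin = bin_at(av, 5);          /* header of (arbitrary) bin number 5 */

        /* bin->fd aliases av->bins[10] and bin->bk aliases av->bins[11], because fd
           and bk sit 2 and 3 size_t widths into a malloc_chunk; no full chunk header
           is ever stored for a bin. */
        mchunkptr newest = first(bin);        /* frees are pushed on the front */
        mchunkptr oldest = last(bin);         /* allocations are taken from the back */
        (void)newest; (void)oldest;
    }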
647 /* Take a chunk off a bin list */
648 #define unlink(P, BK, FD) { \
FD = P->fd; \
BK = P->bk; \
651 if (FD->bk != P || BK->fd != P) \
abort(); \
FD->bk = BK; \
BK->fd = FD; \
}
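A sketch of the typical call pattern, taking the oldest chunk off a normal bin;
bck/fwd are scratch variables that unlink fills in:

    static mchunkptr take_from_bin_sketch(mbinptr bin)    /* illustration only */
    {
        mchunkptr victim = last(bin);     /* oldest entry: FIFO allocation order */
        mchunkptr bck, fwd;

        if (victim != bin) {              /* an empty bin's header links to itself */
            unlink(victim, bck, fwd);     /* aborts if the list links are inconsistent */
            return victim;
        }
        return 0;
    }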
660 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
661 8 bytes apart. Larger bins are approximately logarithmically spaced:
64 bins of size       8
32 bins of size      64
16 bins of size     512
 8 bins of size    4096
 4 bins of size   32768
668  2 bins of size  262144
669  1 bin  of size what's left
671 The bins top out around 1MB because we expect to service large
requests via mmap.
#define NBINS 96
676 #define NSMALLBINS 32
677 #define SMALLBIN_WIDTH 8
678 #define MIN_LARGE_SIZE 256
680 #define in_smallbin_range(sz) \
681 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
683 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
685 #define bin_index(sz) \
686 ((in_smallbin_range(sz)) ? smallbin_index(sz) : __malloc_largebin_index(sz))
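Worked examples of the index arithmetic (chunk sizes are already multiples of 8):

    /* bin_index(24)  : 24 < 256, so smallbin_index(24)   = 24 >> 3  = 3  */
    /* bin_index(248) : 248 < 256, so smallbin_index(248) = 248 >> 3 = 31 */
    /* bin_index(256) : not in smallbin range, so __malloc_largebin_index(256) is used */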
689 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
690 first bin that is maintained in sorted order. This must
691 be the smallest size corresponding to a given bin.
693 Normally, this should be MIN_LARGE_SIZE. But you can weaken
694 best fit guarantees to sometimes speed up malloc by increasing value.
695 Doing this means that malloc may choose a chunk that is
696 non-best-fitting by up to the width of the bin.
698 Some useful cutoff values:
699 512 - all bins sorted
700 2560 - leaves bins <= 64 bytes wide unsorted
701 12288 - leaves bins <= 512 bytes wide unsorted
702 65536 - leaves bins <= 4096 bytes wide unsorted
703 262144 - leaves bins <= 32768 bytes wide unsorted
704 -1 - no bins sorted (not recommended!)
707 #define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
708 /* #define FIRST_SORTED_BIN_SIZE 65536 */
713 All remainders from chunk splits, as well as all returned chunks,
714 are first placed in the "unsorted" bin. They are then placed
715 in regular bins after malloc gives them ONE chance to be used before
716 binning. So, basically, the unsorted_chunks list acts as a queue,
717 with chunks being placed on it in free (and __malloc_consolidate),
718 and taken off (to be either used or placed in bins) in malloc.
721 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
722 #define unsorted_chunks(M) (bin_at(M, 1))
727 The top-most available chunk (i.e., the one bordering the end of
728 available memory) is treated specially. It is never included in
729 any bin, is used only if no other chunk is available, and is
730 released back to the system if it is very large (see
731 M_TRIM_THRESHOLD). Because top initially
732 points to its own bin with initial zero size, thus forcing
733 extension on the first malloc request, we avoid having any special
734 code in malloc to check whether it even exists yet. But we still
735 need to do so when getting memory from system, so we make
736 initial_top treat the bin as a legal but unusable chunk during the
737 interval between initialization and the first call to
738 __malloc_alloc. (This is somewhat delicate, since it relies on
739 the 2 preceding words to be zero during this interval as well.)
742 /* Conveniently, the unsorted bin can be used as dummy top on first call */
743 #define initial_top(M) (unsorted_chunks(M))
748 To help compensate for the large number of bins, a one-level index
749 structure is used for bin-by-bin searching. `binmap' is a
750 bitvector recording whether bins are definitely empty so they can
751 be skipped over during traversals. The bits are NOT always
752 cleared as soon as bins are empty, but instead only
753 when they are noticed to be empty during traversal in malloc.
756 /* Conservatively use 32 bits per map word, even if on 64bit system */
757 #define BINMAPSHIFT 5
758 #define BITSPERMAP (1U << BINMAPSHIFT)
759 #define BINMAPSIZE (NBINS / BITSPERMAP)
761 #define idx2block(i) ((i) >> BINMAPSHIFT)
762 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
764 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
765 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
766 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
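A sketch of how the map is consulted while scanning bins (the mstate type is defined
further below); this mirrors the lazy-clearing rule described above:

    static int bin_maybe_nonempty_sketch(mstate av, int i)    /* illustration only */
    {
        if (get_binmap(av, i) == 0)
            return 0;                  /* bit clear: bin i is definitely empty, skip it */
        if (first(bin_at(av, i)) == bin_at(av, i)) {
            unmark_bin(av, i);         /* noticed to be empty only now; clear the stale bit */
            return 0;
        }
        return 1;                      /* bin i really holds at least one chunk */
    }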
771 An array of lists holding recently freed small chunks. Fastbins
772 are not doubly linked. It is faster to single-link them, and
773 since chunks are never removed from the middles of these lists,
774 double linking is not necessary. Also, unlike regular bins, they
775 are not even processed in FIFO order (they use faster LIFO) since
776 ordering doesn't much matter in the transient contexts in which
777 fastbins are normally used.
779 Chunks in fastbins keep their inuse bit set, so they cannot
780 be consolidated with other free chunks. __malloc_consolidate
781 releases all chunks in fastbins and consolidates them with
other free chunks.
785 typedef struct malloc_chunk* mfastbinptr;
787 /* offset 2 to use otherwise unindexable first 2 bins */
788 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
790 /* The maximum fastbin request size we support */
791 #define MAX_FAST_SIZE 80
793 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
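Worked example, again assuming 4-byte size_t so that request2size(80) is 88:

    /* fastbin_index(16) = (16 >> 3) - 2 = 0    (the smallest possible chunk)  */
    /* fastbin_index(24) = (24 >> 3) - 2 = 1                                   */
    /* fastbin_index(request2size(80)) = (88 >> 3) - 2 = 9, so NFASTBINS is 10 */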
796 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
797 that triggers automatic consolidation of possibly-surrounding
798 fastbin chunks. This is a heuristic, so the exact value should not
799 matter too much. It is defined at half the default trim threshold as a
800 compromise heuristic to only attempt consolidation if it is likely
801 to lead to trimming. However, it is not dynamically tunable, since
802 consolidation reduces fragmentation surrounding large chunks even
803 if trimming is not used.
806 #define FASTBIN_CONSOLIDATION_THRESHOLD \
807 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
810 Since the lowest 2 bits in max_fast don't matter in size comparisons,
811 they are used as flags.
815 ANYCHUNKS_BIT held in max_fast indicates that there may be any
816 freed chunks at all. It is set true when entering a chunk into any
bin.
820 #define ANYCHUNKS_BIT (1U)
822 #define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
823 #define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
824 #define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
827 FASTCHUNKS_BIT held in max_fast indicates that there are probably
828 some fastbin chunks. It is set true on entering a chunk into any
829 fastbin, and cleared only in __malloc_consolidate.
832 #define FASTCHUNKS_BIT (2U)
834 #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
835 #define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
836 #define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
838 /* Set value of max_fast. Use impossibly small value if 0. */
839 #define set_max_fast(M, s) \
840 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
841 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
843 #define get_max_fast(M) \
844 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
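A sketch of how the packed word behaves, again assuming 4-byte size_t so that
request2size(64) is 72:

    /* set_max_fast(av, 64) : size part becomes 72; existing flag bits are preserved   */
    /* set_fastchunks(av)   : ORs in FASTCHUNKS_BIT|ANYCHUNKS_BIT (72 becomes 75)      */
    /* get_max_fast(av)     : still reports 72; the two flag bits are masked off       */
    /* set_max_fast(av, 0)  : size part becomes SMALLBIN_WIDTH (8), disabling fastbins */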
848 morecore_properties is a status word holding dynamically discovered
849 or controlled properties of the morecore function
852 #define MORECORE_CONTIGUOUS_BIT (1U)
854 #define contiguous(M) \
855 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
856 #define noncontiguous(M) \
857 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
858 #define set_contiguous(M) \
859 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
860 #define set_noncontiguous(M) \
861 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
865 ----------- Internal state representation and initialization -----------
868 struct malloc_state {
870 /* The maximum chunk size to be eligible for fastbin */
871 size_t max_fast; /* low 2 bits used as flags */
874 mfastbinptr fastbins[NFASTBINS];
876 /* Base of the topmost chunk -- not otherwise kept in a bin */
mchunkptr top;

879 /* The remainder from the most recent split of a small request */
880 mchunkptr last_remainder;
882 /* Normal bins packed as described above */
883 mchunkptr bins[NBINS * 2];
885 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
886 unsigned int binmap[BINMAPSIZE+1];
888 /* Tunable parameters */
889 unsigned long trim_threshold;
size_t top_pad;
891 size_t mmap_threshold;
893 /* Memory map support */
int n_mmaps;
int n_mmaps_max;
int max_n_mmaps;

898 /* Cache malloc_getpagesize */
899 unsigned int pagesize;
901 /* Track properties of MORECORE */
902 unsigned int morecore_properties;
/* Statistics */
size_t mmapped_mem;
size_t sbrked_mem;
907 size_t max_sbrked_mem;
908 size_t max_mmapped_mem;
909 size_t max_total_mem;
912 typedef struct malloc_state *mstate;
915 There is exactly one instance of this struct in this malloc.
916 If you are adapting this malloc in a way that does NOT use a static
917 malloc_state, you MUST explicitly zero-fill it before using. This
918 malloc relies on the property that malloc_state is initialized to
919 all zeroes (as is true of C statics).
921 extern struct malloc_state __malloc_state; /* never directly referenced */
924 All uses of __malloc_state are via get_malloc_state().
925 At most one "call" to get_malloc_state is made per invocation of
926 the public versions of malloc and free, but other routines
927 that in turn invoke malloc and/or free may call it more than once.
928 Also, it is called in check* routines if __MALLOC_DEBUGGING is set.
931 #define get_malloc_state() (&(__malloc_state))
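Typical usage pattern inside a public entry point; a sketch only, with a hypothetical
function name (the real routines also take LOCK/UNLOCK from above):

    void *toy_entry_point(size_t bytes)     /* illustration only */
    {
        mstate av = get_malloc_state();     /* fetched once per public call */

        if (!have_anychunks(av)) {
            /* nothing is free anywhere: go straight to extending top via sbrk/mmap */
        }
        /* ... fastbin, bin, and top searches all use 'av' from here on ... */
        return 0;
    }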
933 /* External internal utilities operating on mstates */
934 void __malloc_consolidate(mstate) attribute_hidden;
937 /* Debugging support */
938 #if ! __MALLOC_DEBUGGING
940 #define check_chunk(P)
941 #define check_free_chunk(P)
942 #define check_inuse_chunk(P)
943 #define check_remalloced_chunk(P,N)
944 #define check_malloced_chunk(P,N)
945 #define check_malloc_state()
946 #define assert(x) ((void)0)
#else

951 #define check_chunk(P) __do_check_chunk(P)
952 #define check_free_chunk(P) __do_check_free_chunk(P)
953 #define check_inuse_chunk(P) __do_check_inuse_chunk(P)
954 #define check_remalloced_chunk(P,N) __do_check_remalloced_chunk(P,N)
955 #define check_malloced_chunk(P,N) __do_check_malloced_chunk(P,N)
956 #define check_malloc_state() __do_check_malloc_state()
958 extern void __do_check_chunk(mchunkptr p);
959 extern void __do_check_free_chunk(mchunkptr p);
960 extern void __do_check_inuse_chunk(mchunkptr p);
961 extern void __do_check_remalloced_chunk(mchunkptr p, size_t s);
962 extern void __do_check_malloced_chunk(mchunkptr p, size_t s);
963 extern void __do_check_malloc_state(void);