2 * libc/stdlib/malloc/free.c -- free function
4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 * This file is subject to the terms and conditions of the GNU Lesser
8 * General Public License. See the file COPYING.LIB in the main
9 * directory of this archive for more details.
11 * Written by Miles Bader <miles@gnu.org>
18 /* libc_hidden_proto(munmap) */
19 /* libc_hidden_proto(sbrk) */
24 #ifdef HEAP_USE_LOCKING
25 #define free_to_heap(mem, heap, lck) __free_to_heap(mem, heap, lck)
27 #define free_to_heap(mem, heap, lck) __free_to_heap(mem, heap)
/* Return the malloc'd block MEM to HEAP, coalescing it with adjacent
   free areas via __heap_free.  When the resulting free area grows past
   MALLOC_UNMAP_THRESHOLD, memory is given back to the system: on
   MALLOC_USE_SBRK builds by shrinking the brk (only possible when the
   area ends exactly at the current break), otherwise by munmapping --
   either whole mmap'd blocks (__UCLIBC_UCLINUX_BROKEN_MUNMAP__) or
   page-aligned sub-ranges of the free area.  The heap lock is dropped
   before each munmap system call and re-taken afterwards.
   HEAP_LOCK (present only under HEAP_USE_LOCKING) protects *HEAP.
   NOTE(review): this view of the file is missing lines (declarations,
   braces, and loop headers are partially elided); the comments below
   describe only what the visible code shows.  */
31 __free_to_heap (void *mem, struct heap_free_area **heap
32 #ifdef HEAP_USE_LOCKING
33 , malloc_mutex_t *heap_lock
38 struct heap_free_area *fa;
40 /* Check for special cases. */
46 MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)",
47 (long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem));
/* Recover the underlying block start/size from the user pointer.  */
49 size = MALLOC_SIZE (mem);
50 mem = MALLOC_BASE (mem);
52 __heap_lock (heap_lock);
54 /* Put MEM back in the heap, and get the free-area it was placed in. */
55 fa = __heap_free (heap, mem, size);
57 /* See if the free-area FA has grown big enough that it should be
   unmapped.  */
59 if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
60 /* Nope, nothing left to do, just release the lock. */
61 __heap_unlock (heap_lock);
63 /* Yup, try to unmap FA. */
65 unsigned long start = (unsigned long)HEAP_FREE_AREA_START (fa);
66 unsigned long end = (unsigned long)HEAP_FREE_AREA_END (fa);
67 #ifndef MALLOC_USE_SBRK
68 # ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
69 struct malloc_mmb *mmb, *prev_mmb;
70 unsigned long mmb_start, mmb_end;
71 # else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
72 unsigned long unmap_start, unmap_end;
73 # endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
74 #endif /* !MALLOC_USE_SBRK */
76 #ifdef MALLOC_USE_SBRK
77 /* Get the sbrk lock so that the two possible calls to sbrk below
78 are guaranteed to be contiguous. */
79 __malloc_lock_sbrk ();
80 /* When using sbrk, we only shrink the heap from the end. It would
81 be possible to allow _both_ -- shrinking via sbrk when possible,
82 and otherwise shrinking via munmap, but this results in holes in
83 memory that prevent the brk from ever growing back down; since
84 we only ever grow the heap via sbrk, this tends to produce a
85 continuously growing brk (though the actual memory is unmapped),
86 which could eventually run out of address space. Note that
87 `sbrk(0)' shouldn't normally do a system call, so this test is
   cheap.  */
89 if ((void *)end != sbrk (0))
91 MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
92 start, end, end - start);
93 __malloc_unlock_sbrk ();
94 __heap_unlock (heap_lock);
99 MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)",
100 start, end, end - start);
102 /* Remove FA from the heap. */
103 __heap_delete (heap, fa);
105 if (__heap_is_empty (heap))
106 /* We want to avoid the heap from losing all memory, so reserve
107 a bit. This test is only a heuristic -- the existence of
108 another free area, even if it's smaller than
109 MALLOC_MIN_SIZE, will cause us not to reserve anything. */
111 /* Put the reserved memory back in the heap; we assume that
112 MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
113 we use the latter unconditionally here. */
114 __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
115 start += MALLOC_MIN_SIZE;
118 #ifdef MALLOC_USE_SBRK
120 /* Release the heap lock; we're still holding the sbrk lock. */
121 __heap_unlock (heap_lock);
124 /* Release the sbrk lock too; now we hold no locks. */
125 __malloc_unlock_sbrk ();
127 #else /* !MALLOC_USE_SBRK */
129 # ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
130 /* Using the uClinux broken munmap, we have to only munmap blocks
131 exactly as we got them from mmap, so scan through our list of
132 mmapped blocks, and return them in order. */
134 MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...",
/* NOTE(review): the while-loop header that scans the mmb list is only
   partially visible here; the next line is a fragment of its
   condition.  */
138 mmb = __malloc_mmapped_blocks;
140 && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size)
143 MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]",
144 (unsigned)mmb, mmb_start, mmb_end - mmb_start);
146 if (mmb_start >= start
147 /* If the space between START and MMB_START is non-zero, but
148 too small to return to the heap, we can't unmap MMB. */
149 && (start == mmb_start
150 || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE))
152 struct malloc_mmb *next_mmb = mmb->next;
154 if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end)
155 /* There's too little space left at the end to deallocate
156 this block, so give up. */
159 MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]",
160 (unsigned)mmb, mmb_start, mmb_end - mmb_start);
162 if (mmb_start != start)
163 /* We're going to unmap a part of the heap that begins after
164 start, so put the intervening region back into the heap. */
166 MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]",
167 start, mmb_start - start);
168 __heap_free (heap, (void *)start, mmb_start - start);
171 MALLOC_MMB_DEBUG_INDENT (-1);
173 /* Unlink MMB from the list. */
175 prev_mmb->next = next_mmb;
177 __malloc_mmapped_blocks = next_mmb;
179 /* Start searching again from the end of this block. */
182 /* We have to unlock the heap before we recurse to free the mmb
183 descriptor, because we might be unmapping from the mmb
   heap.  */
185 __heap_unlock (heap_lock);
187 /* Release the descriptor block we used. */
188 free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
190 /* Do the actual munmap. */
191 munmap ((void *)mmb_start, mmb_end - mmb_start);
193 __heap_lock (heap_lock);
195 # ifdef __UCLIBC_HAS_THREADS__
196 /* In a multi-threaded program, it's possible that PREV_MMB has
197 been invalidated by another thread when we released the
198 heap lock to do the munmap system call, so just start over
199 from the beginning of the list. It sucks, but oh well;
200 it's probably not worth the bother to do better. */
202 mmb = __malloc_mmapped_blocks;
213 MALLOC_MMB_DEBUG_INDENT (-1);
217 /* Hmm, well there's something we couldn't unmap, so put it back
   into the heap.  */
220 MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]",
222 __heap_free (heap, (void *)start, end - start);
225 /* Finally release the lock for good. */
226 __heap_unlock (heap_lock);
228 MALLOC_MMB_DEBUG_INDENT (-1);
230 # else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
232 /* MEM/LEN may not be page-aligned, so we have to page-align them,
233 and return any left-over bits on the end to the heap. */
234 unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
235 unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
237 /* We have to be careful that any left-over bits are large enough to
238 return. Note that we _don't check_ to make sure there's room to
239 grow/shrink the start/end by another page, we just assume that
240 the unmap threshold is high enough so that this is always safe
241 (i.e., it should probably be at least 3 pages). */
242 if (unmap_start > start)
244 if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
245 unmap_start += MALLOC_PAGE_SIZE;
246 __heap_free (heap, (void *)start, unmap_start - start);
250 if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
251 unmap_end -= MALLOC_PAGE_SIZE;
252 __heap_free (heap, (void *)unmap_end, end - unmap_end);
255 /* Release the heap lock before we do the system call. */
256 __heap_unlock (heap_lock);
258 if (unmap_end > unmap_start)
259 /* Finally, actually unmap the memory. */
260 munmap ((void *)unmap_start, unmap_end - unmap_start);
262 # endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
264 #endif /* MALLOC_USE_SBRK */
267 MALLOC_DEBUG_INDENT (-1);
273 free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock);