/*
 * libc/stdlib/malloc/free.c -- free function
 *
 * Copyright (C) 2002,03 NEC Electronics Corporation
 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License. See the file COPYING.LIB in the main
 * directory of this archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>


#include "malloc.h"
#include "heap.h"

#ifdef HEAP_USE_LOCKING
#define free_to_heap(mem, heap, lck) __free_to_heap(mem, heap, lck)
#else
#define free_to_heap(mem, heap, lck) __free_to_heap(mem, heap)
#endif
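/* When locking support is compiled out, the free_to_heap() wrapper simply
   drops the lock argument, so call sites can pass a lock unconditionally. */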

static void
__free_to_heap (void *mem, struct heap_free_area **heap
#ifdef HEAP_USE_LOCKING
                , malloc_mutex_t *heap_lock
#endif
               )
{
  size_t size;
  struct heap_free_area *fa;

  /* Check for special cases. */
  if (unlikely (! mem))
    return;

  /* Normal free. */

  MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)",
                (long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem));

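  /* MALLOC_SIZE and MALLOC_BASE (see malloc.h) recover the block's total
     size and its true start from the bookkeeping header that malloc keeps
     just before the pointer handed out to the user. */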
  size = MALLOC_SIZE (mem);
  mem = MALLOC_BASE (mem);

  __heap_lock (heap_lock);

  /* Put MEM back in the heap, and get the free-area it was placed in. */
  fa = __heap_free (heap, mem, size);

  /* See if the free-area FA has grown big enough that it should be
     unmapped. */
  if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
    /* Nope, nothing left to do, just release the lock. */
    __heap_unlock (heap_lock);
  else
    /* Yup, try to unmap FA. */
    {
      unsigned long start = (unsigned long)HEAP_FREE_AREA_START (fa);
      unsigned long end = (unsigned long)HEAP_FREE_AREA_END (fa);
#ifndef MALLOC_USE_SBRK
# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
      struct malloc_mmb *mmb, *prev_mmb;
      unsigned long mmb_start, mmb_end;
# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
      unsigned long unmap_start, unmap_end;
# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
#endif /* !MALLOC_USE_SBRK */

#ifdef MALLOC_USE_SBRK
      /* Get the sbrk lock so that the two possible calls to sbrk below
         are guaranteed to be contiguous. */
      __malloc_lock_sbrk ();
      /* When using sbrk, we only shrink the heap from the end. It would
         be possible to allow _both_ -- shrinking via sbrk when possible,
         and otherwise shrinking via munmap, but this results in holes in
         memory that prevent the brk from ever growing back down; since
         we only ever grow the heap via sbrk, this tends to produce a
         continuously growing brk (though the actual memory is unmapped),
         which could eventually run out of address space. Note that
         `sbrk(0)' shouldn't normally do a system call, so this test is
         reasonably cheap. */
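      /* If END is not the current break, something else has since been
         allocated with sbrk above this free area, so it can't be given
         back with sbrk; just leave it in the heap. */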
      if ((void *)end != sbrk (0))
        {
          MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
                        start, end, end - start);
          __malloc_unlock_sbrk ();
          __heap_unlock (heap_lock);
          return;
        }
#endif

      MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)",
                    start, end, end - start);

      /* Remove FA from the heap. */
      __heap_delete (heap, fa);

      if (__heap_is_empty (heap))
        /* We want to keep the heap from losing all its memory, so reserve
           a bit. This test is only a heuristic -- the existence of
           another free area, even if it's smaller than
           MALLOC_MIN_SIZE, will cause us not to reserve anything. */
        {
          /* Put the reserved memory back in the heap; we assume that
             MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
             we use the latter unconditionally here. */
          __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
          start += MALLOC_MIN_SIZE;
        }

#ifdef MALLOC_USE_SBRK

      /* Release the heap lock; we're still holding the sbrk lock. */
      __heap_unlock (heap_lock);
      /* Lower the brk. */
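      /* START is at or below END here, so the increment passed to sbrk is
         non-positive and the break moves down by END - START bytes. */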
      sbrk (start - end);
      /* Release the sbrk lock too; now we hold no locks. */
      __malloc_unlock_sbrk ();

#else /* !MALLOC_USE_SBRK */

# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
      /* With the broken uClinux munmap, we can only munmap blocks exactly
         as we got them from mmap, so scan through our list of mmapped
         blocks and return them in order. */

      MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...",
                        start, end - start);

      prev_mmb = 0;
      mmb = __malloc_mmapped_blocks;
      while (mmb
             && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size)
                 <= end))
        {
          MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]",
                            (unsigned)mmb, mmb_start, mmb_end - mmb_start);

          if (mmb_start >= start
              /* If the space between START and MMB_START is non-zero, but
                 too small to return to the heap, we can't unmap MMB. */
              && (start == mmb_start
                  || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE))
            {
              struct malloc_mmb *next_mmb = mmb->next;

              if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end)
                /* There's too little space left at the end to deallocate
                   this block, so give up. */
                break;

              MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]",
                                (unsigned)mmb, mmb_start, mmb_end - mmb_start);

              if (mmb_start != start)
                /* We're going to unmap a part of the heap that begins after
                   START, so put the intervening region back into the heap. */
                {
                  MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]",
                                    start, mmb_start - start);
                  __heap_free (heap, (void *)start, mmb_start - start);
                }

              MALLOC_MMB_DEBUG_INDENT (-1);

              /* Unlink MMB from the list. */
              if (prev_mmb)
                prev_mmb->next = next_mmb;
              else
                __malloc_mmapped_blocks = next_mmb;

              /* Start searching again from the end of this block. */
              start = mmb_end;

              /* Release the descriptor block we used. */
              free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);

              /* We have to unlock the heap before we recurse to free the mmb
                 descriptor, because we might be unmapping from the mmb
                 heap. */
              __heap_unlock (heap_lock);

              /* Do the actual munmap. */
              munmap ((void *)mmb_start, mmb_end - mmb_start);

              __heap_lock (heap_lock);

# ifdef __UCLIBC_HAS_THREADS__
              /* In a multi-threaded program, it's possible that PREV_MMB has
                 been invalidated by another thread when we released the
                 heap lock to do the munmap system call, so just start over
                 from the beginning of the list. It sucks, but oh well;
                 it's probably not worth the bother to do better. */
              prev_mmb = 0;
              mmb = __malloc_mmapped_blocks;
# else
              mmb = next_mmb;
# endif
            }
          else
            {
              prev_mmb = mmb;
              mmb = mmb->next;
            }

          MALLOC_MMB_DEBUG_INDENT (-1);
        }

      if (start != end)
        /* Hmm, well there's something we couldn't unmap, so put it back
           into the heap. */
        {
          MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]",
                            start, end - start);
          __heap_free (heap, (void *)start, end - start);
        }

      /* Finally release the lock for good. */
      __heap_unlock (heap_lock);

      MALLOC_MMB_DEBUG_INDENT (-1);

# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

      /* START and END may not be page-aligned, so we have to page-align
         them, and return any left-over bits at the ends to the heap. */
      unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
      unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);

      /* We have to be careful that any left-over bits are large enough to
         return. Note that we _don't check_ to make sure there's room to
         grow/shrink the start/end by another page, we just assume that
         the unmap threshold is high enough so that this is always safe
         (i.e., it should probably be at least 3 pages). */
      if (unmap_start > start)
        {
          if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
            unmap_start += MALLOC_PAGE_SIZE;
          __heap_free (heap, (void *)start, unmap_start - start);
        }
      if (end > unmap_end)
        {
          if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
            unmap_end -= MALLOC_PAGE_SIZE;
          __heap_free (heap, (void *)unmap_end, end - unmap_end);
        }
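      /* Both adjustments above give up one page of otherwise-unmappable
         memory so that the left-over fragment grows to more than a page,
         which is assumed to be at least HEAP_MIN_FREE_AREA_SIZE and
         therefore safe to hand back to __heap_free. */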

      /* Release the heap lock before we do the system call. */
      __heap_unlock (heap_lock);

      if (unmap_end > unmap_start)
        /* Finally, actually unmap the memory. */
        munmap ((void *)unmap_start, unmap_end - unmap_start);

# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

#endif /* MALLOC_USE_SBRK */
    }

  MALLOC_DEBUG_INDENT (-1);
}

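/* The public entry point: return MEM to the global malloc heap, taking the
   global heap lock (compiled away when locking support is disabled). */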
void
free (void *mem)
{
  free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock);
}