/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
          ftp://gee.cs.oswego.edu/pub/misc/malloc.c
        Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include "malloc.h"


/* ------------------------- __malloc_trim -------------------------
  __malloc_trim is an inverse of sorts to __malloc_alloc.  It gives memory
  back to the system (via negative arguments to sbrk) if there is unused
  memory at the `high' end of the malloc pool.  It is called automatically by
  free() when top space exceeds the trim threshold.  It is also called by the
  public malloc_trim routine.  It returns 1 if it actually released any
  memory, else 0.
*/
static int __malloc_trim(size_t pad, mstate av)
{
    long top_size;     /* Amount of top-most memory */
    long extra;        /* Amount to release */
    long released;     /* Amount actually released */
    char* current_brk; /* address returned by pre-check sbrk call */
    char* new_brk;     /* address returned by post-check sbrk call */
    size_t pagesz;

    pagesz = av->pagesize;
    top_size = chunksize(av->top);

    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
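
    /* Worked example (illustrative; assumes a 4096-byte page and
       MINSIZE == 16, which may differ on other configurations):
       with pad == 0 and top_size == 40960 (ten pages),
       (40960 - 0 - 16 + 4095) / 4096 == 10, minus 1 gives 9 pages,
       so extra == 36864 and exactly one full page stays in top. */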

    if (extra > 0) {

        /*
          Only proceed if end of memory is where we last set it.
          This avoids problems if there were foreign sbrk calls.
        */
        current_brk = (char*)(MORECORE(0));
        if (current_brk == (char*)(av->top) + top_size) {

            /*
              Attempt to release memory.  We ignore the MORECORE return
              value, and instead call again to find out where the new end
              of memory is.  This avoids problems if the first call
              releases less than we asked, or if failure somehow altered
              the brk value.  (We could still encounter problems if it
              altered brk in some very bad way, but the only thing we can
              do is adjust anyway, which will cause some downstream
              failure.)
            */

            MORECORE(-extra);
            new_brk = (char*)(MORECORE(0));

            if (new_brk != (char*)MORECORE_FAILURE) {
                released = (long)(current_brk - new_brk);

                if (released != 0) {
                    /* Success. Adjust top. */
                    av->sbrked_mem -= released;
                    set_head(av->top, (top_size - released) | PREV_INUSE);
                    check_malloc_state();
                    return 1;
                }
            }
        }
    }
    return 0;
}

/* ------------------------- malloc_trim -------------------------
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool.  You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.  However, it cannot guarantee to reduce memory use:
  under some allocation patterns, large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed.  If this argument is zero, only
  the minimum amount of memory needed to maintain internal data
  structures will be left (one page or less).  Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int malloc_trim(size_t pad)
{
    mstate av = get_malloc_state();
    __malloc_consolidate(av);
    return __malloc_trim(pad, av);
}
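
/* Usage sketch (illustrative only, not part of this file's build):
   free a batch of heap blocks, then hand the unused top of the heap
   back to the kernel.  The 64 KiB request size is an assumption chosen
   to stay below the default mmap threshold (commonly 128 KiB), so the
   blocks are carved from the sbrk heap rather than mmapped one by one.

       #include <stdlib.h>
       #include <malloc.h>

       int main(void)
       {
           void *blocks[64];
           int i;

           for (i = 0; i < 64; i++)        // grow the sbrk heap
               blocks[i] = malloc(64 * 1024);
           for (i = 0; i < 64; i++)        // top becomes one big free run
               free(blocks[i]);
           return malloc_trim(0) ? 0 : 1;  // 0 => pages were released
       }
*/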

/*
  Initialize a malloc_state struct.

  This is called only from within __malloc_consolidate, which needs to
  be called in the same contexts anyway.  It is never called directly
  outside of __malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all.  (Inlining it in __malloc_consolidate is fine
  though.)
*/
static void malloc_init_state(mstate av)
{
    int i;
    mbinptr bin;

    /* Establish circular links for normal bins */
    for (i = 1; i < NBINS; ++i) {
        bin = bin_at(av,i);
        bin->fd = bin->bk = bin;
    }
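
    /* Each bin now points at itself in both directions; an empty bin
       is exactly one whose fd (and bk) refer back to the bin header,
       so list insert/unlink code needs no empty-list special case. */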

    av->top_pad = DEFAULT_TOP_PAD;
    av->n_mmaps_max = DEFAULT_MMAP_MAX;
    av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
    set_contiguous(av);
#else
    set_noncontiguous(av);
#endif

    set_max_fast(av, DEFAULT_MXFAST);

    av->top = initial_top(av);
    av->pagesize = malloc_getpagesize;
}


/* ----------------------------------------------------------------------
 *
 * PUBLIC STUFF
 *
 * ----------------------------------------------------------------------*/


/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
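
  For example, if several physically adjacent chunks are sitting in
  fastbins, consolidation merges them (together with any neighboring
  free chunks) into one larger chunk on the unsorted bin, or folds
  them into top when they border it.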
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr* fb;          /* current fastbin being consolidated */
    mfastbinptr* maxfb;       /* last fastbin (for loop control) */
    mchunkptr p;              /* current chunk being consolidated */
    mchunkptr nextp;          /* next chunk to consolidate */
    mchunkptr unsorted_bin;   /* bin header */
    mchunkptr first_unsorted; /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int nextinuse;
    mchunkptr bck;
    mchunkptr fwd;

    /*
      If max_fast is 0, we know that av hasn't
      yet been initialized, in which case do so below
    */

    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);

        /*
          Remove each chunk from its fast bin and consolidate it, then
          place it in the unsorted bin.  Among other reasons for doing
          this, placing chunks in the unsorted bin avoids needing to
          calculate actual bins until malloc is sure that chunks aren't
          immediately going to be reused anyway.
        */

        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ( (p = *fb) != 0) {
                *fb = 0;

                do {
                    check_inuse_chunk(p);
                    nextp = p->fd;

                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }

                    if (nextchunk != av->top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    }

                    else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        av->top = p;
                    }

                } while ( (p = nextp) != 0);

            }
        } while (fb++ != maxfb);
    }
    else {
        malloc_init_state(av);
        check_malloc_state();
    }
}


/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr p;         /* chunk corresponding to mem */
    size_t size;         /* its size */
    mfastbinptr* fb;     /* associated fastbin */
    mchunkptr nextchunk; /* next contiguous chunk */
    size_t nextsize;     /* its size */
    int nextinuse;       /* true if nextchunk is used */
    size_t prevsize;     /* size of previous contiguous chunk */
    mchunkptr bck;       /* misc temp for linking */
    mchunkptr fwd;       /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
        return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
      If eligible, place chunk on a fastbin so it can be found
      and used quickly in malloc.
    */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
            /* If TRIM_FASTBINS set, don't place chunks
               bordering top into fastbins */
            && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

        set_fastchunks(av);
        fb = &(av->fastbins[fastbin_index(size)]);
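        /* LIFO push onto a singly linked list: the chunk's fd points
           at the old head and the bin header now points at the chunk
           (fastbin chunks do not use their bk field). */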
        p->fd = *fb;
        *fb = p;
    }

    /*
      Consolidate other non-mmapped chunks as they arrive.
    */

    else if (!chunk_is_mmapped(p)) {
        set_anychunks(av);

        nextchunk = chunk_at_offset(p, size);
        nextsize = chunksize(nextchunk);

        /* consolidate backward */
        if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
        }

        if (nextchunk != av->top) {
            /* get and clear inuse bit */
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            /* consolidate forward */
            if (!nextinuse) {
                unlink(nextchunk, bck, fwd);
                size += nextsize;
            }

            /*
              Place the chunk in unsorted chunk list.  Chunks are
              not placed into regular bins until after they have
              been given one chance to be used in malloc.
            */

            bck = unsorted_chunks(av);
            fwd = bck->fd;
            p->bk = bck;
            p->fd = fwd;
            bck->fd = p;
            fwd->bk = p;

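            /* Boundary tags: set_head records the size at the front of
               the chunk and set_foot copies it into the following
               chunk's prev_size field, so a later free of the next
               chunk can coalesce backward without scanning. */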
            set_head(p, size | PREV_INUSE);
            set_foot(p, size);

            check_free_chunk(p);
        }

        /*
          If the chunk borders the current high end of memory,
          consolidate into top
        */

        else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
            check_chunk(p);
        }

        /*
          If freeing a large space, consolidate possibly-surrounding
          chunks.  Then, if the total unused topmost memory exceeds trim
          threshold, ask malloc_trim to reduce top.

          Unless max_fast is 0, we don't know if there are fastbins
          bordering top, so we cannot tell for sure whether threshold
          has been reached unless fastbins are consolidated.  But we
          don't want to consolidate on each free.  As a compromise,
          consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
          is reached.
        */

        if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
            if (have_fastchunks(av))
                __malloc_consolidate(av);

            if ((unsigned long)(chunksize(av->top)) >=
                    (unsigned long)(av->trim_threshold))
                __malloc_trim(av->top_pad, av);
        }

    }
    /*
      If the chunk was allocated via mmap, release it via munmap().
      Note that if HAVE_MMAP is false but chunk_is_mmapped is true,
      then the user must have overwritten memory.  There's nothing we
      can do to catch this error unless DEBUG is set, in which case
      check_inuse_chunk (above) will have triggered an error.
    */

    else {
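        /* For mmapped chunks, prev_size holds the number of padding
           bytes between the start of the mapping and the chunk itself,
           so subtracting it recovers the original mapping base. */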
        size_t offset = p->prev_size;
        av->n_mmaps--;
        av->mmapped_mem -= (size + offset);
        munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}