/*
 * libc/stdlib/malloc/malloc.c -- malloc function
 *
 * Copyright (C) 2002,03 NEC Electronics Corporation
 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License.  See the file COPYING.LIB in the main
 * directory of this archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>


#include "malloc.h"
#include "heap.h"

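/* Allocation is served from a free-list heap (see heap.h).  When the
   heap runs dry, __malloc_from_heap grabs a fresh block from the
   system -- via sbrk or mmap, depending on configuration -- frees it
   into the heap, and retries the allocation.  */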

/* The malloc heap.  We provide a bit of initial static space so that
   programs can do a little mallocing without mmaping in more space.  */
HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
struct heap_free_area *__malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
#ifdef HEAP_USE_LOCKING
malloc_mutex_t __malloc_heap_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
/* A lock protecting our use of sbrk.  */
malloc_mutex_t __malloc_sbrk_lock;
#endif /* MALLOC_USE_LOCKING && MALLOC_USE_SBRK */


#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
/* A list of all malloc_mmb structures describing blocks that
   malloc has mmapped, ordered by the block address.  */
struct malloc_mmb *__malloc_mmapped_blocks = 0;

/* A heap used for allocating malloc_mmb structures.  We could allocate
   them from the main heap, but that tends to cause heap fragmentation in
   annoying ways.  */
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
#ifdef HEAP_USE_LOCKING
malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
#endif
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */


#ifdef HEAP_USE_LOCKING
#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap, lck)
#else
#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap)
#endif
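
/* When locking is disabled, the macro above simply drops the lock
   argument at the call site, so e.g.
     malloc_from_heap (n, &__malloc_heap, &__malloc_heap_lock)
   expands to __malloc_from_heap (n, &__malloc_heap) and the lock
   expression is never evaluated.  */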
static void *
__malloc_from_heap (size_t size, struct heap_free_area **heap
#ifdef HEAP_USE_LOCKING
                    , malloc_mutex_t *heap_lock
#endif
                    )
{
  void *mem;

  MALLOC_DEBUG (1, "malloc: %d bytes", size);

  /* Include extra space to record the size of the allocated block.  */
  size += MALLOC_HEADER_SIZE;

  __heap_lock (heap_lock);

  /* First try to get memory that's already in our heap.  */
  mem = __heap_alloc (heap, &size);

  __heap_unlock (heap_lock);

  if (unlikely (! mem))
    /* We couldn't allocate from the heap, so grab some more
       from the system, add it to the heap, and try again.  */
    {
      /* If we're trying to allocate a block bigger than the default
         MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it.  */
      void *block;
      size_t block_size
        = (size < MALLOC_HEAP_EXTEND_SIZE
           ? MALLOC_HEAP_EXTEND_SIZE
           : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
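
      /* Thus a small request grabs a full MALLOC_HEAP_EXTEND_SIZE chunk
         and the unused remainder stays in the heap for later requests,
         while a large request is rounded up to whole pages.  */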

      /* Allocate the new heap block.  */
#ifdef MALLOC_USE_SBRK

      __malloc_lock_sbrk ();

      /* Use sbrk if we can, as it's faster than mmap, and guarantees
         contiguous allocation.  */
      block = sbrk (block_size);
      if (likely (block != (void *)-1))
        {
          /* Because sbrk can return results of arbitrary
             alignment, align the result to a MALLOC_ALIGNMENT boundary.  */
          long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
          if (block != (void *)aligned_block)
            /* Have to adjust.  We should only have to actually do this
               the first time (after which we will have aligned the brk
               correctly).  */
            {
              /* Move the brk to reflect the alignment; our next allocation
                 should start on exactly the right alignment.  */
              sbrk (aligned_block - (long)block);
              block = (void *)aligned_block;
            }
        }

      __malloc_unlock_sbrk ();

#else /* !MALLOC_USE_SBRK */

      /* Otherwise, use mmap.  */
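      /* With an MMU, a private anonymous mapping gives us zero-filled
         pages.  Without one, MAP_UNINITIALIZE asks the kernel to skip
         the zeroing, which it honors only when built to allow that.  On
         Linux the fd argument is ignored for anonymous mappings.  */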
#ifdef __ARCH_USE_MMU__
      block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#else
      block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZE, 0, 0);
#endif

#endif /* MALLOC_USE_SBRK */

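      /* Both failure returns look the same: sbrk yields (void *)-1, and
         MAP_FAILED is also (void *)-1, so a single test covers the two
         paths.  */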
      if (likely (block != (void *)-1))
        {
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
          struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
#endif

          MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)",
                        (long)block, (long)block + block_size, block_size);

          /* Get back the heap lock.  */
          __heap_lock (heap_lock);

          /* Put BLOCK into the heap.  */
          __heap_free (heap, block, block_size);

          MALLOC_DEBUG_INDENT (-1);

          /* Try again to allocate.  */
          mem = __heap_alloc (heap, &size);


#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
          /* Insert a record of BLOCK in sorted order into the
             __malloc_mmapped_blocks list.  */

          new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
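          /* Note that a failed allocation from the mmb heap is not
             checked; the list insertion below assumes NEW_MMB is
             non-null.  */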

          for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
               mmb;
               prev_mmb = mmb, mmb = mmb->next)
            if (block < mmb->mem)
              break;
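          /* The loop leaves MMB at the first recorded block above BLOCK
             (or null) and PREV_MMB at its predecessor; linking NEW_MMB in
             between keeps the list sorted by address.  */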

          new_mmb->next = mmb;
          new_mmb->mem = block;
          new_mmb->size = block_size;

          if (prev_mmb)
            prev_mmb->next = new_mmb;
          else
            __malloc_mmapped_blocks = new_mmb;

          MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]",
                            (unsigned)new_mmb,
                            (unsigned)new_mmb->mem, block_size);
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
          __heap_unlock (heap_lock);
        }
    }

  if (likely (mem))
    /* Record the size of the block and get the user address.  */
    {
      mem = MALLOC_SETUP (mem, size);

      MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)",
                    (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
    }
  else
    MALLOC_DEBUG (-1, "malloc: returning 0");

  return mem;
}

void *
malloc (size_t size)
{
  void *mem;
#ifdef MALLOC_DEBUGGING
  static smallint debugging_initialized;
  if (! debugging_initialized)
    {
      debugging_initialized = 1;
      __malloc_debug_init ();
    }
  if (__malloc_check)
    __heap_check (__malloc_heap, "malloc");
#endif

#ifdef __MALLOC_GLIBC_COMPAT__
  if (unlikely (size == 0))
    size++;
#else
  /* Some programs will call malloc (0).  Let's be strict and return NULL.  */
  if (unlikely (size == 0))
    goto oom;
#endif

  /* Check if they are doing something dumb like malloc(-1); any size this
     close to the top of the address range would overflow once the header
     size is added below.  */
  if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
    goto oom;

  mem = malloc_from_heap (size, &__malloc_heap, &__malloc_heap_lock);
  if (unlikely (!mem))
    {
    oom:
      __set_errno (ENOMEM);
      return 0;
    }

  return mem;
}