/* Map in a shared object's segments.  NaCl version.
   Copyright (C) 2015-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dl-load.h>
#include <errno.h>
#include <stdbool.h>
#include <unistd.h>
#include <libc-internal.h>


/* This is basically pread, but with iteration after short reads.  */
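/* A false return means success; a true return means failure, with errno
   set to the cause (EFTYPE when the file ends before LEN bytes are read).  */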
static bool
read_in_data (int fd, void *data, size_t len, off_t pos)
{
  if (__glibc_unlikely (__lseek (fd, pos, SEEK_SET) == (off_t) -1))
    return true;
  while (len > 0)
    {
      ssize_t n = __read (fd, data, len);
      if (__glibc_unlikely (n < 0))
        return true;
      if (__glibc_unlikely (n == 0))
        {
          errno = EFTYPE;
          return true;
        }
      data += n;
      len -= n;
    }
  return false;
}

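/* Map in the segments described by LOADCMDS[0..NLOADCMDS-1] for the object
   L, reading from FD.  Returns NULL on success, or one of the
   DL_MAP_SEGMENTS_ERROR_* messages (with errno set) on failure.  */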
static const char *
_dl_map_segments (struct link_map *l, int fd,
                  const ElfW(Ehdr) *header, int type,
                  const struct loadcmd loadcmds[], size_t nloadcmds,
                  const size_t maplength, bool has_holes,
                  struct link_map *loader)
{
  if (__glibc_likely (type == ET_DYN))
    {
      /* This is a position-independent shared object.  Let the system
         choose where to place it.

         As a refinement, sometimes we have an address that we would
         prefer to map such objects at; but this is only a preference,
         the OS can do whatever it likes.  */
      ElfW(Addr) mappref
        = (ELF_PREFERRED_ADDRESS (loader, maplength,
                                  loadcmds[0].mapstart
                                  & GLRO(dl_use_load_bias))
           - MAP_BASE_ADDR (l));

      uintptr_t mapstart;
      if (__glibc_likely (loadcmds[0].prot & PROT_EXEC))
        {
          /* When there is a code segment, we must use the
             allocate_code_data interface to choose a location.  */

          uintptr_t code_size = loadcmds[0].allocend - loadcmds[0].mapstart;
          uintptr_t data_offset;
          size_t data_size;

          if (__glibc_likely (nloadcmds > 1))
            {
              data_offset = loadcmds[1].mapstart - loadcmds[0].mapstart;
              data_size = ALIGN_UP (maplength - data_offset,
                                    GLRO(dl_pagesize));
            }
          else
            {
              data_offset = 0;
              data_size = 0;
            }

          int error = __nacl_irt_code_data_alloc.allocate_code_data
            (mappref, code_size, data_offset, data_size, &mapstart);
          if (__glibc_unlikely (error))
            {
              errno = error;
              return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
            }
        }
      else
        {
          /* With no code pages involved, plain mmap works fine.  */
          void *mapped = __mmap ((void *) mappref, maplength,
                                 PROT_NONE, MAP_ANON, -1, 0);
          if (__glibc_unlikely (mapped == MAP_FAILED))
            return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
          mapstart = (uintptr_t) mapped;
        }

      l->l_addr = mapstart - loadcmds[0].mapstart;
    }
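  /* When TYPE is not ET_DYN, l->l_addr is left unchanged, so each segment
     below is mapped at the address given in its load command.  */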

  /* Remember which part of the address space this object uses.  */
  l->l_map_start = loadcmds[0].mapstart + l->l_addr;
  l->l_map_end = l->l_map_start + maplength;
  l->l_contiguous = !has_holes;

  /* Now actually map (or read) in each segment.  */
  for (const struct loadcmd *c = loadcmds; c < &loadcmds[nloadcmds]; ++c)
    if (__glibc_likely (c->mapend > c->mapstart))
      {
        /* Unlike POSIX mmap, NaCl's mmap does not reliably handle COW
           faults in the remainder of the final partial page.  So to get
           the expected behavior for the unaligned boundary between data
           and bss, it's necessary to allocate the final partial page of
           data as anonymous memory rather than mapping it from the file.  */

        size_t maplen = c->mapend - c->mapstart;
        if (c->mapend > c->dataend && c->allocend > c->dataend)
          maplen = (c->dataend & -GLRO(dl_pagesize)) - c->mapstart;

        /* Map the segment contents from the file.  */
        if (__glibc_unlikely (__mmap ((void *) (l->l_addr + c->mapstart),
                                      maplen, c->prot,
                                      MAP_FIXED|MAP_COPY|MAP_FILE,
                                      fd, c->mapoff)
                              == MAP_FAILED))
          {
            switch (errno)
              {
              case EINVAL:
              case ENOTSUP:
              case ENOSYS:
                break;
              default:
                return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
              }

            /* No mmap support for this file.  */
            if (c->prot & PROT_EXEC)
              {
                /* Read the data into a temporary buffer.  */
                const size_t len = c->mapend - c->mapstart;
                void *data = __mmap (NULL, len, PROT_READ | PROT_WRITE,
                                     MAP_ANON|MAP_PRIVATE, -1, 0);
                if (__glibc_unlikely (data == MAP_FAILED))
                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
                if (read_in_data (fd, data, len, c->mapoff))
                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                /* Now validate and install the code.  */
                int error = __nacl_irt_dyncode.dyncode_create
                  ((void *) (l->l_addr + c->mapstart), data, len);
                __munmap (data, len);
                if (__glibc_unlikely (error))
                  {
                    errno = error;
                    return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                  }
                if (__glibc_unlikely (type != ET_DYN))
                  {
                    /* A successful PROT_EXEC mmap would have implicitly
                       updated the bookkeeping so that a future
                       allocate_code_data call would know that this range
                       of the address space is already occupied.  That
                       doesn't happen implicitly with dyncode_create, so
                       it's necessary to do an explicit call to update the
                       bookkeeping.  */
                    uintptr_t allocated_address;
                    error = __nacl_irt_code_data_alloc.allocate_code_data
                      (l->l_addr + c->mapstart, len, 0, 0,
                       &allocated_address);
                    if (__glibc_unlikely (error))
                      {
                        errno = error;
                        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                      }
                    if (__glibc_unlikely
                        (allocated_address != l->l_addr + c->mapstart))
                      {
                        /* This is not a very helpful error for this case,
                           but there isn't really anything better to use.  */
                        errno = ENOMEM;
                        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                      }
                  }
              }
            else
              {
                /* Allocate the pages.  */
                if (__mmap ((void *) (l->l_addr + c->mapstart),
                            c->mapend - c->mapstart, c->prot | PROT_WRITE,
                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
                    == MAP_FAILED)
                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
                /* Now read in the data.  */
                if (read_in_data (fd, (void *) (l->l_addr + c->mapstart),
                                  c->dataend - c->mapstart, c->mapoff))
                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                /* Now that we've filled the pages, reset the page
                   protections to what they should be.  */
                if (!(c->prot & PROT_WRITE)
                    && __mprotect ((void *) (l->l_addr + c->mapstart),
                                   c->mapend - c->mapstart, c->prot) < 0)
                  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
              }
          }
        else if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file.  */

            uintptr_t allocend = c->mapend;
            if (c->mapend > c->dataend)
              {
                /* The final data page was partial.  So we didn't map it in.
                   Instead, we must allocate an anonymous page to fill.  */
                if (c->prot & PROT_WRITE)
                  /* Do the whole allocation right here.  */
                  allocend = c->allocend;
                if (__mmap ((void *) (l->l_addr + c->mapstart + maplen),
                            allocend - (c->mapstart + maplen),
                            c->prot | PROT_WRITE,
                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
                    == MAP_FAILED)
                  return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
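                /* Read in just the data bytes that belong in this final
                   partial page; the rest of the page stays zero-filled.  */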
                if (read_in_data (fd,
                                  (void *) (l->l_addr + c->mapstart + maplen),
                                  c->dataend & (GLRO(dl_pagesize) - 1),
                                  c->mapoff + maplen))
                  return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
                /* Now that we've filled the page, reset its
                   protections to what they should be.  */
                if (!(c->prot & PROT_WRITE)
                    && __mprotect ((void *) (l->l_addr + c->mapstart + maplen),
                                   c->mapend - (c->mapstart + maplen),
                                   c->prot) < 0)
                  return DL_MAP_SEGMENTS_ERROR_MPROTECT;
              }

            /* Now allocate the pure zero-fill pages.  */
            if (allocend < c->allocend
                && (__mmap ((void *) (l->l_addr + c->mapstart + allocend),
                            c->allocend - (c->mapstart + allocend), c->prot,
                            MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0)
                    == MAP_FAILED))
              return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
          }

        _dl_postprocess_loadcmd (l, header, c);
      }

  /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
     fixed.  */
  ELF_FIXED_ADDRESS (loader, loadcmds[0].mapstart);

  return NULL;
}