/* Load a shared object at runtime, relocate it, and run its initializer.
| 2 | Copyright (C) 1996-2016 Free Software Foundation, Inc. |
| 3 | This file is part of the GNU C Library. |
| 4 | |
| 5 | The GNU C Library is free software; you can redistribute it and/or |
| 6 | modify it under the terms of the GNU Lesser General Public |
| 7 | License as published by the Free Software Foundation; either |
| 8 | version 2.1 of the License, or (at your option) any later version. |
| 9 | |
| 10 | The GNU C Library is distributed in the hope that it will be useful, |
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | Lesser General Public License for more details. |
| 14 | |
| 15 | You should have received a copy of the GNU Lesser General Public |
| 16 | License along with the GNU C Library; if not, see |
| 17 | <http://www.gnu.org/licenses/>. */ |
| 18 | |
| 19 | #include <assert.h> |
| 20 | #include <dlfcn.h> |
| 21 | #include <errno.h> |
| 22 | #include <libintl.h> |
| 23 | #include <stdio.h> |
| 24 | #include <stdlib.h> |
| 25 | #include <string.h> |
| 26 | #include <unistd.h> |
| 27 | #include <sys/mman.h> /* Check whether MAP_COPY is defined. */ |
| 28 | #include <sys/param.h> |
| 29 | #include <libc-lock.h> |
| 30 | #include <ldsodefs.h> |
| 31 | #include <caller.h> |
| 32 | #include <sysdep-cancel.h> |
| 33 | #include <tls.h> |
| 34 | #include <stap-probe.h> |
| 35 | #include <atomic.h> |
| 36 | |
| 37 | #include <dl-dst.h> |
| 38 | |
| 39 | |
| 40 | extern int __libc_multiple_libcs; /* Defined in init-first.c. */ |
| 41 | |
| 42 | /* We must be careful not to leave us in an inconsistent state. Thus we |
| 43 | catch any error and re-raise it after cleaning up. */ |
| 44 | |
| 45 | struct dl_open_args |
| 46 | { |
| 47 | const char *file; |
| 48 | int mode; |
| 49 | /* This is the caller of the dlopen() function. */ |
| 50 | const void *caller_dlopen; |
| 51 | /* This is the caller of _dl_open(). */ |
| 52 | const void *caller_dl_open; |
| 53 | struct link_map *map; |
| 54 | /* Namespace ID. */ |
| 55 | Lmid_t nsid; |
| 56 | /* Original parameters to the program and the current environment. */ |
| 57 | int argc; |
| 58 | char **argv; |
| 59 | char **env; |
| 60 | }; |
| 61 | |
| 62 | |
| 63 | static int |
| 64 | add_to_global (struct link_map *new) |
| 65 | { |
| 66 | struct link_map **new_global; |
| 67 | unsigned int to_add = 0; |
| 68 | unsigned int cnt; |
| 69 | |
| 70 | /* Count the objects we have to put in the global scope. */ |
| 71 | for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt) |
| 72 | if (new->l_searchlist.r_list[cnt]->l_global == 0) |
| 73 | ++to_add; |
| 74 | |
| 75 | /* The symbols of the new objects and its dependencies are to be |
| 76 | introduced into the global scope that will be used to resolve |
| 77 | references from other dynamically-loaded objects. |
| 78 | |
| 79 | The global scope is the searchlist in the main link map. We |
| 80 | extend this list if necessary. There is one problem though: |
| 81 | since this structure was allocated very early (before the libc |
| 82 | is loaded) the memory it uses is allocated by the malloc()-stub |
| 83 | in the ld.so. When we come here these functions are not used |
| 84 | anymore. Instead the malloc() implementation of the libc is |
| 85 | used. But this means the block from the main map cannot be used |
| 86 | in an realloc() call. Therefore we allocate a completely new |
| 87 | array the first time we have to add something to the locale scope. */ |
| 88 | |
| 89 | struct link_namespaces *ns = &GL(dl_ns)[new->l_ns]; |
| 90 | if (ns->_ns_global_scope_alloc == 0) |
| 91 | { |
| 92 | /* This is the first dynamic object given global scope. */ |
| 93 | ns->_ns_global_scope_alloc |
| 94 | = ns->_ns_main_searchlist->r_nlist + to_add + 8; |
| 95 | new_global = (struct link_map **) |
| 96 | malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *)); |
| 97 | if (new_global == NULL) |
| 98 | { |
| 99 | ns->_ns_global_scope_alloc = 0; |
| 100 | nomem: |
| 101 | _dl_signal_error (ENOMEM, new->l_libname->name, NULL, |
| 102 | N_("cannot extend global scope")); |
| 103 | return 1; |
| 104 | } |
| 105 | |
| 106 | /* Copy over the old entries. */ |
| 107 | ns->_ns_main_searchlist->r_list |
| 108 | = memcpy (new_global, ns->_ns_main_searchlist->r_list, |
| 109 | (ns->_ns_main_searchlist->r_nlist |
| 110 | * sizeof (struct link_map *))); |
| 111 | } |
| 112 | else if (ns->_ns_main_searchlist->r_nlist + to_add |
| 113 | > ns->_ns_global_scope_alloc) |
| 114 | { |
| 115 | /* We have to extend the existing array of link maps in the |
| 116 | main map. */ |
| 117 | struct link_map **old_global |
| 118 | = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list; |
| 119 | size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2); |
| 120 | |
| 121 | new_global = (struct link_map **) |
| 122 | malloc (new_nalloc * sizeof (struct link_map *)); |
| 123 | if (new_global == NULL) |
| 124 | goto nomem; |
| 125 | |
| 126 | memcpy (new_global, old_global, |
| 127 | ns->_ns_global_scope_alloc * sizeof (struct link_map *)); |
| 128 | |
| 129 | ns->_ns_global_scope_alloc = new_nalloc; |
| 130 | ns->_ns_main_searchlist->r_list = new_global; |
| 131 | |
| 132 | if (!RTLD_SINGLE_THREAD_P) |
| 133 | THREAD_GSCOPE_WAIT (); |
| 134 | |
| 135 | free (old_global); |
| 136 | } |
| 137 | |
| 138 | /* Now add the new entries. */ |
| 139 | unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist; |
| 140 | for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt) |
| 141 | { |
| 142 | struct link_map *map = new->l_searchlist.r_list[cnt]; |
| 143 | |
| 144 | if (map->l_global == 0) |
| 145 | { |
| 146 | map->l_global = 1; |
| 147 | ns->_ns_main_searchlist->r_list[new_nlist++] = map; |
| 148 | |
| 149 | /* We modify the global scope. Report this. */ |
| 150 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES)) |
| 151 | _dl_debug_printf ("\nadd %s [%lu] to global scope\n", |
| 152 | map->l_name, map->l_ns); |
| 153 | } |
| 154 | } |
| 155 | atomic_write_barrier (); |
| 156 | ns->_ns_main_searchlist->r_nlist = new_nlist; |
| 157 | |
| 158 | return 0; |
| 159 | } |
| 160 | |
| 161 | /* Search link maps in all namespaces for the DSO that contains the object at |
| 162 | address ADDR. Returns the pointer to the link map of the matching DSO, or |
| 163 | NULL if a match is not found. */ |
| 164 | struct link_map * |
| 165 | internal_function |
| 166 | _dl_find_dso_for_object (const ElfW(Addr) addr) |
| 167 | { |
| 168 | struct link_map *l; |
| 169 | |
| 170 | /* Find the highest-addressed object that ADDR is not below. */ |
| 171 | for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns) |
| 172 | for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next) |
| 173 | if (addr >= l->l_map_start && addr < l->l_map_end |
| 174 | && (l->l_contiguous |
| 175 | || _dl_addr_inside_object (l, (ElfW(Addr)) addr))) |
| 176 | { |
| 177 | assert (ns == l->l_ns); |
| 178 | return l; |
| 179 | } |
| 180 | return NULL; |
| 181 | } |
| 182 | rtld_hidden_def (_dl_find_dso_for_object); |
| 183 | |
| 184 | static void |
| 185 | dl_open_worker (void *a) |
| 186 | { |
| 187 | struct dl_open_args *args = a; |
| 188 | const char *file = args->file; |
| 189 | int mode = args->mode; |
| 190 | struct link_map *call_map = NULL; |
| 191 | |
| 192 | /* Check whether _dl_open() has been called from a valid DSO. */ |
| 193 | if (__check_caller (args->caller_dl_open, |
| 194 | allow_libc|allow_libdl|allow_ldso) != 0) |
| 195 | _dl_signal_error (0, "dlopen", NULL, N_("invalid caller")); |
| 196 | |
| 197 | /* Determine the caller's map if necessary. This is needed in case |
| 198 | we have a DST, when we don't know the namespace ID we have to put |
| 199 | the new object in, or when the file name has no path in which |
| 200 | case we need to look along the RUNPATH/RPATH of the caller. */ |
| 201 | const char *dst = strchr (file, '$'); |
| 202 | if (dst != NULL || args->nsid == __LM_ID_CALLER |
| 203 | || strchr (file, '/') == NULL) |
| 204 | { |
| 205 | const void *caller_dlopen = args->caller_dlopen; |
| 206 | |
| 207 | /* We have to find out from which object the caller is calling. |
| 208 | By default we assume this is the main application. */ |
| 209 | call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded; |
| 210 | |
| 211 | struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen); |
| 212 | |
| 213 | if (l) |
| 214 | call_map = l; |
| 215 | |
| 216 | if (args->nsid == __LM_ID_CALLER) |
| 217 | args->nsid = call_map->l_ns; |
| 218 | } |
| 219 | |
| 220 | /* One might be tempted to assert that we are RT_CONSISTENT at this point, but that |
| 221 | may not be true if this is a recursive call to dlopen. */ |
| 222 | _dl_debug_initialize (0, args->nsid); |
| 223 | |
| 224 | /* Load the named object. */ |
| 225 | struct link_map *new; |
| 226 | args->map = new = _dl_map_object (call_map, file, lt_loaded, 0, |
| 227 | mode | __RTLD_CALLMAP, args->nsid); |
| 228 | |
| 229 | /* Mark the object as not deletable if the RTLD_NODELETE flags was passed. |
| 230 | Do this early so that we don't skip marking the object if it was |
| 231 | already loaded. */ |
| 232 | if (__glibc_unlikely (mode & RTLD_NODELETE)) |
| 233 | new->l_flags_1 |= DF_1_NODELETE; |
| 234 | |
| 235 | /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is |
| 236 | set and the object is not already loaded. */ |
| 237 | if (new == NULL) |
| 238 | { |
| 239 | assert (mode & RTLD_NOLOAD); |
| 240 | return; |
| 241 | } |
| 242 | |
| 243 | if (__glibc_unlikely (mode & __RTLD_SPROF)) |
| 244 | /* This happens only if we load a DSO for 'sprof'. */ |
| 245 | return; |
| 246 | |
| 247 | /* This object is directly loaded. */ |
| 248 | ++new->l_direct_opencount; |
| 249 | |
| 250 | /* It was already open. */ |
| 251 | if (__glibc_unlikely (new->l_searchlist.r_list != NULL)) |
| 252 | { |
| 253 | /* Let the user know about the opencount. */ |
| 254 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES)) |
| 255 | _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n", |
| 256 | new->l_name, new->l_ns, new->l_direct_opencount); |
| 257 | |
| 258 | /* If the user requested the object to be in the global namespace |
| 259 | but it is not so far, add it now. */ |
| 260 | if ((mode & RTLD_GLOBAL) && new->l_global == 0) |
| 261 | (void) add_to_global (new); |
| 262 | |
| 263 | assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT); |
| 264 | |
| 265 | return; |
| 266 | } |
| 267 | |
| 268 | /* Load that object's dependencies. */ |
| 269 | _dl_map_object_deps (new, NULL, 0, 0, |
| 270 | mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT)); |
| 271 | |
| 272 | /* So far, so good. Now check the versions. */ |
| 273 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
| 274 | if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL) |
| 275 | (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real, |
| 276 | 0, 0); |
| 277 | |
| 278 | #ifdef SHARED |
| 279 | /* Auditing checkpoint: we have added all objects. */ |
| 280 | if (__glibc_unlikely (GLRO(dl_naudit) > 0)) |
| 281 | { |
| 282 | struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded; |
| 283 | /* Do not call the functions for any auditing object. */ |
| 284 | if (head->l_auditing == 0) |
| 285 | { |
| 286 | struct audit_ifaces *afct = GLRO(dl_audit); |
| 287 | for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt) |
| 288 | { |
| 289 | if (afct->activity != NULL) |
| 290 | afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT); |
| 291 | |
| 292 | afct = afct->next; |
| 293 | } |
| 294 | } |
| 295 | } |
| 296 | #endif |
| 297 | |
| 298 | /* Notify the debugger all new objects are now ready to go. */ |
| 299 | struct r_debug *r = _dl_debug_initialize (0, args->nsid); |
| 300 | r->r_state = RT_CONSISTENT; |
| 301 | _dl_debug_state (); |
| 302 | LIBC_PROBE (map_complete, 3, args->nsid, r, new); |
| 303 | |
| 304 | /* Print scope information. */ |
| 305 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES)) |
| 306 | _dl_show_scope (new, 0); |
| 307 | |
| 308 | /* Only do lazy relocation if `LD_BIND_NOW' is not set. */ |
| 309 | int reloc_mode = mode & __RTLD_AUDIT; |
| 310 | if (GLRO(dl_lazy)) |
| 311 | reloc_mode |= mode & RTLD_LAZY; |
| 312 | |
| 313 | /* Sort the objects by dependency for the relocation process. This |
| 314 | allows IFUNC relocations to work and it also means copy |
| 315 | relocation of dependencies are if necessary overwritten. */ |
| 316 | size_t nmaps = 0; |
| 317 | struct link_map *l = new; |
| 318 | do |
| 319 | { |
| 320 | if (! l->l_real->l_relocated) |
| 321 | ++nmaps; |
| 322 | l = l->l_next; |
| 323 | } |
| 324 | while (l != NULL); |
| 325 | struct link_map *maps[nmaps]; |
| 326 | nmaps = 0; |
| 327 | l = new; |
| 328 | do |
| 329 | { |
| 330 | if (! l->l_real->l_relocated) |
| 331 | maps[nmaps++] = l; |
| 332 | l = l->l_next; |
| 333 | } |
| 334 | while (l != NULL); |
| 335 | if (nmaps > 1) |
| 336 | { |
| 337 | uint16_t seen[nmaps]; |
| 338 | memset (seen, '\0', sizeof (seen)); |
| 339 | size_t i = 0; |
| 340 | while (1) |
| 341 | { |
| 342 | ++seen[i]; |
| 343 | struct link_map *thisp = maps[i]; |
| 344 | |
| 345 | /* Find the last object in the list for which the current one is |
| 346 | a dependency and move the current object behind the object |
| 347 | with the dependency. */ |
| 348 | size_t k = nmaps - 1; |
| 349 | while (k > i) |
| 350 | { |
| 351 | struct link_map **runp = maps[k]->l_initfini; |
| 352 | if (runp != NULL) |
| 353 | /* Look through the dependencies of the object. */ |
| 354 | while (*runp != NULL) |
| 355 | if (__glibc_unlikely (*runp++ == thisp)) |
| 356 | { |
| 357 | /* Move the current object to the back past the last |
| 358 | object with it as the dependency. */ |
| 359 | memmove (&maps[i], &maps[i + 1], |
| 360 | (k - i) * sizeof (maps[0])); |
| 361 | maps[k] = thisp; |
| 362 | |
| 363 | if (seen[i + 1] > nmaps - i) |
| 364 | { |
| 365 | ++i; |
| 366 | goto next_clear; |
| 367 | } |
| 368 | |
| 369 | uint16_t this_seen = seen[i]; |
| 370 | memmove (&seen[i], &seen[i + 1], |
| 371 | (k - i) * sizeof (seen[0])); |
| 372 | seen[k] = this_seen; |
| 373 | |
| 374 | goto next; |
| 375 | } |
| 376 | |
| 377 | --k; |
| 378 | } |
| 379 | |
| 380 | if (++i == nmaps) |
| 381 | break; |
| 382 | next_clear: |
| 383 | memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0])); |
| 384 | next:; |
| 385 | } |
| 386 | } |
| 387 | |
| 388 | int relocation_in_progress = 0; |
| 389 | |
| 390 | for (size_t i = nmaps; i-- > 0; ) |
| 391 | { |
| 392 | l = maps[i]; |
| 393 | |
| 394 | if (! relocation_in_progress) |
| 395 | { |
| 396 | /* Notify the debugger that relocations are about to happen. */ |
| 397 | LIBC_PROBE (reloc_start, 2, args->nsid, r); |
| 398 | relocation_in_progress = 1; |
| 399 | } |
| 400 | |
| 401 | #ifdef SHARED |
| 402 | if (__glibc_unlikely (GLRO(dl_profile) != NULL)) |
| 403 | { |
| 404 | /* If this here is the shared object which we want to profile |
| 405 | make sure the profile is started. We can find out whether |
| 406 | this is necessary or not by observing the `_dl_profile_map' |
| 407 | variable. If it was NULL but is not NULL afterwards we must |
| 408 | start the profiling. */ |
| 409 | struct link_map *old_profile_map = GL(dl_profile_map); |
| 410 | |
| 411 | _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1); |
| 412 | |
| 413 | if (old_profile_map == NULL && GL(dl_profile_map) != NULL) |
| 414 | { |
| 415 | /* We must prepare the profiling. */ |
| 416 | _dl_start_profile (); |
| 417 | |
| 418 | /* Prevent unloading the object. */ |
| 419 | GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE; |
| 420 | } |
| 421 | } |
| 422 | else |
| 423 | #endif |
| 424 | _dl_relocate_object (l, l->l_scope, reloc_mode, 0); |
| 425 | } |
| 426 | |
| 427 | /* If the file is not loaded now as a dependency, add the search |
| 428 | list of the newly loaded object to the scope. */ |
| 429 | bool any_tls = false; |
| 430 | unsigned int first_static_tls = new->l_searchlist.r_nlist; |
| 431 | for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i) |
| 432 | { |
| 433 | struct link_map *imap = new->l_searchlist.r_list[i]; |
| 434 | int from_scope = 0; |
| 435 | |
| 436 | /* If the initializer has been called already, the object has |
| 437 | not been loaded here and now. */ |
| 438 | if (imap->l_init_called && imap->l_type == lt_loaded) |
| 439 | { |
| 440 | struct r_scope_elem **runp = imap->l_scope; |
| 441 | size_t cnt = 0; |
| 442 | |
| 443 | while (*runp != NULL) |
| 444 | { |
| 445 | if (*runp == &new->l_searchlist) |
| 446 | break; |
| 447 | ++cnt; |
| 448 | ++runp; |
| 449 | } |
| 450 | |
| 451 | if (*runp != NULL) |
| 452 | /* Avoid duplicates. */ |
| 453 | continue; |
| 454 | |
| 455 | if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max)) |
| 456 | { |
| 457 | /* The 'r_scope' array is too small. Allocate a new one |
| 458 | dynamically. */ |
| 459 | size_t new_size; |
| 460 | struct r_scope_elem **newp; |
| 461 | |
| 462 | #define SCOPE_ELEMS(imap) \ |
| 463 | (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0])) |
| 464 | |
| 465 | if (imap->l_scope != imap->l_scope_mem |
| 466 | && imap->l_scope_max < SCOPE_ELEMS (imap)) |
| 467 | { |
| 468 | new_size = SCOPE_ELEMS (imap); |
| 469 | newp = imap->l_scope_mem; |
| 470 | } |
| 471 | else |
| 472 | { |
| 473 | new_size = imap->l_scope_max * 2; |
| 474 | newp = (struct r_scope_elem **) |
| 475 | malloc (new_size * sizeof (struct r_scope_elem *)); |
| 476 | if (newp == NULL) |
| 477 | _dl_signal_error (ENOMEM, "dlopen", NULL, |
| 478 | N_("cannot create scope list")); |
| 479 | } |
| 480 | |
| 481 | memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0])); |
| 482 | struct r_scope_elem **old = imap->l_scope; |
| 483 | |
| 484 | imap->l_scope = newp; |
| 485 | |
| 486 | if (old != imap->l_scope_mem) |
| 487 | _dl_scope_free (old); |
| 488 | |
| 489 | imap->l_scope_max = new_size; |
| 490 | } |
| 491 | |
| 492 | /* First terminate the extended list. Otherwise a thread |
| 493 | might use the new last element and then use the garbage |
| 494 | at offset IDX+1. */ |
| 495 | imap->l_scope[cnt + 1] = NULL; |
| 496 | atomic_write_barrier (); |
| 497 | imap->l_scope[cnt] = &new->l_searchlist; |
| 498 | |
| 499 | /* Print only new scope information. */ |
| 500 | from_scope = cnt; |
| 501 | } |
| 502 | /* Only add TLS memory if this object is loaded now and |
| 503 | therefore is not yet initialized. */ |
| 504 | else if (! imap->l_init_called |
| 505 | /* Only if the module defines thread local data. */ |
| 506 | && __builtin_expect (imap->l_tls_blocksize > 0, 0)) |
| 507 | { |
| 508 | /* Now that we know the object is loaded successfully add |
| 509 | modules containing TLS data to the slot info table. We |
| 510 | might have to increase its size. */ |
| 511 | _dl_add_to_slotinfo (imap); |
| 512 | |
| 513 | if (imap->l_need_tls_init |
| 514 | && first_static_tls == new->l_searchlist.r_nlist) |
| 515 | first_static_tls = i; |
| 516 | |
| 517 | /* We have to bump the generation counter. */ |
| 518 | any_tls = true; |
| 519 | } |
| 520 | |
| 521 | /* Print scope information. */ |
| 522 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES)) |
| 523 | _dl_show_scope (imap, from_scope); |
| 524 | } |
| 525 | |
| 526 | /* Bump the generation number if necessary. */ |
| 527 | if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0)) |
| 528 | _dl_fatal_printf (N_("\ |
| 529 | TLS generation counter wrapped! Please report this.")); |
| 530 | |
| 531 | /* We need a second pass for static tls data, because _dl_update_slotinfo |
| 532 | must not be run while calls to _dl_add_to_slotinfo are still pending. */ |
| 533 | for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i) |
| 534 | { |
| 535 | struct link_map *imap = new->l_searchlist.r_list[i]; |
| 536 | |
| 537 | if (imap->l_need_tls_init |
| 538 | && ! imap->l_init_called |
| 539 | && imap->l_tls_blocksize > 0) |
| 540 | { |
| 541 | /* For static TLS we have to allocate the memory here and |
| 542 | now, but we can delay updating the DTV. */ |
| 543 | imap->l_need_tls_init = 0; |
| 544 | #ifdef SHARED |
| 545 | /* Update the slot information data for at least the |
| 546 | generation of the DSO we are allocating data for. */ |
| 547 | _dl_update_slotinfo (imap->l_tls_modid); |
| 548 | #endif |
| 549 | |
| 550 | GL(dl_init_static_tls) (imap); |
| 551 | assert (imap->l_need_tls_init == 0); |
| 552 | } |
| 553 | } |
| 554 | |
| 555 | /* Notify the debugger all new objects have been relocated. */ |
| 556 | if (relocation_in_progress) |
| 557 | LIBC_PROBE (reloc_complete, 3, args->nsid, r, new); |
| 558 | |
| 559 | #ifndef SHARED |
| 560 | DL_STATIC_INIT (new); |
| 561 | #endif |
| 562 | |
| 563 | /* Run the initializer functions of new objects. */ |
| 564 | _dl_init (new, args->argc, args->argv, args->env); |
| 565 | |
| 566 | /* Now we can make the new map available in the global scope. */ |
| 567 | if (mode & RTLD_GLOBAL) |
| 568 | /* Move the object in the global namespace. */ |
| 569 | if (add_to_global (new) != 0) |
| 570 | /* It failed. */ |
| 571 | return; |
| 572 | |
| 573 | #ifndef SHARED |
| 574 | /* We must be the static _dl_open in libc.a. A static program that |
| 575 | has loaded a dynamic object now has competition. */ |
| 576 | __libc_multiple_libcs = 1; |
| 577 | #endif |
| 578 | |
| 579 | /* Let the user know about the opencount. */ |
| 580 | if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES)) |
| 581 | _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n", |
| 582 | new->l_name, new->l_ns, new->l_direct_opencount); |
| 583 | } |
| 584 | |
| 585 | |
| 586 | void * |
| 587 | _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid, |
| 588 | int argc, char *argv[], char *env[]) |
| 589 | { |
| 590 | if ((mode & RTLD_BINDING_MASK) == 0) |
| 591 | /* One of the flags must be set. */ |
| 592 | _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()")); |
| 593 | |
| 594 | /* Make sure we are alone. */ |
| 595 | __rtld_lock_lock_recursive (GL(dl_load_lock)); |
| 596 | |
| 597 | if (__glibc_unlikely (nsid == LM_ID_NEWLM)) |
| 598 | { |
| 599 | /* Find a new namespace. */ |
| 600 | for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid) |
| 601 | if (GL(dl_ns)[nsid]._ns_loaded == NULL) |
| 602 | break; |
| 603 | |
| 604 | if (__glibc_unlikely (nsid == DL_NNS)) |
| 605 | { |
| 606 | /* No more namespace available. */ |
| 607 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
| 608 | |
| 609 | _dl_signal_error (EINVAL, file, NULL, N_("\ |
| 610 | no more namespaces available for dlmopen()")); |
| 611 | } |
| 612 | else if (nsid == GL(dl_nns)) |
| 613 | { |
| 614 | __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock); |
| 615 | ++GL(dl_nns); |
| 616 | } |
| 617 | |
| 618 | _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT; |
| 619 | } |
| 620 | /* Never allow loading a DSO in a namespace which is empty. Such |
| 621 | direct placements is only causing problems. Also don't allow |
| 622 | loading into a namespace used for auditing. */ |
| 623 | else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER) |
| 624 | && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns)) |
| 625 | /* This prevents the [NSID] index expressions from being |
| 626 | evaluated, so the compiler won't think that we are |
| 627 | accessing an invalid index here in the !SHARED case where |
| 628 | DL_NNS is 1 and so any NSID != 0 is invalid. */ |
| 629 | || DL_NNS == 1 |
| 630 | || GL(dl_ns)[nsid]._ns_nloaded == 0 |
| 631 | || GL(dl_ns)[nsid]._ns_loaded->l_auditing)) |
| 632 | _dl_signal_error (EINVAL, file, NULL, |
| 633 | N_("invalid target namespace in dlmopen()")); |
| 634 | |
| 635 | struct dl_open_args args; |
| 636 | args.file = file; |
| 637 | args.mode = mode; |
| 638 | args.caller_dlopen = caller_dlopen; |
| 639 | args.caller_dl_open = RETURN_ADDRESS (0); |
| 640 | args.map = NULL; |
| 641 | args.nsid = nsid; |
| 642 | args.argc = argc; |
| 643 | args.argv = argv; |
| 644 | args.env = env; |
| 645 | |
| 646 | const char *objname; |
| 647 | const char *errstring; |
| 648 | bool malloced; |
| 649 | int errcode = _dl_catch_error (&objname, &errstring, &malloced, |
| 650 | dl_open_worker, &args); |
| 651 | |
| 652 | #if defined USE_LDCONFIG && !defined MAP_COPY |
| 653 | /* We must unmap the cache file. */ |
| 654 | _dl_unload_cache (); |
| 655 | #endif |
| 656 | |
| 657 | /* See if an error occurred during loading. */ |
| 658 | if (__glibc_unlikely (errstring != NULL)) |
| 659 | { |
| 660 | /* Remove the object from memory. It may be in an inconsistent |
| 661 | state if relocation failed, for example. */ |
| 662 | if (args.map) |
| 663 | { |
| 664 | /* Maybe some of the modules which were loaded use TLS. |
| 665 | Since it will be removed in the following _dl_close call |
| 666 | we have to mark the dtv array as having gaps to fill the |
| 667 | holes. This is a pessimistic assumption which won't hurt |
| 668 | if not true. There is no need to do this when we are |
| 669 | loading the auditing DSOs since TLS has not yet been set |
| 670 | up. */ |
| 671 | if ((mode & __RTLD_AUDIT) == 0) |
| 672 | GL(dl_tls_dtv_gaps) = true; |
| 673 | |
| 674 | _dl_close_worker (args.map, true); |
| 675 | } |
| 676 | |
| 677 | assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT); |
| 678 | |
| 679 | /* Release the lock. */ |
| 680 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
| 681 | |
| 682 | /* Make a local copy of the error string so that we can release the |
| 683 | memory allocated for it. */ |
| 684 | size_t len_errstring = strlen (errstring) + 1; |
| 685 | char *local_errstring; |
| 686 | if (objname == errstring + len_errstring) |
| 687 | { |
| 688 | size_t total_len = len_errstring + strlen (objname) + 1; |
| 689 | local_errstring = alloca (total_len); |
| 690 | memcpy (local_errstring, errstring, total_len); |
| 691 | objname = local_errstring + len_errstring; |
| 692 | } |
| 693 | else |
| 694 | { |
| 695 | local_errstring = alloca (len_errstring); |
| 696 | memcpy (local_errstring, errstring, len_errstring); |
| 697 | } |
| 698 | |
| 699 | if (malloced) |
| 700 | free ((char *) errstring); |
| 701 | |
| 702 | /* Reraise the error. */ |
| 703 | _dl_signal_error (errcode, objname, NULL, local_errstring); |
| 704 | } |
| 705 | |
| 706 | assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT); |
| 707 | |
| 708 | /* Release the lock. */ |
| 709 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
| 710 | |
| 711 | return args.map; |
| 712 | } |
| 713 | |
| 714 | |
| 715 | void |
| 716 | _dl_show_scope (struct link_map *l, int from) |
| 717 | { |
| 718 | _dl_debug_printf ("object=%s [%lu]\n", |
| 719 | DSO_FILENAME (l->l_name), l->l_ns); |
| 720 | if (l->l_scope != NULL) |
| 721 | for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt) |
| 722 | { |
| 723 | _dl_debug_printf (" scope %u:", scope_cnt); |
| 724 | |
| 725 | for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt) |
| 726 | if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name) |
| 727 | _dl_debug_printf_c (" %s", |
| 728 | l->l_scope[scope_cnt]->r_list[cnt]->l_name); |
| 729 | else |
| 730 | _dl_debug_printf_c (" %s", RTLD_PROGNAME); |
| 731 | |
| 732 | _dl_debug_printf_c ("\n"); |
| 733 | } |
| 734 | else |
| 735 | _dl_debug_printf (" no scope\n"); |
| 736 | _dl_debug_printf ("\n"); |
| 737 | } |
| 738 | |
| 739 | #if IS_IN (rtld) |
| 740 | /* Return non-zero if ADDR lies within one of L's segments. */ |
| 741 | int |
| 742 | internal_function |
| 743 | _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr) |
| 744 | { |
| 745 | int n = l->l_phnum; |
| 746 | const ElfW(Addr) reladdr = addr - l->l_addr; |
| 747 | |
| 748 | while (--n >= 0) |
| 749 | if (l->l_phdr[n].p_type == PT_LOAD |
| 750 | && reladdr - l->l_phdr[n].p_vaddr >= 0 |
| 751 | && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz) |
| 752 | return 1; |
| 753 | return 0; |
| 754 | } |
| 755 | #endif |