/* Manage TLS descriptors.  AArch64 version.

   Copyright (C) 2011-2016 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <link.h>
#include <ldsodefs.h>
#include <elf/dynamic-link.h>
#include <tls.h>
#include <dl-tlsdesc.h>
#include <dl-unmap-segments.h>
#include <tlsdeschtab.h>
#include <atomic.h>
| 29 | |
/* The following functions take an entry_check_offset argument.  It is
   computed by the caller as an offset between its entry point and the
   call site, such that by adding the built-in return address that is
   implicitly passed to the function with this offset, we can easily
   obtain the caller's entry point to compare with the entry point
   given in the TLS descriptor.  If it has changed, we want to return
   immediately.  */

/* This function is used to lazily resolve TLS_DESC RELA relocations.
   The argument location is used to hold a pointer to the relocation.  */
| 40 | |
| 41 | void |
| 42 | attribute_hidden |
| 43 | _dl_tlsdesc_resolve_rela_fixup (struct tlsdesc *td, struct link_map *l) |
| 44 | { |
| 45 | const ElfW(Rela) *reloc = atomic_load_relaxed (&td->arg); |
| 46 | |
| 47 | /* After GL(dl_load_lock) is grabbed only one caller can see td->entry in |
| 48 | initial state in _dl_tlsdesc_resolve_early_return_p, other concurrent |
| 49 | callers will return and retry calling td->entry. The updated td->entry |
| 50 | synchronizes with the single writer so all read accesses here can use |
| 51 | relaxed order. */ |
| 52 | if (_dl_tlsdesc_resolve_early_return_p |
| 53 | (td, (void*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + l->l_addr))) |
| 54 | return; |
| 55 | |
| 56 | /* The code below was borrowed from _dl_fixup(), |
| 57 | except for checking for STB_LOCAL. */ |
| 58 | const ElfW(Sym) *const symtab |
| 59 | = (const void *) D_PTR (l, l_info[DT_SYMTAB]); |
| 60 | const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]); |
| 61 | const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)]; |
| 62 | lookup_t result; |
| 63 | |
| 64 | /* Look up the target symbol. If the normal lookup rules are not |
| 65 | used don't look in the global scope. */ |
| 66 | if (ELFW(ST_BIND) (sym->st_info) != STB_LOCAL |
| 67 | && __builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0) |
| 68 | { |
| 69 | const struct r_found_version *version = NULL; |
| 70 | |
| 71 | if (l->l_info[VERSYMIDX (DT_VERSYM)] != NULL) |
| 72 | { |
| 73 | const ElfW(Half) *vernum = |
| 74 | (const void *) D_PTR (l, l_info[VERSYMIDX (DT_VERSYM)]); |
| 75 | ElfW(Half) ndx = vernum[ELFW(R_SYM) (reloc->r_info)] & 0x7fff; |
| 76 | version = &l->l_versions[ndx]; |
| 77 | if (version->hash == 0) |
| 78 | version = NULL; |
| 79 | } |
| 80 | |
| 81 | result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym, |
| 82 | l->l_scope, version, ELF_RTYPE_CLASS_PLT, |
| 83 | DL_LOOKUP_ADD_DEPENDENCY, NULL); |
| 84 | } |
| 85 | else |
| 86 | { |
| 87 | /* We already found the symbol. The module (and therefore its load |
| 88 | address) is also known. */ |
| 89 | result = l; |
| 90 | } |
| 91 | |
| 92 | if (!sym) |
| 93 | { |
| 94 | atomic_store_relaxed (&td->arg, (void *) reloc->r_addend); |
| 95 | /* This release store synchronizes with the ldar acquire load |
| 96 | instruction in _dl_tlsdesc_undefweak. */ |
| 97 | atomic_store_release (&td->entry, _dl_tlsdesc_undefweak); |
| 98 | } |
| 99 | else |
| 100 | { |
| 101 | # ifndef SHARED |
| 102 | CHECK_STATIC_TLS (l, result); |
| 103 | # else |
| 104 | if (!TRY_STATIC_TLS (l, result)) |
| 105 | { |
| 106 | void *p = _dl_make_tlsdesc_dynamic (result, sym->st_value |
| 107 | + reloc->r_addend); |
| 108 | atomic_store_relaxed (&td->arg, p); |
| 109 | /* This release store synchronizes with the ldar acquire load |
| 110 | instruction in _dl_tlsdesc_dynamic. */ |
| 111 | atomic_store_release (&td->entry, _dl_tlsdesc_dynamic); |
| 112 | } |
| 113 | else |
| 114 | # endif |
| 115 | { |
| 116 | void *p = (void*) (sym->st_value + result->l_tls_offset |
| 117 | + reloc->r_addend); |
| 118 | atomic_store_relaxed (&td->arg, p); |
| 119 | /* This release store synchronizes with the ldar acquire load |
| 120 | instruction in _dl_tlsdesc_return_lazy. */ |
| 121 | atomic_store_release (&td->entry, _dl_tlsdesc_return_lazy); |
| 122 | } |
| 123 | } |
| 124 | |
| 125 | _dl_tlsdesc_wake_up_held_fixups (); |
| 126 | } |

/* This function is used to avoid busy waiting for other threads to
   complete the lazy relocation.  Once another thread wins the race to
   relocate a TLS descriptor, it sets the descriptor up such that this
   function is called to wait until the resolver releases the
   lock.  */

| 134 | void |
| 135 | attribute_hidden |
| 136 | _dl_tlsdesc_resolve_hold_fixup (struct tlsdesc *td, void *caller) |
| 137 | { |
| 138 | /* Maybe we're lucky and can return early. */ |
| 139 | if (caller != atomic_load_relaxed (&td->entry)) |
| 140 | return; |
| 141 | |
| 142 | /* Locking here will stop execution until the running resolver runs |
| 143 | _dl_tlsdesc_wake_up_held_fixups(), releasing the lock. |
| 144 | |
| 145 | FIXME: We'd be better off waiting on a condition variable, such |
| 146 | that we didn't have to hold the lock throughout the relocation |
| 147 | processing. */ |
| 148 | __rtld_lock_lock_recursive (GL(dl_load_lock)); |
| 149 | __rtld_lock_unlock_recursive (GL(dl_load_lock)); |
| 150 | } |


/* Unmap the dynamic object, but also release its TLS descriptor table
   if there is one.  */

| 156 | void |
| 157 | internal_function |
| 158 | _dl_unmap (struct link_map *map) |
| 159 | { |
| 160 | _dl_unmap_segments (map); |
| 161 | |
| 162 | #ifdef SHARED |
| 163 | if (map->l_mach.tlsdesc_table) |
| 164 | htab_delete (map->l_mach.tlsdesc_table); |
| 165 | #endif |
| 166 | } |