/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <atomic.h>

#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>

/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
#define TLS_STATIC_SURPLUS	(64 + DL_NNS * 100)
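/* For example, assuming the usual DL_NNS of 16 name spaces, this
   reserves 64 + 16 * 100 = 1664 bytes of static TLS beyond what the
   initially loaded objects themselves require.  */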


/* Out-of-memory handler.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}


size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo is not allocated
         yet when the function is called for the first time.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
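      /* (DTV[0] holds the generation counter and DTV[-1] the array
         length, so per-module entries start at index 1.)  */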
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}


size_t
internal_function
_dl_count_modids (void)
{
  /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
     we fail to load a module and unload it leaving a gap.  If we don't
     have gaps then the number of modids is the current maximum so
     return that.  */
  if (__glibc_likely (!GL(dl_tls_dtv_gaps)))
    return GL(dl_tls_max_dtv_idx);

  /* We have gaps and are forced to count the non-NULL entries.  */
  size_t n = 0;
  struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
  while (runp != NULL)
    {
      for (size_t i = 0; i < runp->len; ++i)
        if (runp->slotinfo[i].map != NULL)
          ++n;

      runp = runp->next;
    }

  return n;
}


#ifdef SHARED
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */
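  /* Worked example (hypothetical numbers, assuming l_tls_firstbyte_offset
     records p_vaddr mod p_align): if p_align is 16 and p_vaddr mod
     p_align is 4, then l_tls_firstbyte_offset is 4 and the "firstbyte"
     value computed below is (-4) & 15 == 12.  The block is then placed
     so that its first byte lies at an address congruent to 4 mod 16,
     reproducing the layout the linker assumed; the padding this
     creates is the free space tracked in freetop/freebottom.  */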

#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}


/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
                  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
#endif

static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
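  /* The two extra elements are bookkeeping: as seen through
     GET_DTV/THREAD_DTV after installation, dtv[-1].counter holds the
     array length and dtv[0].counter the generation number, while the
     per-module entries occupy index 1 onwards.  */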
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}


/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}


void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
          & ~(GL(dl_tls_static_align) - 1);
#endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
         libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
         We can't ask the caller (i.e. libpthread) to do it, because we will
         initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
              TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

      result = allocate_dtv (result);
      if (result == NULL)
        free (allocated);
    }

  return result;
}


#ifndef SHARED
extern dtv_t _dl_static_dtv[];
# define _dl_initial_dtv (&_dl_static_dtv[1])
#endif

static dtv_t *
_dl_resize_dtv (dtv_t *dtv)
{
  /* Resize the dtv.  */
  dtv_t *newp;
  /* Load GL(dl_tls_max_dtv_idx) atomically since it may be written to by
     other threads concurrently.  */
  size_t newsize
    = atomic_load_acquire (&GL(dl_tls_max_dtv_idx)) + DTV_SURPLUS;
  size_t oldsize = dtv[-1].counter;

  if (dtv == GL(dl_initial_dtv))
    {
      /* This is the initial dtv that was either statically allocated in
         __libc_setup_tls or allocated during rtld startup using the
         dl-minimal.c malloc instead of the real malloc.  We can't free
         it, we have to abandon the old storage.  */

      newp = malloc ((2 + newsize) * sizeof (dtv_t));
      if (newp == NULL)
        oom ();
      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
    }
  else
    {
      newp = realloc (&dtv[-1],
                      (2 + newsize) * sizeof (dtv_t));
      if (newp == NULL)
        oom ();
    }

  newp[0].counter = newsize;

  /* Clear the newly allocated part.  */
  memset (newp + 2 + oldsize, '\0',
          (newsize - oldsize) * sizeof (dtv_t));

  /* Return the generation counter.  */
  return &newp[1];
}


void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* Check if the current dtv is big enough.  */
  if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
    {
      /* Resize the dtv.  */
      dtv = _dl_resize_dtv (dtv);

      /* Install this new dtv in the thread data structures.  */
      INSTALL_DTV (result, &dtv[-1]);
    }

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
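  /* A slot left as TLS_DTV_UNALLOCATED is filled in lazily, on the
     first access through __tls_get_addr (see tls_get_addr_tail).  */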
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          assert (listp->slotinfo[cnt].gen <= GL(dl_tls_generation));
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
          dtv[map->l_tls_modid].pointer.is_static = false;

          if (map->l_tls_offset == NO_TLS_OFFSET
              || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
            continue;

          assert (map->l_tls_modid == total + cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
          dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

          /* Copy the initialization image and clear the BSS part.  */
          memset (__mempcpy (dest, map->l_tls_initimage,
                             map->l_tls_initimage_size), '\0',
                  map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)

void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
                                ? _dl_allocate_tls_storage ()
                                : allocate_dtv (mem));
}
rtld_hidden_def (_dl_allocate_tls)


void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* We need to free the memory allocated for non-static TLS.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
  if (dtv != GL(dl_initial_dtv))
    free (dtv - 1);

  if (dealloc_tcb)
    {
#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
#elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
             & ~(GL(dl_tls_static_align) - 1);
#endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)


#ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
#  define GET_ADDR_PARAM ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif
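/* For the structure form, tls_index is typically declared in the
   architecture's dl-tls.h roughly as

     typedef struct
     {
       unsigned long int ti_module;
       unsigned long int ti_offset;
     } tls_index;

   though the exact member types are target-specific.  */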


static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}


struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
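  /* Put differently: dtv[0].counter records the generation this
     thread's DTV was last synchronized to, and slotinfo entries
     stamped with a generation newer than the one that triggered this
     call are skipped below because they may still be in flux.  */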
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  if (dtv[-1].counter >= total + cnt)
                    {
                      /* If this modid was used at some point the memory
                         might still be allocated.  */
                      if (! dtv[total + cnt].pointer.is_static
                          && (dtv[total + cnt].pointer.val
                              != TLS_DTV_UNALLOCATED))
                        free (dtv[total + cnt].pointer.val);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                      dtv[total + cnt].pointer.is_static = false;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Resize the dtv.  */
                  dtv = _dl_resize_dtv (dtv);

                  assert (modid <= dtv[-1].counter);

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocated for this
                 dtv entry, free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              if (! dtv[modid].pointer.is_static
                  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
                /* Note that free is called for NULL as well.  We
                   deallocate even if it is this dtv entry we are
                   supposed to load.  The reason is that we call
                   memalign and not malloc.  */
                free (dtv[modid].pointer.val);

              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[modid].pointer.is_static = false;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}


static void *
__attribute_noinline__
tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
{
  /* The allocation was deferred.  Do it now.  */
  if (the_map == NULL)
    {
      /* Find the link map for this module.  */
      size_t idx = GET_ADDR_MODULE;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      the_map = listp->slotinfo[idx].map;
    }

  /* Make sure that, if a dlopen running in parallel forces the
     variable into static storage, we'll wait until the address in the
     static TLS block is set up, and use that.  If we're undecided
     yet, make sure we make the decision holding the lock as well.  */
  if (__glibc_unlikely (the_map->l_tls_offset
                        != FORCED_DYNAMIC_TLS_OFFSET))
    {
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
        {
          the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
        }
      else if (__glibc_likely (the_map->l_tls_offset
                               != FORCED_DYNAMIC_TLS_OFFSET))
        {
#if TLS_TCB_AT_TP
          void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
#elif TLS_DTV_AT_TP
          void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          dtv[GET_ADDR_MODULE].pointer.is_static = true;
          dtv[GET_ADDR_MODULE].pointer.val = p;

          return (char *) p + GET_ADDR_OFFSET;
        }
      else
        __rtld_lock_unlock_recursive (GL(dl_load_lock));
    }
  void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
  assert (!dtv[GET_ADDR_MODULE].pointer.is_static);

  return (char *) p + GET_ADDR_OFFSET;
}


static struct link_map *
__attribute_noinline__
update_get_addr (GET_ADDR_ARGS)
{
  struct link_map *the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
  dtv_t *dtv = THREAD_DTV ();

  void *p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
    return tls_get_addr_tail (GET_ADDR_PARAM, dtv, the_map);

  return (void *) p + GET_ADDR_OFFSET;
}

/* For all machines that have a non-macro version of __tls_get_addr, we
   want to use rtld_hidden_proto/rtld_hidden_def in order to call the
   internal alias for __tls_get_addr from ld.so. This avoids a PLT entry
   in ld.so for __tls_get_addr.  */

#ifndef __tls_get_addr
extern void * __tls_get_addr (GET_ADDR_ARGS);
rtld_hidden_proto (__tls_get_addr)
rtld_hidden_def (__tls_get_addr)
#endif

/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();

  if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
    return update_get_addr (GET_ADDR_PARAM);

  void *p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
    return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);

  return (char *) p + GET_ADDR_OFFSET;
}
#endif


/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__glibc_unlikely (l->l_tls_modid == 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
    {
      /* This thread's DTV is not completely current,
         but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
        /* Nope.  */
        return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      /* We've reached the slot for this module.
         If its generation counter is higher than the DTV's,
         this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
        return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__glibc_unlikely (data == TLS_DTV_UNALLOCATED))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}


void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
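  /* The new entry is stamped with the next generation number (see the
     end of this function); the caller is expected to bump
     GL(dl_tls_generation) once all newly loaded objects are set up,
     at which point other threads pick the entry up via
     __tls_get_addr/_dl_update_slotinfo.  */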
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
              TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}