/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

20#if defined SHARED || defined NOT_IN_libc
21# error in buildsystem: This file is for libc.a
22#endif
23#include <libintl.h>
24#include <signal.h>
25#include <stdlib.h>
26#include <sys/param.h>
27#include <tls.h>
28#include <dl-tls.h>
29#include <ldsodefs.h>
30#include <dl-elf.h>
31#include <dl-hash.h>
32
33#include <assert.h>
34#include <link.h>
35#include <string.h>
36#include <unistd.h>
37#include <stdio.h>
38
39#define _dl_malloc malloc
40#define _dl_memset memset
41#define _dl_mempcpy mempcpy
42#define _dl_dprintf fprintf
43#define _dl_debug_file stderr
44#define _dl_exit exit
45
46/* Amount of excess space to allocate in the static TLS area
47 to allow dynamic loading of modules defining IE-model TLS data. */
48# define TLS_STATIC_SURPLUS 64 + DL_NNS * 100
49
50/* Value used for dtv entries for which the allocation is delayed. */
51# define TLS_DTV_UNALLOCATED ((void *) -1l)
52
53
/* Out-of-memory handler: report the failure on the dynamic linker's
   diagnostic stream and terminate the process.  Never returns; only
   referenced from the SHARED code paths below.  */
# ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  for (;;)
    {
      _dl_dprintf (_dl_debug_file,
                   "cannot allocate thread-local memory: ABORT\n");
      _dl_exit (127);
    }
}
# endif
67
68
/* Allocate BYTES bytes of storage aligned to ALIGNMENT.
   The previous implementation fell back to plain malloc and silently
   ignored the alignment request, which can misalign TLS blocks whose
   l_tls_align exceeds malloc's natural guarantee.  Honor the request
   with aligned_alloc (C11; <stdlib.h> is already included above).
   ALIGNMENT is a power of two for every TLS consumer (it comes from
   p_align).  Returns NULL on failure.  */
void *_dl_memalign(size_t alignment, size_t bytes);
void *_dl_memalign(size_t alignment, size_t bytes)
{
  /* malloc already returns storage aligned for any fundamental type,
     so small requests need no special handling.  */
  if (alignment <= _Alignof (max_align_t))
    return malloc (bytes);

  /* C11 aligned_alloc requires the size to be a multiple of the
     alignment; round up, guarding against overflow.  */
  size_t rounded = (bytes + alignment - 1) & ~(alignment - 1);
  if (rounded < bytes)
    return NULL;
  return aligned_alloc (alignment, rounded);
}
74
75
/*
 * We are trying to perform a static TLS relocation in MAP, but it was
 * dynamically loaded.  This can only work if there is enough surplus in
 * the static TLS area already allocated for each running thread.  If this
 * object's TLS segment is too big to fit, we fail.  If it fits,
 * we set MAP->l_tls_offset and return.
 * This function intentionally does not return any value but signals error
 * directly, as static TLS should be rare and code handling it should
 * not be inlined as much as possible.
 */


void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  /* If the alignment requirements are too high fail.  */
  if (map->l_tls_align > _dl_tls_static_align)
    {
fail:
      /* No recovery is possible: report and terminate the process.  */
      _dl_dprintf(_dl_debug_file, "cannot allocate memory in static TLS block");
      _dl_exit(30);
    }

# if defined(TLS_TCB_AT_TP)
  /* Variant I layout: the TCB sits above the TLS blocks and offsets are
     positive distances below the thread pointer.  */
  size_t freebytes;
  size_t n;
  size_t blsize;

  /* Bytes still unused in the static TLS area (TCB excluded).  */
  freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;

  blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  /* Number of whole alignment units left over after placing the block;
     used to position the block as low as possible while keeping its
     first byte correctly aligned.  */
  n = (freebytes - blsize) / map->l_tls_align;

  size_t offset = _dl_tls_static_used + (freebytes - n * map->l_tls_align
                                         - map->l_tls_firstbyte_offset);

  map->l_tls_offset = _dl_tls_static_used = offset;
# elif defined(TLS_DTV_AT_TP)
  /* Variant II layout: TLS blocks follow the TCB; offsets grow upward.  */
  size_t used;
  size_t check;

  size_t offset = roundup (_dl_tls_static_used, map->l_tls_align);
  used = offset + map->l_tls_blocksize;
  check = used;

  /* dl_tls_static_used includes the TCB at the beginning.  */
  if (check > _dl_tls_static_size)
    goto fail;

  map->l_tls_offset = offset;
  _dl_tls_static_used = used;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /*
   * If the object is not yet relocated we cannot initialize the
   * static TLS region.  Delay it.
   */
  if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE)
    {
#ifdef SHARED
      /*
       * Update the slot information data for at least the generation of
       * the DSO we are allocating data for.
       */
      if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
        (void) _dl_update_slotinfo (map->l_tls_modid);
#endif
      _dl_init_static_tls (map);
    }
  else
    map->l_need_tls_init = 1;
}
154
/* Return a module index (dtv slot number) for a newly loaded object
   with TLS.  When dl_tls_dtv_gaps is set, scan the slotinfo list for a
   slot freed by a previously unloaded module and reuse it; otherwise
   extend the index space by incrementing dl_tls_max_dtv_idx.  */
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
	 start since there are no gaps at that time.  Therefore it
	 does not matter that the dl_tls_dtv_slotinfo is not allocated
	 yet when the function is called for the first times.

	 NB: the offset +1 is due to the fact that DTV[0] is used
	 for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
	do
	  {
	    /* Scan this list element for an unused (map == NULL) slot;
	       DISP is the index of the element's first slot.  */
	    while (result - disp < runp->len)
	      {
		if (runp->slotinfo[result - disp].map == NULL)
		  break;

		++result;
		assert (result <= GL(dl_tls_max_dtv_idx) + 1);
	      }

	    /* Found a free slot in this element; stop searching.  */
	    if (result - disp < runp->len)
	      break;

	    disp += runp->len;
	  }
	while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
	{
	  /* The new index must indeed be exactly one higher than the
	     previous high.  */
	  assert (result == GL(dl_tls_max_dtv_idx) + 1);
	  /* There is no gap anymore.  */
	  GL(dl_tls_dtv_gaps) = false;

	  goto nogaps;
	}
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
214
215
216# ifdef SHARED
/* Compute the offset of each initially loaded module's TLS block inside
   the static TLS area and derive the area's total size, used size and
   alignment requirement (stored in the GL(dl_tls_static_*) variables).
   Runs once at startup, before any threads exist, over the single
   slotinfo list element present at that time.  */
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  /* freetop/freebottom delimit a gap of alignment padding left behind
     by an earlier block, which a later (smaller) block may reuse.  */
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e, it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is layed out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */

# if defined(TLS_TCB_AT_TP)
  /* We simply start with zero.  */
  size_t offset = 0;

  size_t cnt;
  for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      /* Padding needed so the block's first byte keeps the file's
	 p_vaddr residue modulo p_align (see big comment above).  */
      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			  & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* First try to fit this block into a previously created gap.  */
      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
	{
	  off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
			 - firstbyte, slotinfo[cnt].map->l_tls_align)
		+ firstbyte;
	  if (off <= freebottom)
	    {
	      freetop = off;

	      /* XXX For some architectures we perhaps should store the
		 negative offset.  */
	      slotinfo[cnt].map->l_tls_offset = off;
	      continue;
	    }
	}

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
		     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
		+ (freebottom - freetop))
	{
	  /* Remember the newly created padding gap for later reuse.  */
	  freetop = offset;
	  freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
	}
      offset = off;

      /* XXX For some architectures we perhaps should store the
	 negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
			    + TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;
  size_t cnt;

  for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			  & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      /* First try to fit this block into a previously created gap.  */
      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
	{
	  off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
	  if (off - freebottom < firstbyte)
	    off += slotinfo[cnt].map->l_tls_align;
	  if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
	    {
	      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
	      freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
			    - firstbyte);
	      continue;
	    }
	}

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
	off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
	{
	  /* Remember the newly created padding gap for later reuse.  */
	  freebottom = offset;
	  freetop = off - firstbyte;
	}

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
				    TLS_TCB_ALIGN);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}
361
362
363/* This is called only when the data structure setup was skipped at startup,
364 when there was no need for it then. Now we have dynamically loaded
365 something needing TLS, or libpthread needs it. */
366int
367internal_function
368_dl_tls_setup (void)
369{
370 assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
371 assert (GL(dl_tls_max_dtv_idx) == 0);
372
373 const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;
374
375 GL(dl_tls_dtv_slotinfo_list)
376 = calloc (1, (sizeof (struct dtv_slotinfo_list)
377 + nelem * sizeof (struct dtv_slotinfo)));
378 if (GL(dl_tls_dtv_slotinfo_list) == NULL)
379 return -1;
380
381 GL(dl_tls_dtv_slotinfo_list)->len = nelem;
382
383 /* Number of elements in the static TLS block. It can't be zero
384 because of various assumptions. The one element is null. */
385 GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;
386
387 /* This initializes more variables for us. */
388 _dl_determine_tlsoffset ();
389
390 return 0;
391}
392# endif
393
394static void *
395internal_function
396allocate_dtv (void *result)
397{
398 dtv_t *dtv;
399 size_t dtv_length;
400
401 /* We allocate a few more elements in the dtv than are needed for the
402 initial set of modules. This should avoid in most cases expansions
403 of the dtv. */
404 dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
405 dtv = calloc (dtv_length + 2, sizeof (dtv_t));
406 if (dtv != NULL)
407 {
408 /* This is the initial length of the dtv. */
409 dtv[0].counter = dtv_length;
410
411 /* The rest of the dtv (including the generation counter) is
412 Initialize with zero to indicate nothing there. */
413
414 /* Add the dtv to the thread data structures. */
415 INSTALL_DTV (result, dtv);
416 }
417 else
418 result = NULL;
419
420 return result;
421}
422
423
/* Get size and alignment requirements of the static TLS block.  Stores
   the total byte size in *SIZEP and the required alignment in *ALIGNP,
   as previously computed by _dl_determine_tlsoffset.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}
432
433
/* Allocate the static TLS area plus TCB for a new thread and install a
   freshly allocated dtv into it.  Returns the TCB location within the
   allocation (layout depends on the TLS variant), or NULL on failure.
   The caller eventually frees via _dl_deallocate_tls.  */
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

# if defined(TLS_DTV_AT_TP)
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
			  ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	  & ~(GL(dl_tls_static_align) - 1);
# endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = _dl_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

# if defined(TLS_TCB_AT_TP)
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
	 libpthread) to do it, because we will initialize the DTV et al.  */
      _dl_memset (result, '\0', TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
	 We can't ask the caller (i.e. libpthread) to do it, because we will
	 initialize the DTV et al.  */
      _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
		  TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
# endif

      /* On dtv allocation failure release the whole block (ALLOCATED is
	 the original start of the allocation, not the TCB pointer).  */
      result = allocate_dtv (result);
      if (result == NULL)
	free (allocated);
    }

  return result;
}
480
481
/* Initialize the dtv and static TLS blocks of the thread descriptor
   RESULT (as produced by _dl_allocate_tls_storage / allocate_dtv):
   copy each statically placed module's init image into its block and
   mark dynamically loaded modules as deferred.  Returns RESULT, or
   NULL if RESULT is NULL.  */
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      /* Slot 0 of the first list element is unused: module ids start
	 at 1 (dtv[0] is the generation counter).  */
      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	{
	  struct link_map *map;
	  void *dest;

	  /* Check for the total number of used slots.  */
	  if (total + cnt > GL(dl_tls_max_dtv_idx))
	    break;

	  map = listp->slotinfo[cnt].map;
	  if (map == NULL)
	    /* Unused entry.  */
	    continue;

	  /* Keep track of the maximum generation number.  This might
	     not be the generation counter.  */
	  maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

	  if (map->l_tls_offset == NO_TLS_OFFSET)
	    {
	      /* For dynamically loaded modules we simply store
		 the value indicating deferred allocation.  */
	      dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
	      dtv[map->l_tls_modid].pointer.is_static = false;
	      continue;
	    }

	  assert (map->l_tls_modid == cnt);
	  assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# if defined(TLS_TCB_AT_TP)
	  /* Variant I: blocks live below the TCB, offset is a
	     downward distance.  */
	  assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
	  dest = (char *) result - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
	  /* Variant II: blocks live above the TCB.  */
	  dest = (char *) result + map->l_tls_offset;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	  /* Copy the initialization image and clear the BSS part.  */
	  dtv[map->l_tls_modid].pointer.val = dest;
	  dtv[map->l_tls_modid].pointer.is_static = true;
	  _dl_memset (_dl_mempcpy (dest, map->l_tls_initimage,
				   map->l_tls_initimage_size), '\0',
		      map->l_tls_blocksize - map->l_tls_initimage_size);
	}

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
	break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
562
563void *
564internal_function
565_dl_allocate_tls (void *mem)
566{
567 return _dl_allocate_tls_init (mem == NULL
568 ? _dl_allocate_tls_storage ()
569 : allocate_dtv (mem));
570}
571
572
/* Free the dynamically allocated TLS blocks recorded in TCB's dtv and
   the dtv itself (unless it is the initial dtv, in the SHARED build).
   If DEALLOC_TCB, also free the whole static TLS/TCB allocation made
   by _dl_allocate_tls_storage.  */
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);
  size_t cnt;

  /* We need to free the memory allocated for non-static TLS.  */
  for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
	&& dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  if (dtv != GL(dl_initial_dtv))
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
# if defined(TLS_TCB_AT_TP)
      /* The TCB follows the TLS blocks.  Back up to free the whole block.
	 NOTE(review): arithmetic on a void* relies on the GCC extension
	 treating it like char*.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
# elif defined(TLS_DTV_AT_TP)
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	     & ~(GL(dl_tls_static_align) - 1);
# endif
      free (tcb);
    }
}
605
606
607# ifdef SHARED
608/* The __tls_get_addr function has two basic forms which differ in the
609 arguments. The IA-64 form takes two parameters, the module ID and
610 offset. The form used, among others, on IA-32 takes a reference to
611 a special structure which contain the same information. The second
612 form seems to be more often used (in the moment) so we default to
613 it. Users of the IA-64 form have to provide adequate definitions
614 of the following macros. */
615# ifndef GET_ADDR_ARGS
616# define GET_ADDR_ARGS tls_index *ti
617# endif
618# ifndef GET_ADDR_MODULE
619# define GET_ADDR_MODULE ti->ti_module
620# endif
621# ifndef GET_ADDR_OFFSET
622# define GET_ADDR_OFFSET ti->ti_offset
623# endif
624
625
626static void *
627allocate_and_init (struct link_map *map)
628{
629 void *newp;
630
631 newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
632 if (newp == NULL)
633 oom ();
634
635 /* Initialize the memory. */
636 _dl_memset (_dl_mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
637 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
638
639 return newp;
640}
641
642
/* Bring the calling thread's dtv up to date with respect to module
   REQ_MODID: apply every slotinfo change whose generation is newer than
   the dtv's current generation but not newer than REQ_MODID's entry,
   growing the dtv if needed.  Returns the link map for REQ_MODID if it
   was encountered during the update, else NULL.  */
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  /* Locate the list element containing REQ_MODID's slot.  */
  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
	 current dtv implements.  We have to update the whole dtv but
	 only those entries with a generation counter <= the one for
	 the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
	{
	  size_t cnt;

	  /* Slot 0 of the first list element is reserved (modids start
	     at 1), hence the starting index of 1 on the first pass.  */
	  for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	    {
	      size_t gen = listp->slotinfo[cnt].gen;

	      if (gen > new_gen)
		/* This is a slot for a generation younger than the
		   one we are handling now.  It might be incompletely
		   set up so ignore it.  */
		continue;

	      /* If the entry is older than the current dtv layout we
		 know we don't have to handle it.  */
	      if (gen <= dtv[0].counter)
		continue;

	      /* If there is no map this means the entry is empty.  */
	      struct link_map *map = listp->slotinfo[cnt].map;
	      if (map == NULL)
		{
		  /* If this modid was used at some point the memory
		     might still be allocated.  */
		  if (! dtv[total + cnt].pointer.is_static
		      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
		    {
		      free (dtv[total + cnt].pointer.val);
		      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
		    }

		  continue;
		}

	      /* Check whether the current dtv array is large enough.  */
	      size_t modid = map->l_tls_modid;
	      assert (total + cnt == modid);
	      if (dtv[-1].counter < modid)
		{
		  /* Reallocate the dtv.  */
		  dtv_t *newp;
		  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
		  size_t oldsize = dtv[-1].counter;

		  assert (map->l_tls_modid <= newsize);

		  if (dtv == GL(dl_initial_dtv))
		    {
		      /* This is the initial dtv that was allocated
			 during rtld startup using the dl-minimal.c
			 malloc instead of the real malloc.  We can't
			 free it, we have to abandon the old storage.  */

		      newp = malloc ((2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		      /* NOTE(review): unlike _dl_memset/_dl_mempcpy,
			 _dl_memcpy has no #define at the top of this
			 file — verify it resolves in the SHARED build
			 this code is guarded for.  */
		      _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
		    }
		  else
		    {
		      newp = realloc (&dtv[-1],
				      (2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		    }

		  newp[0].counter = newsize;

		  /* Clear the newly allocated part.  */
		  _dl_memset (newp + 2 + oldsize, '\0',
			      (newsize - oldsize) * sizeof (dtv_t));

		  /* Point dtv to the generation counter.  */
		  dtv = &newp[1];

		  /* Install this new dtv in the thread data
		     structures.  */
		  INSTALL_NEW_DTV (dtv);
		}

	      /* If there is currently memory allocate for this
		 dtv entry free it.  */
	      /* XXX Ideally we will at some point create a memory
		 pool.  */
	      if (! dtv[modid].pointer.is_static
		  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
		/* Note that free is called for NULL is well.  We
		   deallocate even if it is this dtv entry we are
		   supposed to load.  The reason is that we call
		   memalign and not malloc.  */
		free (dtv[modid].pointer.val);

	      /* This module is loaded dynamically- We defer memory
		 allocation.  */
	      dtv[modid].pointer.is_static = false;
	      dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

	      if (modid == req_modid)
		the_map = map;
	    }

	  total += listp->len;
	}
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
796
797
/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  Resolve the address of the TLS
   variable described by GET_ADDR_ARGS (module id + offset within the
   module's block), synchronizing the dtv with the current generation
   and allocating the module's block on first use.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  /* If a dlopen/dlclose happened since this thread's dtv was last
     synchronized, refresh it first; this may hand back the link map
     for the requested module.  */
  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    the_map = _dl_update_slotinfo (GET_ADDR_MODULE);

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    {
      /* The allocation was deferred.  Do it now.  */
      if (the_map == NULL)
	{
	  /* Find the link map for this module.  */
	  size_t idx = GET_ADDR_MODULE;
	  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

	  while (idx >= listp->len)
	    {
	      idx -= listp->len;
	      listp = listp->next;
	    }

	  the_map = listp->slotinfo[idx].map;
	}

      /* Allocate and record the block; aborts on OOM.  */
      p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
      dtv[GET_ADDR_MODULE].pointer.is_static = false;
    }

  return (char *) p + GET_ADDR_OFFSET;
}
836# endif
837
838
839
/* Record the freshly loaded module L in the global dtv slotinfo table,
   appending a new list element if L's modid falls past the current end.
   On allocation failure the TLS generation is bumped and the process is
   terminated, since callers cannot recover.  */
void _dl_add_to_slotinfo (struct link_map *l);
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;			/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
	break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
	 to the slotinfo list.  And the new module must be in
	 the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
	malloc (sizeof (struct dtv_slotinfo_list)
		+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
	{
	  /* We ran out of memory.  We will simply fail this
	     call but don't undo anything we did so far.  The
	     application will crash or be terminated anyway very
	     soon.  */

	  /* We have to do this since some entries in the dtv
	     slotinfo array might already point to this
	     generation.  */
	  ++GL(dl_tls_generation);

	  _dl_dprintf (_dl_debug_file,
		       "cannot create TLS data structures: ABORT\n");
	  _dl_exit (127);
	}

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      /* malloc'ed memory is uninitialized; clear the new slots so
	 their map pointers read as NULL (unused).  */
      _dl_memset (listp->slotinfo, '\0',
		  TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  /* NOTE(review): the entry is stamped generation + 1; presumably the
     caller increments GL(dl_tls_generation) afterwards — verify
     against the dlopen path.  */
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}
901}