/* Imported source: uClibc LinuxThreads pthread.c
   (blob 66efe3a25bda090ce3a1b7b83ff7803f81b1abb5, scraped 2023-04-07). */
/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */

/* Thread creation, initialization, and basic low-level routines */
17
18#include <errno.h>
19#include <stddef.h>
20#include <stdio.h>
21#include <stdlib.h>
22#include <string.h>
23#include <unistd.h>
24#include <fcntl.h>
25#include <sys/wait.h>
26#include <sys/resource.h>
27#include <sys/time.h>
28#include "pthread.h"
29#include "internals.h"
30#include "spinlock.h"
31#include "restart.h"
32#include "smp.h"
33#include <not-cancel.h>
34
35/* Sanity check. */
36#if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
37# error "This must not happen"
38#endif
39
40/* mods for uClibc: __libc_sigaction is not in any standard headers */
41extern __typeof(sigaction) __libc_sigaction;
42
43#if !(USE_TLS && HAVE___THREAD)
44/* These variables are used by the setup code. */
45extern int _errno;
46extern int _h_errno;
47
48# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
49/* We need the global/static resolver state here. */
50# include <resolv.h>
51# undef _res
52extern struct __res_state *__resp;
53# endif
54#endif
55
56#ifdef USE_TLS
57
58/* We need only a few variables. */
59#define manager_thread __pthread_manager_threadp
60pthread_descr __pthread_manager_threadp attribute_hidden;
61
62#else
63
64/* Descriptor of the initial thread */
65
66struct _pthread_descr_struct __pthread_initial_thread = {
67 .p_header.data.self = &__pthread_initial_thread,
68 .p_nextlive = &__pthread_initial_thread,
69 .p_prevlive = &__pthread_initial_thread,
70 .p_tid = PTHREAD_THREADS_MAX,
71 .p_lock = &__pthread_handles[0].h_lock,
72 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
73#if !(USE_TLS && HAVE___THREAD)
74 .p_errnop = &_errno,
75 .p_h_errnop = &_h_errno,
76#endif
77 .p_userstack = 1,
78 .p_resume_count = __ATOMIC_INITIALIZER,
79 .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
80};
81
82/* Descriptor of the manager thread; none of this is used but the error
83 variables, the p_pid and p_priority fields,
84 and the address for identification. */
85
86#define manager_thread (&__pthread_manager_thread)
87struct _pthread_descr_struct __pthread_manager_thread = {
88 .p_header.data.self = &__pthread_manager_thread,
89 .p_header.data.multiple_threads = 1,
90 .p_lock = &__pthread_handles[1].h_lock,
91 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
92#if !(USE_TLS && HAVE___THREAD)
93 .p_errnop = &__pthread_manager_thread.p_errno,
94#endif
95 .p_nr = 1,
96 .p_resume_count = __ATOMIC_INITIALIZER,
97 .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
98};
99#endif
100
101/* Pointer to the main thread (the father of the thread manager thread) */
102/* Originally, this is the initial thread, but this changes after fork() */
103
104#ifdef USE_TLS
105pthread_descr __pthread_main_thread;
106#else
107pthread_descr __pthread_main_thread = &__pthread_initial_thread;
108#endif
109
110/* Limit between the stack of the initial thread (above) and the
111 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
112
113char *__pthread_initial_thread_bos;
114
115/* File descriptor for sending requests to the thread manager. */
116/* Initially -1, meaning that the thread manager is not running. */
117
118int __pthread_manager_request = -1;
119
120int __pthread_multiple_threads attribute_hidden;
121
122/* Other end of the pipe for sending requests to the thread manager. */
123
124int __pthread_manager_reader;
125
126/* Limits of the thread manager stack */
127
128char *__pthread_manager_thread_bos;
129char *__pthread_manager_thread_tos;
130
131/* For process-wide exit() */
132
133int __pthread_exit_requested;
134int __pthread_exit_code;
135
136/* Maximum stack size. */
137size_t __pthread_max_stacksize;
138
/* Nonzero if the machine has more than one processor. */
140int __pthread_smp_kernel;
141
142
143#if !__ASSUME_REALTIME_SIGNALS
144/* Pointers that select new or old suspend/resume functions
145 based on availability of rt signals. */
146
147void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
148void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
149int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
150#endif /* __ASSUME_REALTIME_SIGNALS */
151
152/* Communicate relevant LinuxThreads constants to gdb */
153
154const int __pthread_threads_max = PTHREAD_THREADS_MAX;
155const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
156const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
157 h_descr);
158const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
159 p_pid);
160const int __linuxthreads_pthread_sizeof_descr
161 = sizeof(struct _pthread_descr_struct);
162
163const int __linuxthreads_initial_report_events;
164
165const char __linuxthreads_version[] = VERSION;
166
167/* Forward declarations */
168
169static void pthread_onexit_process(int retcode, void *arg);
170#ifndef HAVE_Z_NODELETE
171static void pthread_atexit_process(void *arg, int retcode);
172static void pthread_atexit_retcode(void *arg, int retcode);
173#endif
174static void pthread_handle_sigcancel(int sig);
175static void pthread_handle_sigrestart(int sig);
176static void pthread_handle_sigdebug(int sig);
177
178/* Signal numbers used for the communication.
179 In these variables we keep track of the used variables. If the
180 platform does not support any real-time signals we will define the
181 values to some unreasonable value which will signal failing of all
182 the functions below. */
183int __pthread_sig_restart = __SIGRTMIN;
184int __pthread_sig_cancel = __SIGRTMIN + 1;
185int __pthread_sig_debug = __SIGRTMIN + 2;
186
187extern int __libc_current_sigrtmin_private (void);
188
189#if !__ASSUME_REALTIME_SIGNALS
190static int rtsigs_initialized;
191
192static void
193init_rtsigs (void)
194{
195 if (rtsigs_initialized)
196 return;
197
198 if (__libc_current_sigrtmin_private () == -1)
199 {
200 __pthread_sig_restart = SIGUSR1;
201 __pthread_sig_cancel = SIGUSR2;
202 __pthread_sig_debug = 0;
203 }
204 else
205 {
206 __pthread_restart = __pthread_restart_new;
207 __pthread_suspend = __pthread_wait_for_restart_signal;
208 __pthread_timedsuspend = __pthread_timedsuspend_new;
209 }
210
211 rtsigs_initialized = 1;
212}
213#endif
214
215
216/* Initialize the pthread library.
217 Initialization is split in two functions:
218 - a constructor function that blocks the __pthread_sig_restart signal
219 (must do this very early, since the program could capture the signal
220 mask with e.g. sigsetjmp before creating the first thread);
221 - a regular function called from pthread_create when needed. */
222
223static void pthread_initialize(void) __attribute__((constructor));
224
225#ifndef HAVE_Z_NODELETE
226extern void *__dso_handle __attribute__ ((weak));
227#endif
228
229
230#if defined USE_TLS && !defined SHARED
231extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
232#endif
233
234struct pthread_functions __pthread_functions =
235 {
236#if !(USE_TLS && HAVE___THREAD)
237 .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
238 .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
239 .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
240#endif
241 .ptr_pthread_fork = __pthread_fork,
242 .ptr_pthread_attr_destroy = __pthread_attr_destroy,
243 .ptr_pthread_attr_init = __pthread_attr_init,
244 .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
245 .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
246 .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
247 .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
248 .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
249 .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
250 .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
251 .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
252 .ptr_pthread_attr_getscope = __pthread_attr_getscope,
253 .ptr_pthread_attr_setscope = __pthread_attr_setscope,
254 .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
255 .ptr_pthread_condattr_init = __pthread_condattr_init,
256 .ptr_pthread_cond_broadcast = __pthread_cond_broadcast,
257 .ptr_pthread_cond_destroy = __pthread_cond_destroy,
258 .ptr_pthread_cond_init = __pthread_cond_init,
259 .ptr_pthread_cond_signal = __pthread_cond_signal,
260 .ptr_pthread_cond_wait = __pthread_cond_wait,
261 .ptr_pthread_cond_timedwait = __pthread_cond_timedwait,
262 .ptr_pthread_equal = __pthread_equal,
263 .ptr___pthread_exit = __pthread_exit,
264 .ptr_pthread_getschedparam = __pthread_getschedparam,
265 .ptr_pthread_setschedparam = __pthread_setschedparam,
266 .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
267 .ptr_pthread_mutex_init = __pthread_mutex_init,
268 .ptr_pthread_mutex_lock = __pthread_mutex_lock,
269 .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
270 .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
271 .ptr_pthread_self = __pthread_self,
272 .ptr_pthread_setcancelstate = __pthread_setcancelstate,
273 .ptr_pthread_setcanceltype = __pthread_setcanceltype,
274 .ptr_pthread_do_exit = __pthread_do_exit,
275 .ptr_pthread_thread_self = __pthread_thread_self,
276 .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
277 .ptr_pthread_sigaction = __pthread_sigaction,
278 .ptr_pthread_sigwait = __pthread_sigwait,
279 .ptr_pthread_raise = __pthread_raise,
280 .ptr__pthread_cleanup_push = _pthread_cleanup_push,
281 .ptr__pthread_cleanup_push_defer = _pthread_cleanup_push_defer,
282 .ptr__pthread_cleanup_pop = _pthread_cleanup_pop,
283 .ptr__pthread_cleanup_pop_restore = _pthread_cleanup_pop_restore,
284 };
285#ifdef SHARED
286# define ptr_pthread_functions &__pthread_functions
287#else
288# define ptr_pthread_functions NULL
289#endif
290
291static int *__libc_multiple_threads_ptr;
292
293/* Do some minimal initialization which has to be done during the
294 startup of the C library. */
295void
296__pthread_initialize_minimal(void)
297{
298#ifdef USE_TLS
299 pthread_descr self;
300
301 /* First of all init __pthread_handles[0] and [1] if needed. */
302# if __LT_SPINLOCK_INIT != 0
303 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
304 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
305# endif
306# ifndef SHARED
307 /* Unlike in the dynamically linked case the dynamic linker has not
308 taken care of initializing the TLS data structures. */
309 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
310# elif !USE___THREAD
311 if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
312 {
313 tcbhead_t *tcbp;
314
315 /* There is no actual TLS being used, so the thread register
316 was not initialized in the dynamic linker. */
317
318 /* We need to install special hooks so that the malloc and memalign
319 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
320 malloc initialization that will try to set up its thread state. */
321
322 extern void __libc_malloc_pthread_startup (bool first_time);
323 __libc_malloc_pthread_startup (true);
324
325 if (__builtin_expect (_dl_tls_setup (), 0)
326 || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
327 {
328 static const char msg[] = "\
329cannot allocate TLS data structures for initial thread\n";
330 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
331 msg, sizeof msg - 1));
332 abort ();
333 }
334 const char *lossage = TLS_INIT_TP (tcbp, 0);
335 if (__builtin_expect (lossage != NULL, 0))
336 {
337 static const char msg[] = "cannot set up thread-local storage: ";
338 const char nl = '\n';
339 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
340 msg, sizeof msg - 1));
341 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
342 lossage, strlen (lossage)));
343 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1));
344 }
345
346 /* Though it was allocated with libc's malloc, that was done without
347 the user's __malloc_hook installed. A later realloc that uses
348 the hooks might not work with that block from the plain malloc.
349 So we record this block as unfreeable just as the dynamic linker
350 does when it allocates the DTV before the libc malloc exists. */
351 GL(dl_initial_dtv) = GET_DTV (tcbp);
352
353 __libc_malloc_pthread_startup (false);
354 }
355# endif
356
357 self = THREAD_SELF;
358
359 /* The memory for the thread descriptor was allocated elsewhere as
360 part of the TLS allocation. We have to initialize the data
361 structure by hand. This initialization must mirror the struct
362 definition above. */
363 self->p_nextlive = self->p_prevlive = self;
364 self->p_tid = PTHREAD_THREADS_MAX;
365 self->p_lock = &__pthread_handles[0].h_lock;
366# ifndef HAVE___THREAD
367 self->p_errnop = &_errno;
368 self->p_h_errnop = &_h_errno;
369# endif
370 /* self->p_start_args need not be initialized, it's all zero. */
371 self->p_userstack = 1;
372# if __LT_SPINLOCK_INIT != 0
373 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
374# endif
375 self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
376
377 /* Another variable which points to the thread descriptor. */
378 __pthread_main_thread = self;
379
380 /* And fill in the pointer the the thread __pthread_handles array. */
381 __pthread_handles[0].h_descr = self;
382
383#else /* USE_TLS */
384
385 /* First of all init __pthread_handles[0] and [1]. */
386# if __LT_SPINLOCK_INIT != 0
387 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
388 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
389# endif
390 __pthread_handles[0].h_descr = &__pthread_initial_thread;
391 __pthread_handles[1].h_descr = &__pthread_manager_thread;
392
393 /* If we have special thread_self processing, initialize that for the
394 main thread now. */
395# ifdef INIT_THREAD_SELF
396 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
397# endif
398#endif
399
400#if HP_TIMING_AVAIL
401# ifdef USE_TLS
402 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
403# else
404 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
405# endif
406#endif
407
408 __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
409}
410
411
412void
413__pthread_init_max_stacksize(void)
414{
415 struct rlimit limit;
416 size_t max_stack;
417
418 getrlimit(RLIMIT_STACK, &limit);
419#ifdef FLOATING_STACKS
420 if (limit.rlim_cur == RLIM_INFINITY)
421 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
422# ifdef NEED_SEPARATE_REGISTER_STACK
423 max_stack = limit.rlim_cur / 2;
424# else
425 max_stack = limit.rlim_cur;
426# endif
427#else
428 /* Play with the stack size limit to make sure that no stack ever grows
429 beyond STACK_SIZE minus one page (to act as a guard page). */
430# ifdef NEED_SEPARATE_REGISTER_STACK
431 /* STACK_SIZE bytes hold both the main stack and register backing
432 store. The rlimit value applies to each individually. */
433 max_stack = STACK_SIZE/2 - __getpagesize ();
434# else
435 max_stack = STACK_SIZE - __getpagesize();
436# endif
437 if (limit.rlim_cur > max_stack) {
438 limit.rlim_cur = max_stack;
439 setrlimit(RLIMIT_STACK, &limit);
440 }
441#endif
442 __pthread_max_stacksize = max_stack;
443 if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
444 {
445#ifdef USE_TLS
446 pthread_descr self = THREAD_SELF;
447 self->p_alloca_cutoff = max_stack / 4;
448#else
449 __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
450#endif
451 }
452}
453
454/* psm: we do not have any ld.so support yet
455 * remove the USE_TLS guard if nptl is added */
456#if defined SHARED && defined USE_TLS
457# if USE___THREAD
458/* When using __thread for this, we do it in libc so as not
459 to give libpthread its own TLS segment just for this. */
460extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
461# else
462static void ** __attribute__ ((const))
463__libc_dl_error_tsd (void)
464{
465 return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
466}
467# endif
468#endif
469
470#ifdef USE_TLS
471static __inline__ void __attribute__((always_inline))
472init_one_static_tls (pthread_descr descr, struct link_map *map)
473{
474# if defined(TLS_TCB_AT_TP)
475 dtv_t *dtv = GET_DTV (descr);
476 void *dest = (char *) descr - map->l_tls_offset;
477# elif defined(TLS_DTV_AT_TP)
478 dtv_t *dtv = GET_DTV ((pthread_descr) ((char *) descr + TLS_PRE_TCB_SIZE));
479 void *dest = (char *) descr + map->l_tls_offset + TLS_PRE_TCB_SIZE;
480# else
481# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
482# endif
483
484 /* Fill in the DTV slot so that a later LD/GD access will find it. */
485 dtv[map->l_tls_modid].pointer.val = dest;
486 dtv[map->l_tls_modid].pointer.is_static = true;
487
488 /* Initialize the memory. */
489 memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
490 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
491}
492
493static void
494__pthread_init_static_tls (struct link_map *map)
495{
496 size_t i;
497
498 for (i = 0; i < PTHREAD_THREADS_MAX; ++i)
499 if (__pthread_handles[i].h_descr != NULL && i != 1)
500 {
501 __pthread_lock (&__pthread_handles[i].h_lock, NULL);
502 if (__pthread_handles[i].h_descr != NULL)
503 init_one_static_tls (__pthread_handles[i].h_descr, map);
504 __pthread_unlock (&__pthread_handles[i].h_lock);
505 }
506}
507#endif
508
509static void pthread_initialize(void)
510{
511 struct sigaction sa;
512 sigset_t mask;
513
514 /* If already done (e.g. by a constructor called earlier!), bail out */
515 if (__pthread_initial_thread_bos != NULL) return;
516#ifdef TEST_FOR_COMPARE_AND_SWAP
517 /* Test if compare-and-swap is available */
518 __pthread_has_cas = compare_and_swap_is_available();
519#endif
520#ifdef FLOATING_STACKS
521 /* We don't need to know the bottom of the stack. Give the pointer some
522 value to signal that initialization happened. */
523 __pthread_initial_thread_bos = (void *) -1l;
524#else
525 /* Determine stack size limits . */
526 __pthread_init_max_stacksize ();
527# ifdef _STACK_GROWS_UP
528 /* The initial thread already has all the stack it needs */
529 __pthread_initial_thread_bos = (char *)
530 ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
531# else
532 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
533 below the current stack address, and align that on a
534 STACK_SIZE boundary. */
535 __pthread_initial_thread_bos =
536 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
537# endif
538#endif
539#ifdef USE_TLS
540 /* Update the descriptor for the initial thread. */
541 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
542# if !defined HAVE___THREAD && defined __UCLIBC_HAS_RESOLVER_SUPPORT__
543 /* Likewise for the resolver state _res. */
544 THREAD_SETMEM (((pthread_descr) NULL), p_resp, __resp);
545# endif
546#else
547 /* Update the descriptor for the initial thread. */
548 __pthread_initial_thread.p_pid = __getpid();
549# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
550 /* Likewise for the resolver state _res. */
551 __pthread_initial_thread.p_resp = __resp;
552# endif
553#endif
554#if !__ASSUME_REALTIME_SIGNALS
555 /* Initialize real-time signals. */
556 init_rtsigs ();
557#endif
558 /* Setup signal handlers for the initial thread.
559 Since signal handlers are shared between threads, these settings
560 will be inherited by all other threads. */
561 memset(&sa, 0, sizeof(sa));
562 sa.sa_handler = pthread_handle_sigrestart;
563 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
564 sa.sa_handler = pthread_handle_sigcancel;
565 sigaddset(&sa.sa_mask, __pthread_sig_restart);
566 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
567 if (__pthread_sig_debug > 0) {
568 sa.sa_handler = pthread_handle_sigdebug;
569 __sigemptyset(&sa.sa_mask);
570 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
571 }
572 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
573 __sigemptyset(&mask);
574 sigaddset(&mask, __pthread_sig_restart);
575 sigprocmask(SIG_BLOCK, &mask, NULL);
576 /* And unblock __pthread_sig_cancel if it has been blocked. */
577 sigdelset(&mask, __pthread_sig_restart);
578 sigaddset(&mask, __pthread_sig_cancel);
579 sigprocmask(SIG_UNBLOCK, &mask, NULL);
580 /* Register an exit function to kill all other threads. */
581 /* Do it early so that user-registered atexit functions are called
582 before pthread_*exit_process. */
583#ifndef HAVE_Z_NODELETE
584 if (__builtin_expect (&__dso_handle != NULL, 1))
585 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
586 __dso_handle);
587 else
588#endif
589 __on_exit (pthread_onexit_process, NULL);
590 /* How many processors. */
591 __pthread_smp_kernel = is_smp_system ();
592
593/* psm: we do not have any ld.so support yet
594 * remove the USE_TLS guard if nptl is added */
595#if defined SHARED && defined USE_TLS
596 /* Transfer the old value from the dynamic linker's internal location. */
597 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
598 GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
599
600 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
601 keep the lock count from the ld.so implementation. */
602 GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
603 GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
604 unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__m_count;
605 GL(dl_load_lock).mutex.__m_count = 0;
606 while (rtld_lock_count-- > 0)
607 __pthread_mutex_lock (&GL(dl_load_lock).mutex);
608#endif
609
610#ifdef USE_TLS
611 GL(dl_init_static_tls) = &__pthread_init_static_tls;
612#endif
613
614 /* uClibc-specific stdio initialization for threads. */
615 {
616 FILE *fp;
617 _stdio_user_locking = 0; /* 2 if threading not initialized */
618 for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) {
619 if (fp->__user_locking != 1) {
620 fp->__user_locking = 0;
621 }
622 }
623 }
624}
625
/* Public entry point wrapping the constructor above.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
630
631int __pthread_initialize_manager(void)
632{
633 int manager_pipe[2];
634 int pid;
635 struct pthread_request request;
636 int report_events;
637 pthread_descr mgr;
638#ifdef USE_TLS
639 tcbhead_t *tcbp;
640#endif
641
642 __pthread_multiple_threads = 1;
643#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
644 __pthread_main_thread->p_multiple_threads = 1;
645#endif
646 *__libc_multiple_threads_ptr = 1;
647
648#ifndef HAVE_Z_NODELETE
649 if (__builtin_expect (&__dso_handle != NULL, 1))
650 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
651 __dso_handle);
652#endif
653
654 if (__pthread_max_stacksize == 0)
655 __pthread_init_max_stacksize ();
656 /* If basic initialization not done yet (e.g. we're called from a
657 constructor run before our constructor), do it now */
658 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
659 /* Setup stack for thread manager */
660 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
661 if (__pthread_manager_thread_bos == NULL) return -1;
662 __pthread_manager_thread_tos =
663 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
664 /* Setup pipe to communicate with thread manager */
665 if (pipe(manager_pipe) == -1) {
666 free(__pthread_manager_thread_bos);
667 return -1;
668 }
669
670#ifdef USE_TLS
671 /* Allocate memory for the thread descriptor and the dtv. */
672 tcbp = _dl_allocate_tls (NULL);
673 if (tcbp == NULL) {
674 free(__pthread_manager_thread_bos);
675 close_not_cancel(manager_pipe[0]);
676 close_not_cancel(manager_pipe[1]);
677 return -1;
678 }
679
680# if defined(TLS_TCB_AT_TP)
681 mgr = (pthread_descr) tcbp;
682# elif defined(TLS_DTV_AT_TP)
683 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
684 returns. */
685 mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
686# endif
687 __pthread_handles[1].h_descr = manager_thread = mgr;
688
689 /* Initialize the descriptor. */
690#if !defined USE_TLS || !TLS_DTV_AT_TP
691 mgr->p_header.data.tcb = tcbp;
692 mgr->p_header.data.self = mgr;
693 mgr->p_header.data.multiple_threads = 1;
694#elif TLS_MULTIPLE_THREADS_IN_TCB
695 mgr->p_multiple_threads = 1;
696#endif
697 mgr->p_lock = &__pthread_handles[1].h_lock;
698# ifndef HAVE___THREAD
699 mgr->p_errnop = &mgr->p_errno;
700# endif
701 mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
702 mgr->p_nr = 1;
703# if __LT_SPINLOCK_INIT != 0
704 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
705# endif
706 mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
707#else
708 mgr = &__pthread_manager_thread;
709#endif
710
711 __pthread_manager_request = manager_pipe[1]; /* writing end */
712 __pthread_manager_reader = manager_pipe[0]; /* reading end */
713
714 /* Start the thread manager */
715 pid = 0;
716#ifdef USE_TLS
717 if (__linuxthreads_initial_report_events != 0)
718 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
719 __linuxthreads_initial_report_events);
720 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
721#else
722 if (__linuxthreads_initial_report_events != 0)
723 __pthread_initial_thread.p_report_events
724 = __linuxthreads_initial_report_events;
725 report_events = __pthread_initial_thread.p_report_events;
726#endif
727 if (__builtin_expect (report_events, 0))
728 {
729 /* It's a bit more complicated. We have to report the creation of
730 the manager thread. */
731 int idx = __td_eventword (TD_CREATE);
732 uint32_t mask = __td_eventmask (TD_CREATE);
733 uint32_t event_bits;
734
735#ifdef USE_TLS
736 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
737 p_eventbuf.eventmask.event_bits[idx]);
738#else
739 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
740#endif
741
742 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
743 != 0)
744 {
745 __pthread_lock(mgr->p_lock, NULL);
746
747#ifdef NEED_SEPARATE_REGISTER_STACK
748 pid = __clone2(__pthread_manager_event,
749 (void **) __pthread_manager_thread_bos,
750 THREAD_MANAGER_STACK_SIZE,
751 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
752 mgr);
753#elif defined _STACK_GROWS_UP
754 pid = __clone(__pthread_manager_event,
755 (void **) __pthread_manager_thread_bos,
756 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
757 mgr);
758#else
759 pid = __clone(__pthread_manager_event,
760 (void **) __pthread_manager_thread_tos,
761 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
762 mgr);
763#endif
764
765 if (pid != -1)
766 {
767 /* Now fill in the information about the new thread in
768 the newly created thread's data structure. We cannot let
769 the new thread do this since we don't know whether it was
770 already scheduled when we send the event. */
771 mgr->p_eventbuf.eventdata = mgr;
772 mgr->p_eventbuf.eventnum = TD_CREATE;
773 __pthread_last_event = mgr;
774 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
775 mgr->p_pid = pid;
776
777 /* Now call the function which signals the event. */
778 __linuxthreads_create_event ();
779 }
780
781 /* Now restart the thread. */
782 __pthread_unlock(mgr->p_lock);
783 }
784 }
785
786 if (__builtin_expect (pid, 0) == 0)
787 {
788#ifdef NEED_SEPARATE_REGISTER_STACK
789 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
790 THREAD_MANAGER_STACK_SIZE,
791 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
792#elif defined _STACK_GROWS_UP
793 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
794 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
795#else
796 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
797 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
798#endif
799 }
800 if (__builtin_expect (pid, 0) == -1) {
801#ifdef USE_TLS
802 _dl_deallocate_tls (tcbp, true);
803#endif
804 free(__pthread_manager_thread_bos);
805 close_not_cancel(manager_pipe[0]);
806 close_not_cancel(manager_pipe[1]);
807 return -1;
808 }
809 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
810 mgr->p_pid = pid;
811 /* Make gdb aware of new thread manager */
812 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
813 {
814 raise(__pthread_sig_debug);
815 /* We suspend ourself and gdb will wake us up when it is
816 ready to handle us. */
817 __pthread_wait_for_restart_signal(thread_self());
818 }
819 /* Synchronize debugging of the thread manager */
820 request.req_kind = REQ_DEBUG;
821 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
822 (char *) &request, sizeof(request)));
823 return 0;
824}
825
826/* Thread creation */
827
828int __pthread_create(pthread_t *thread, const pthread_attr_t *attr,
829 void * (*start_routine)(void *), void *arg)
830{
831 pthread_descr self = thread_self();
832 struct pthread_request request;
833 int retval;
834 if (__builtin_expect (__pthread_manager_request, 0) < 0) {
835 if (__pthread_initialize_manager() < 0) return EAGAIN;
836 }
837 request.req_thread = self;
838 request.req_kind = REQ_CREATE;
839 request.req_args.create.attr = attr;
840 request.req_args.create.fn = start_routine;
841 request.req_args.create.arg = arg;
842 sigprocmask(SIG_SETMASK, NULL, &request.req_args.create.mask);
843 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
844 (char *) &request, sizeof(request)));
845 suspend(self);
846 retval = THREAD_GETMEM(self, p_retcode);
847 if (__builtin_expect (retval, 0) == 0)
848 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
849 return retval;
850}
851strong_alias (__pthread_create, pthread_create)
852
853/* Simple operations on thread identifiers */
854
855pthread_descr __pthread_thread_self(void)
856{
857 return thread_self();
858}
859
860pthread_t __pthread_self(void)
861{
862 pthread_descr self = thread_self();
863 return THREAD_GETMEM(self, p_tid);
864}
865strong_alias (__pthread_self, pthread_self)
866
/* Two thread ids name the same thread iff they compare equal.  */
int __pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
871strong_alias (__pthread_equal, pthread_equal)
872
873/* Helper function for thread_self in the case of user-provided stacks */
874
875#ifndef THREAD_SELF
876
877pthread_descr __pthread_find_self(void)
878{
879 char * sp = CURRENT_STACK_FRAME;
880 pthread_handle h;
881
882 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
883 the manager threads handled specially in thread_self(), so start at 2 */
884 h = __pthread_handles + 2;
885# ifdef _STACK_GROWS_UP
886 while (! (sp >= (char *) h->h_descr && sp < (char *) h->h_descr->p_guardaddr)) h++;
887# else
888 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
889# endif
890 return h->h_descr;
891}
892
893#else
894
/* Variant of the stack-pointer-based thread lookup used when THREAD_SELF
   is defined: also recognizes the manager thread by its stack bounds.
   NOTE(review): the scan loop is unbounded; assumes the caller is a
   registered thread.  */
pthread_descr __pthread_self_stack(void)
{
  char *sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  /* The manager thread's stack is tracked separately from the handle
     table, so test it first.  */
  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
    return manager_thread;
  /* Entries 0 and 1 (initial and manager threads) are handled above /
     in thread_self(), so start at index 2.  */
  h = __pthread_handles + 2;
# ifdef USE_TLS
  /* With TLS the descriptor may live outside the stack; compare against
     the recorded stack bounds and skip not-yet-initialized entries.  */
# ifdef _STACK_GROWS_UP
  while (h->h_descr == NULL
	 || ! (sp >= h->h_descr->p_stackaddr && sp < h->h_descr->p_guardaddr))
    h++;
# else
  while (h->h_descr == NULL
	 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
    h++;
# endif
# else
  /* Without TLS the descriptor itself bounds the stack region.  */
# ifdef _STACK_GROWS_UP
  while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
    h++;
# else
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
    h++;
# endif
# endif
  return h->h_descr;
}
923}
924
925#endif
926
927/* Thread scheduling */
928
929int __pthread_setschedparam(pthread_t thread, int policy,
930 const struct sched_param *param)
931{
932 pthread_handle handle = thread_handle(thread);
933 pthread_descr th;
934
935 __pthread_lock(&handle->h_lock, NULL);
936 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
937 __pthread_unlock(&handle->h_lock);
938 return ESRCH;
939 }
940 th = handle->h_descr;
941 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
942 0)) {
943 __pthread_unlock(&handle->h_lock);
944 return errno;
945 }
946 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
947 __pthread_unlock(&handle->h_lock);
948 if (__pthread_manager_request >= 0)
949 __pthread_manager_adjust_prio(th->p_priority);
950 return 0;
951}
952strong_alias (__pthread_setschedparam, pthread_setschedparam)
953
954int __pthread_getschedparam(pthread_t thread, int *policy,
955 struct sched_param *param)
956{
957 pthread_handle handle = thread_handle(thread);
958 int pid, pol;
959
960 __pthread_lock(&handle->h_lock, NULL);
961 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
962 __pthread_unlock(&handle->h_lock);
963 return ESRCH;
964 }
965 pid = handle->h_descr->p_pid;
966 __pthread_unlock(&handle->h_lock);
967 pol = __sched_getscheduler(pid);
968 if (__builtin_expect (pol, 0) == -1) return errno;
969 if (__sched_getparam(pid, param) == -1) return errno;
970 *policy = pol;
971 return 0;
972}
973strong_alias (__pthread_getschedparam, pthread_getschedparam)
974
975/* Process-wide exit() request */
976
/* Process-wide exit handler (registered with the libc exit machinery).
   If the thread manager is running, ask it to terminate every other
   thread, then wait until it restarts us before letting exit proceed.  */
static void pthread_onexit_process(int retcode, void *arg)
{
  if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
    struct pthread_request request;
    pthread_descr self = thread_self();

    /* Make sure we come back here after suspend(), in case we entered
       from a signal handler. */
    THREAD_SETMEM(self, p_signal_jmp, NULL);

    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
    /* Send the exit request to the manager over its pipe, retrying on
       EINTR; then block until the manager acknowledges.  */
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
		(char *) &request, sizeof(request)));
    suspend(self);
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread)
      {
#ifdef USE_TLS
	waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
	waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
	/* Since all threads have been asynchronously terminated
	   (possibly holding locks), free cannot be used any more.
	   For mtrace, we'd like to print something though. */
	/* #ifdef USE_TLS
	   tcbhead_t *tcbp = (tcbhead_t *) manager_thread;
	   # if defined(TLS_DTV_AT_TP)
	   tcbp = (tcbhead_t) ((char *) tcbp + TLS_PRE_TCB_SIZE);
	   # endif
	   _dl_deallocate_tls (tcbp, true);
	   #endif
	   free (__pthread_manager_thread_bos); */
	__pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
      }
  }
}
1017
1018#ifndef HAVE_Z_NODELETE
/* Exit code recorded by pthread_atexit_retcode() for use below.  */
static int __pthread_atexit_retcode;

/* atexit-style wrapper around pthread_onexit_process: prefer the
   explicit RETCODE, falling back to the previously recorded one when
   RETCODE is zero.  */
static void pthread_atexit_process(void *arg, int retcode)
{
  int code = (retcode != 0) ? retcode : __pthread_atexit_retcode;

  pthread_onexit_process (code, arg);
}
1025
/* Merely record RETCODE so that pthread_atexit_process() can fall back
   to it later; performs no cleanup itself.  */
static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
1030#endif
1031
1032/* The handler for the RESTART signal just records the signal received
1033 in the thread descriptor, and optionally performs a siglongjmp
1034 (for pthread_cond_timedwait). */
1035
1036static void pthread_handle_sigrestart(int sig)
1037{
1038 pthread_descr self = check_thread_self();
1039 THREAD_SETMEM(self, p_signal, sig);
1040 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
1041 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
1042}
1043
1044/* The handler for the CANCEL signal checks for cancellation
1045 (in asynchronous mode), for process-wide exit and exec requests.
1046 For the thread manager thread, redirect the signal to
1047 __pthread_manager_sighandler. */
1048
static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = check_thread_self();
  sigjmp_buf * jmpbuf;

  /* The manager thread has its own handler for this signal.  */
  if (self == manager_thread)
    {
      __pthread_manager_sighandler(sig);
      return;
    }
  /* A process-wide exit is in progress: this thread must die now.  */
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  /* Otherwise this is a cancellation request; act only if the thread
     has been canceled and cancellation is currently enabled.  */
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    /* Asynchronous cancellation: exit directly from the handler.  */
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    /* Deferred cancellation: jump back into the interrupted wait so it
       can observe the cancellation.  */
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
1082
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent to the thread manager by
   new threads created while in debugging mode), the thread manager
   throws __pthread_sig_debug to itself. The debugger (if active)
   intercepts this signal, takes the new threads into account, and
   continues execution of the thread manager by propagating the signal,
   because it does not know what the signal is specifically for. In the
   current implementation, the thread manager simply discards it. */
1092
static void pthread_handle_sigdebug(int sig)
{
  /* Intentionally empty: the signal exists only so an attached debugger
     can intercept it; the thread itself simply discards it.  */
}
1097
1098/* Reset the state of the thread machinery after a fork().
1099 Close the pipe used for requests and set the main thread to the forked
1100 thread.
1101 Notice that we can't free the stack segments, as the forked thread
1102 may hold pointers into them. */
1103
/* Reset the thread machinery after fork(): shut down the (no longer
   existing) manager's pipe and stack bookkeeping, and make the forked
   thread the one and only main thread.  Stack segments of pre-fork
   threads are deliberately not freed (see comment above).  */
void __pthread_reset_main_thread(void)
{
  pthread_descr self = thread_self();

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    close_not_cancel(__pthread_manager_request);
    close_not_cancel(__pthread_manager_reader);
    __pthread_manager_request = __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  /* The live-thread ring now contains only this thread.  */
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables. */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
  THREAD_SETMEM(self, p_resp, __resp);
# endif
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run. */
  {
    struct rlimit limit;
    if (getrlimit (RLIMIT_STACK, &limit) == 0
	&& limit.rlim_cur != limit.rlim_max)
      {
	limit.rlim_cur = limit.rlim_max;
	setrlimit(RLIMIT_STACK, &limit);
      }
  }
#endif
}
1147
1148/* Process-wide exec() request */
1149
/* Non-portable extension: terminate every thread except the caller,
   typically invoked just before exec().  Also resets this library's
   signal handlers to SIG_DFL so the disposition is sane in the new
   program image.  */
void __pthread_kill_other_threads_np(void)
{
  struct sigaction sa;
  /* Terminate all other threads and thread manager */
  pthread_onexit_process(0, NULL);
  /* Make current thread the main thread in case the calling thread
     changes its mind, does not exec(), and creates new threads instead. */
  __pthread_reset_main_thread();

  /* Reset the signal handlers behaviour for the signals the
     implementation uses since this would be passed to the new
     process. */
  memset(&sa, 0, sizeof(sa));
  if (SIG_DFL) /* if it's constant zero, it's already done */
    sa.sa_handler = SIG_DFL;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  /* The debug signal is optional (0 means unused).  */
  if (__pthread_sig_debug > 0)
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
}
weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
1171
/* Concurrency symbol level.  POSIX allows pthread_setconcurrency to be
   a pure hint; this implementation only stores the value so that
   pthread_getconcurrency can report it back.  */
static int current_level;

int __pthread_setconcurrency(int level)
{
  /* We don't do anything unless we have found a useful interpretation. */
  current_level = level;
  return 0;
}
weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1182
/* Report the level last set by pthread_setconcurrency (0 initially).  */
int __pthread_getconcurrency(void)
{
  return current_level;
}
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1188
1189/* Primitives for controlling thread execution */
1190
/* Block the calling thread until the restart signal is delivered.
   Waits with a mask that unblocks only __pthread_sig_restart, and loops
   because sigsuspend can also return for other unblocked signals whose
   handlers ran; p_signal tells us which signal actually arrived.  */
void __pthread_wait_for_restart_signal(pthread_descr self)
{
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    __pthread_sigsuspend(&mask);	/* Wait for signal.  Must not be a
					   cancellation point. */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
1205
1206#if !__ASSUME_REALTIME_SIGNALS
1207/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1208 signals.
1209 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1210 Since the restart signal does not queue, we use an atomic counter to create
1211 queuing semantics. This is needed to resolve a rare race condition in
1212 pthread_cond_timedwait_relative. */
1213
1214void __pthread_restart_old(pthread_descr th)
1215{
1216 if (pthread_atomic_increment(&th->p_resume_count) == -1)
1217 kill(th->p_pid, __pthread_sig_restart);
1218}
1219
1220void __pthread_suspend_old(pthread_descr self)
1221{
1222 if (pthread_atomic_decrement(&self->p_resume_count) <= 0)
1223 __pthread_wait_for_restart_signal(self);
1224}
1225
/* Suspend the calling thread until a restart arrives or ABSTIME passes
   (pre-RT-signal variant using the p_resume_count emulation).  Returns 1
   when the wakeup consumed a restart, 0 when the caller must resolve a
   potential race (see the long comment below).  */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  /* Only sleep when no restart was already queued on the counter.  */
  if (pthread_atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      __sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
	struct timeval now;
	struct timespec reltime;

	/* Compute a time offset relative to now.  */
	__gettimeofday (&now, NULL);
	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
	if (reltime.tv_nsec < 0) {
	  reltime.tv_nsec += 1000000000;
	  reltime.tv_sec -= 1;
	}

	/* Sleep for the required duration. If woken by a signal,
	   resume waiting as required by Single Unix Specification.  */
	if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
	  break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* Restart-signal handler longjmp'ed us here.  */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider.  First, try to bump up the resume count
     back to zero.  If it goes to 1, it means restart() was
     invoked on this thread.  The signal must be consumed
     and the count bumped down and everything is cool.  We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (pthread_atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
1302#endif /* __ASSUME_REALTIME_SIGNALS */
1303
/* Restart a suspended thread by delivering the restart signal (variant
   for kernels with real-time signals, which queue reliably).  */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions.  The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}
1313
1314/* There is no __pthread_suspend_new because it would just
1315 be a wasteful wrapper for __pthread_wait_for_restart_signal */
1316
/* Suspend the calling thread until a restart signal arrives or ABSTIME
   passes (variant for kernels with real-time signals).  Returns 1 when
   woken by the restart signal, 0 on timeout/spurious wakeup, in which
   case the caller must resolve the resulting race (see below).  */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    /* Arm the jump buffer so the restart handler can longjmp us out of
       the sleep, then open a window where only the restart signal is
       unblocked.  */
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    __sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now.  */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
	reltime.tv_nsec += 1000000000;
	reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
	 resume waiting as required by Single Unix Specification.  */
      if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
	break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* Restart-signal handler longjmp'ed us here.  */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool.  We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart.  This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
1372
1373
1374/* Debugging aid */
1375
1376#ifdef DEBUG
1377#include <stdarg.h>
1378
/* Write a formatted debug message, prefixed with the process id, to
   stderr (fd 2).  Output is bounded by the local buffer and always
   NUL-terminated; EINTR on the write is retried.  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;

  /* Use snprintf and the real prefix length instead of sprintf plus a
     hard-coded offset of 8: "%05d" pads to at least five digits but pids
     can be wider (pid_max may exceed 99999), in which case the old code
     let the message body overwrite the tail of the prefix.  */
  len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    return;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(write_not_cancel(2, buffer, strlen(buffer)));
}
1389
1390#endif