/* Linuxthreads - a simple clone()-based implementation of Posix */
2/* threads for Linux. */
3/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
4/* */
5/* This program is free software; you can redistribute it and/or */
6/* modify it under the terms of the GNU Library General Public License */
7/* as published by the Free Software Foundation; either version 2 */
8/* of the License, or (at your option) any later version. */
9/* */
10/* This program is distributed in the hope that it will be useful, */
11/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13/* GNU Library General Public License for more details. */
14
15/* Thread creation, initialization, and basic low-level routines */
16
17#define __FORCE_GLIBC
18#include <features.h>
19#include <errno.h>
20#include <netdb.h> /* for h_errno */
21#include <stddef.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <string.h>
25#include <unistd.h>
26#include <fcntl.h>
27#include <sys/wait.h>
28#include <sys/resource.h>
29#include "pthread.h"
30#include "internals.h"
31#include "spinlock.h"
32#include "restart.h"
33#include "debug.h" /* added to linuxthreads -StS */
34
35
36/* Mods for uClibc: Some includes */
37#include <signal.h>
38#include <sys/types.h>
39#include <sys/syscall.h>
40
41/* mods for uClibc: __libc_sigaction is not in any standard headers */
42extern __typeof(sigaction) __libc_sigaction;
43libpthread_hidden_proto(waitpid)
44libpthread_hidden_proto(raise)
45
46/* These variables are used by the setup code. */
47extern int _errno;
48extern int _h_errno;
49
50
/* Descriptor of the initial thread */

/* Statically allocated so the main thread has a valid descriptor before
   any pthread call is made.  Initializers must stay in declaration order
   of struct _pthread_descr_struct (see internals.h).  */
struct _pthread_descr_struct __pthread_initial_thread = {
  &__pthread_initial_thread,  /* pthread_descr p_nextlive -- live list is circular; starts as just this thread */
  &__pthread_initial_thread,  /* pthread_descr p_prevlive */
  NULL,                       /* pthread_descr p_nextwaiting */
  NULL,                       /* pthread_descr p_nextlock */
  PTHREAD_THREADS_MAX,        /* pthread_t p_tid -- tid of the main thread */
  0,                          /* int p_pid -- filled in by pthread_initialize() */
  0,                          /* int p_priority */
  &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock -- handle slot 0 is the initial thread */
  0,                          /* int p_signal */
  NULL,                       /* sigjmp_buf * p_signal_buf */
  NULL,                       /* sigjmp_buf * p_cancel_buf */
  0,                          /* char p_terminated */
  0,                          /* char p_detached */
  0,                          /* char p_exited */
  NULL,                       /* void * p_retval */
  0,                          /* int p_retcode */
  NULL,                       /* pthread_descr p_joining */
  NULL,                       /* struct _pthread_cleanup_buffer * p_cleanup */
  0,                          /* char p_cancelstate */
  0,                          /* char p_canceltype */
  0,                          /* char p_canceled */
  &_errno,                    /* int *p_errnop -- main thread uses the global errno */
  0,                          /* int p_errno */
  &_h_errno,                  /* int *p_h_errnop -- likewise the global h_errno */
  0,                          /* int p_h_errno */
  NULL,                       /* char * p_in_sighandler */
  0,                          /* char p_sigwaiting */
  PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */
  {NULL},                     /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
  {NULL},                     /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
  0,                          /* int p_userstack */
  NULL,                       /* void * p_guardaddr */
  0,                          /* size_t p_guardsize */
  &__pthread_initial_thread,  /* pthread_descr p_self */
  0,                          /* p_nr: always index 0 in __pthread_handles */
  0,                          /* int p_report_events */
  {{{0, }}, 0, NULL},         /* td_eventbuf_t p_eventbuf */
  __ATOMIC_INITIALIZER,       /* struct pthread_atomic p_resume_count */
  0,                          /* char p_woken_by_cancel */
  0,                          /* char p_condvar_avail */
  0,                          /* char p_sem_avail */
  NULL,                       /* struct pthread_extricate_if *p_extricate */
  NULL,                       /* pthread_readlock_info *p_readlock_list; */
  NULL,                       /* pthread_readlock_info *p_readlock_free; */
  0                           /* int p_untracked_readlock_count; */
#ifdef __UCLIBC_HAS_XLOCALE__
  ,
  &__global_locale_data,      /* __locale_t locale; */
#endif /* __UCLIBC_HAS_XLOCALE__ */
};
104
/* Descriptor of the manager thread; none of this is used but the error
   variables, the p_pid and p_priority fields,
   and the address for identification.
   p_tid and p_pid are filled in by __pthread_initialize_manager() once
   the manager has actually been cloned.  */
#define manager_thread (&__pthread_manager_thread)
struct _pthread_descr_struct __pthread_manager_thread = {
  NULL,                       /* pthread_descr p_nextlive */
  NULL,                       /* pthread_descr p_prevlive */
  NULL,                       /* pthread_descr p_nextwaiting */
  NULL,                       /* pthread_descr p_nextlock */
  0,                          /* int p_tid -- set later to 2*PTHREAD_THREADS_MAX+1 */
  0,                          /* int p_pid -- set later to the clone() result */
  0,                          /* int p_priority */
  &__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock -- handle slot 1 is the manager */
  0,                          /* int p_signal */
  NULL,                       /* sigjmp_buf * p_signal_buf */
  NULL,                       /* sigjmp_buf * p_cancel_buf */
  0,                          /* char p_terminated */
  0,                          /* char p_detached */
  0,                          /* char p_exited */
  NULL,                       /* void * p_retval */
  0,                          /* int p_retcode */
  NULL,                       /* pthread_descr p_joining */
  NULL,                       /* struct _pthread_cleanup_buffer * p_cleanup */
  0,                          /* char p_cancelstate */
  0,                          /* char p_canceltype */
  0,                          /* char p_canceled */
  &__pthread_manager_thread.p_errno, /* int *p_errnop -- manager keeps its own private errno */
  0,                          /* int p_errno */
  NULL,                       /* int *p_h_errnop */
  0,                          /* int p_h_errno */
  NULL,                       /* char * p_in_sighandler */
  0,                          /* char p_sigwaiting */
  PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */
  {NULL},                     /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
  {NULL},                     /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
  0,                          /* int p_userstack */
  NULL,                       /* void * p_guardaddr */
  0,                          /* size_t p_guardsize */
  &__pthread_manager_thread,  /* pthread_descr p_self */
  1,                          /* p_nr: always index 1 in __pthread_handles */
  0,                          /* int p_report_events */
  {{{0, }}, 0, NULL},         /* td_eventbuf_t p_eventbuf */
  __ATOMIC_INITIALIZER,       /* struct pthread_atomic p_resume_count */
  0,                          /* char p_woken_by_cancel */
  0,                          /* char p_condvar_avail */
  0,                          /* char p_sem_avail */
  NULL,                       /* struct pthread_extricate_if *p_extricate */
  NULL,                       /* pthread_readlock_info *p_readlock_list; */
  NULL,                       /* pthread_readlock_info *p_readlock_free; */
  0                           /* int p_untracked_readlock_count; */
#ifdef __UCLIBC_HAS_XLOCALE__
  ,
  &__global_locale_data,      /* __locale_t locale; */
#endif /* __UCLIBC_HAS_XLOCALE__ */
};
160
/* Pointer to the main thread (the father of the thread manager thread) */
/* Originally, this is the initial thread, but this changes after fork() */

pthread_descr __pthread_main_thread = &__pthread_initial_thread;

/* Limit between the stack of the initial thread (above) and the
   stacks of other threads (below). Aligned on a STACK_SIZE boundary.
   Also doubles as the "library initialized" flag: non-NULL once
   pthread_initialize() has run.  */

char *__pthread_initial_thread_bos = NULL;

#ifndef __ARCH_USE_MMU__
/* See nommu notes in internals.h and pthread_initialize() below. */
char *__pthread_initial_thread_tos = NULL;
char *__pthread_initial_thread_mid = NULL;
#endif /* __ARCH_USE_MMU__ */

/* File descriptor for sending requests to the thread manager. */
/* Initially -1, meaning that the thread manager is not running. */

int __pthread_manager_request = -1;

/* Other end of the pipe for sending requests to the thread manager. */

int __pthread_manager_reader;

/* Limits of the thread manager stack (heap-allocated in
   __pthread_initialize_manager()). */

char *__pthread_manager_thread_bos = NULL;
char *__pthread_manager_thread_tos = NULL;

/* For process-wide exit() */

int __pthread_exit_requested = 0;
int __pthread_exit_code = 0;

/* Communicate relevant LinuxThreads constants to gdb */

const int __pthread_threads_max = PTHREAD_THREADS_MAX;
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct, h_descr);
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
                                            p_pid);
const int __linuxthreads_pthread_sizeof_descr
  = sizeof(struct _pthread_descr_struct);

const int __linuxthreads_initial_report_events;

const char __linuxthreads_version[] = VERSION;

/* Forward declarations */
static void pthread_onexit_process(int retcode, void *arg);
static void pthread_handle_sigcancel(int sig);
static void pthread_handle_sigrestart(int sig);
static void pthread_handle_sigdebug(int sig);
int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime);
216
/* Signal numbers used for the communication.
   In these variables we keep track of the used variables. If the
   platform does not support any real-time signals we will define the
   values to some unreasonable value which will signal failing of all
   the functions below. */
#ifndef __NR_rt_sigaction
/* Kernel lacks rt_sigaction: no real-time signals.  The -1 cursors make
   __libc_allocate_rtsig() always fail. */
static int current_rtmin = -1;
static int current_rtmax = -1;
int __pthread_sig_restart = SIGUSR1;
int __pthread_sig_cancel = SIGUSR2;
int __pthread_sig_debug;   /* stays 0: no dedicated debug signal */
#else

#if __SIGRTMAX - __SIGRTMIN >= 3
/* Enough RT signals available: reserve the first three for
   restart/cancel/debug and hand out the rest via __libc_allocate_rtsig(). */
static int current_rtmin = __SIGRTMIN + 3;
static int current_rtmax = __SIGRTMAX;
int __pthread_sig_restart = __SIGRTMIN;
int __pthread_sig_cancel = __SIGRTMIN + 1;
int __pthread_sig_debug = __SIGRTMIN + 2;
void (*__pthread_restart)(pthread_descr) = __pthread_restart_new;
void (*__pthread_suspend)(pthread_descr) = __pthread_wait_for_restart_signal;
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_new;
#else
/* Too few RT signals: fall back to SIGUSR1/SIGUSR2 and the old
   suspend/restart implementation. */
static int current_rtmin = __SIGRTMIN;
static int current_rtmax = __SIGRTMAX;
int __pthread_sig_restart = SIGUSR1;
int __pthread_sig_cancel = SIGUSR2;
int __pthread_sig_debug;   /* stays 0: no dedicated debug signal */
void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;

#endif
250
251/* Return number of available real-time signal with highest priority. */
252int __libc_current_sigrtmin (void)
253{
254 return current_rtmin;
255}
256
257/* Return number of available real-time signal with lowest priority. */
258int __libc_current_sigrtmax (void)
259{
260 return current_rtmax;
261}
262
263/* Allocate real-time signal with highest/lowest available
264 priority. Please note that we don't use a lock since we assume
265 this function to be called at program start. */
266int __libc_allocate_rtsig (int high);
267int __libc_allocate_rtsig (int high)
268{
269 if (current_rtmin == -1 || current_rtmin > current_rtmax)
270 /* We don't have anymore signal available. */
271 return -1;
272 return high ? current_rtmin++ : current_rtmax--;
273}
274#endif
275
276/* Initialize the pthread library.
277 Initialization is split in two functions:
278 - a constructor function that blocks the __pthread_sig_restart signal
279 (must do this very early, since the program could capture the signal
280 mask with e.g. sigsetjmp before creating the first thread);
281 - a regular function called from pthread_create when needed. */
282
283static void pthread_initialize(void) __attribute__((constructor));
284
285libpthread_hidden_proto(pthread_attr_destroy)
286libpthread_hidden_proto(pthread_attr_init)
287libpthread_hidden_proto(pthread_attr_getdetachstate)
288libpthread_hidden_proto(pthread_attr_setdetachstate)
289libpthread_hidden_proto(pthread_attr_getinheritsched)
290libpthread_hidden_proto(pthread_attr_setinheritsched)
291libpthread_hidden_proto(pthread_attr_setschedparam)
292libpthread_hidden_proto(pthread_attr_getschedparam)
293libpthread_hidden_proto(pthread_attr_getschedpolicy)
294libpthread_hidden_proto(pthread_attr_setschedpolicy)
295libpthread_hidden_proto(pthread_attr_getscope)
296libpthread_hidden_proto(pthread_attr_setscope)
297
298libpthread_hidden_proto(pthread_exit)
299
300libpthread_hidden_proto(pthread_equal)
301libpthread_hidden_proto(pthread_self)
302libpthread_hidden_proto(pthread_getschedparam)
303libpthread_hidden_proto(pthread_setschedparam)
304
305libpthread_hidden_proto(pthread_setcancelstate)
306libpthread_hidden_proto(pthread_setcanceltype)
307libpthread_hidden_proto(_pthread_cleanup_push_defer)
308libpthread_hidden_proto(_pthread_cleanup_pop_restore)
309
310libpthread_hidden_proto(pthread_cond_broadcast)
311libpthread_hidden_proto(pthread_cond_destroy)
312libpthread_hidden_proto(pthread_cond_init)
313libpthread_hidden_proto(pthread_cond_signal)
314libpthread_hidden_proto(pthread_cond_wait)
315libpthread_hidden_proto(pthread_cond_timedwait)
316
317libpthread_hidden_proto(pthread_condattr_destroy)
318libpthread_hidden_proto(pthread_condattr_init)
319
/* Function table handed to libc via __libc_pthread_init() so that libc
   can call into libpthread without a hard link-time dependency.
   Entries that are commented out are not provided by this
   implementation. */
struct pthread_functions __pthread_functions =
  {
#ifndef USE___THREAD
    .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
    .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
    .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
#endif
/*
    .ptr_pthread_fork = __pthread_fork,
*/
    .ptr_pthread_attr_destroy = pthread_attr_destroy,
    .ptr_pthread_attr_init = pthread_attr_init,
    .ptr_pthread_attr_getdetachstate = pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = pthread_attr_getscope,
    .ptr_pthread_attr_setscope = pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = pthread_condattr_destroy,
    .ptr_pthread_condattr_init = pthread_condattr_init,
    .ptr_pthread_cond_broadcast = pthread_cond_broadcast,
    .ptr_pthread_cond_destroy = pthread_cond_destroy,
    .ptr_pthread_cond_init = pthread_cond_init,
    .ptr_pthread_cond_signal = pthread_cond_signal,
    .ptr_pthread_cond_wait = pthread_cond_wait,
    .ptr_pthread_cond_timedwait = pthread_cond_timedwait,
    .ptr_pthread_equal = pthread_equal,
    .ptr___pthread_exit = pthread_exit,
    .ptr_pthread_getschedparam = pthread_getschedparam,
    .ptr_pthread_setschedparam = pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = pthread_self,
    .ptr_pthread_setcancelstate = pthread_setcancelstate,
    .ptr_pthread_setcanceltype = pthread_setcanceltype,
/*
    .ptr_pthread_do_exit = pthread_do_exit,
    .ptr_pthread_thread_self = pthread_thread_self,
    .ptr_pthread_cleanup_upto = pthread_cleanup_upto,
    .ptr_pthread_sigaction = pthread_sigaction,
    .ptr_pthread_sigwait = pthread_sigwait,
    .ptr_pthread_raise = pthread_raise,
    .ptr__pthread_cleanup_push = _pthread_cleanup_push,
    .ptr__pthread_cleanup_pop = _pthread_cleanup_pop
*/
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
  };
375#ifdef SHARED
376# define ptr_pthread_functions &__pthread_functions
377#else
378# define ptr_pthread_functions NULL
379#endif
380
381static int *__libc_multiple_threads_ptr;
382
/* Do some minimal initialization which has to be done during the
   startup of the C library. */
void __pthread_initialize_minimal(void)
{
  /* If we have special thread_self processing, initialize
   * that for the main thread now. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
#endif

  /* Register our function table with libc and remember where libc keeps
     its "multiple threads" flag so __pthread_initialize_manager() can
     set it when the first thread is created. */
  __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
}
395
396
/* One-time library initialization, run as an ELF constructor (and again
   on demand from __pthread_initialize_manager() if a foreign constructor
   created a thread before ours ran).  Sets up the initial thread's
   descriptor, stack bounds, stdio locking, signal handlers/masks, and
   the process-exit hook.  Idempotent: bails out if already done. */
static void pthread_initialize(void)
{
  struct sigaction sa;
  sigset_t mask;
#ifdef __ARCH_USE_MMU__
  struct rlimit limit;
  rlim_t max_stack;
#endif

  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = getpid();
  /* If we have special thread_self processing, initialize that for the
     main thread now. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
#endif
  /* The errno/h_errno variable of the main thread are the global ones. */
  __pthread_initial_thread.p_errnop = &_errno;
  __pthread_initial_thread.p_h_errnop = &_h_errno;

#ifdef __UCLIBC_HAS_XLOCALE__
  /* The locale of the main thread is the current locale in use. */
  __pthread_initial_thread.locale = __curlocale_var;
#endif /* __UCLIBC_HAS_XLOCALE__ */

  { /* uClibc-specific stdio initialization for threads.
       Switch every open stream that is not explicitly user-locked to
       auto-locking mode now that threads exist. */
    FILE *fp;

    _stdio_user_locking = 0; /* 2 if threading not initialized */
    for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) {
      if (fp->__user_locking != 1) {
        fp->__user_locking = 0;
      }
    }
  }

  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus two pages (one page for the thread descriptor
     immediately beyond, and one page to act as a guard page). */

#ifdef __ARCH_USE_MMU__
  /* We cannot allocate a huge chunk of memory to mmap all thread stacks later
   * on a non-MMU system. Thus, we don't need the rlimit either. -StS */
  getrlimit(RLIMIT_STACK, &limit);
  max_stack = STACK_SIZE - 2 * getpagesize();
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  }
#else
  /* For non-MMU, the initial thread stack can reside anywhere in memory.
   * We don't have a way of knowing where the kernel started things -- top
   * or bottom (well, that isn't exactly true, but the solution is fairly
   * complex and error prone). All we can determine here is an address
   * that lies within that stack. Save that address as a reference so that
   * as other thread stacks are created, we can adjust the estimated bounds
   * of the initial thread's stack appropriately.
   *
   * This checking is handled in NOMMU_INITIAL_THREAD_BOUNDS(), so see that
   * for a few more details.
   */
  __pthread_initial_thread_mid = CURRENT_STACK_FRAME;
  __pthread_initial_thread_tos = (char *) -1;
  __pthread_initial_thread_bos = (char *) 1; /* set it non-zero so we know we have been here */
  PDEBUG("initial thread stack bounds: bos=%p, tos=%p\n",
         __pthread_initial_thread_bos, __pthread_initial_thread_tos);
#endif /* __ARCH_USE_MMU__ */

  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  memset(&sa, 0, sizeof(sa));            /* zeroes sa_mask and sa_flags too */
  sa.sa_handler = pthread_handle_sigrestart;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  /* Block the restart signal while the cancel handler runs. */
  sigaddset(&sa.sa_mask, __pthread_sig_restart);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
    sa.sa_handler = pthread_handle_sigdebug;
    __sigemptyset(&sa.sa_mask);
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  }
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  __sigemptyset(&mask);
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* And unblock __pthread_sig_cancel if it has been blocked. */
  sigdelset(&mask, __pthread_sig_restart);
  sigaddset(&mask, __pthread_sig_cancel);
  sigprocmask(SIG_UNBLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_onexit_process. */
  on_exit(pthread_onexit_process, NULL);
}
503
/* Exported entry point that forces library initialization by hand
   (e.g. for callers that dlopen libpthread); simply runs the
   constructor, which is idempotent. */
void __pthread_initialize(void);
void __pthread_initialize(void)
{
  pthread_initialize();
}
509
/* Start the thread manager: allocate its stack, create the request pipe,
   and clone() the manager thread (event-reporting variant first if a
   debugger asked for TD_CREATE events).  Called lazily from
   pthread_create() on the first thread creation.
   Returns 0 on success, -1 on failure (stack or pipe or clone failed). */
int __pthread_initialize_manager(void)
{
  int manager_pipe[2];
  int pid;
  int report_events;
  struct pthread_request request;

  /* From now on libc must assume multiple threads exist. */
  *__libc_multiple_threads_ptr = 1;

  /* If basic initialization not done yet (e.g. we're called from a
     constructor run before our constructor), do it now */
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
  /* Setup stack for thread manager */
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
  if (__pthread_manager_thread_bos == NULL) return -1;
  __pthread_manager_thread_tos =
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;

  /* On non-MMU systems we make sure that the initial thread bounds don't overlap
   * with the manager stack frame */
  NOMMU_INITIAL_THREAD_BOUNDS(__pthread_manager_thread_tos,__pthread_manager_thread_bos);
  PDEBUG("manager stack: size=%d, bos=%p, tos=%p\n", THREAD_MANAGER_STACK_SIZE,
         __pthread_manager_thread_bos, __pthread_manager_thread_tos);
#if 0
  PDEBUG("initial stack: estimate bos=%p, tos=%p\n",
         __pthread_initial_thread_bos, __pthread_initial_thread_tos);
#endif

  /* Setup pipe to communicate with thread manager */
  if (pipe(manager_pipe) == -1) {
    free(__pthread_manager_thread_bos);
    return -1;
  }
  /* Start the thread manager */
  pid = 0;
#if defined(USE_TLS) && USE_TLS
  if (__linuxthreads_initial_report_events != 0)
    THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
                   __linuxthreads_initial_report_events);
  report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
#else
  if (__linuxthreads_initial_report_events != 0)
    __pthread_initial_thread.p_report_events
      = __linuxthreads_initial_report_events;
  report_events = __pthread_initial_thread.p_report_events;
#endif
  if (__builtin_expect (report_events, 0))
    {
      /* It's a bit more complicated. We have to report the creation of
         the manager thread. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]))
          != 0)
        {
          /* Hold the manager's lock while we clone so the event data is
             filled in before the manager can run. */
          __pthread_lock(__pthread_manager_thread.p_lock, NULL);

#ifdef __ia64__
          pid = __clone2(__pthread_manager_event,
                         (void **) __pthread_manager_thread_tos,
                         THREAD_MANAGER_STACK_SIZE,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                         (void *)(long)manager_pipe[0]);
#else
          pid = clone(__pthread_manager_event,
                      (void **) __pthread_manager_thread_tos,
                      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                      (void *)(long)manager_pipe[0]);
#endif

          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure. We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              __pthread_manager_thread.p_eventbuf.eventdata =
                &__pthread_manager_thread;
              __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = &__pthread_manager_thread;
              __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
              __pthread_manager_thread.p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();
            }
          /* Now restart the thread. */
          __pthread_unlock(__pthread_manager_thread.p_lock);
        }
    }

  /* Not reporting events (or the event clone was skipped): do the
     plain clone. */
  if (pid == 0) {
#ifdef __ia64__
    pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_tos,
                   THREAD_MANAGER_STACK_SIZE,
                   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                   (void *)(long)manager_pipe[0]);
#else
    pid = clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
                CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                (void *)(long)manager_pipe[0]);
#endif
  }
  if (pid == -1) {
    /* clone failed: release the stack and both pipe ends. */
    free(__pthread_manager_thread_bos);
    close(manager_pipe[0]);
    close(manager_pipe[1]);
    return -1;
  }
  __pthread_manager_request = manager_pipe[1]; /* writing end */
  __pthread_manager_reader = manager_pipe[0]; /* reading end */
  __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
  __pthread_manager_thread.p_pid = pid;

  /* Make gdb aware of new thread manager */
  if (__pthread_threads_debug && __pthread_sig_debug > 0)
    {
      raise(__pthread_sig_debug);
      /* We suspend ourself and gdb will wake us up when it is
         ready to handle us. */
      __pthread_wait_for_restart_signal(thread_self());
    }
  /* Synchronize debugging of the thread manager */
  PDEBUG("send REQ_DEBUG to manager thread\n");
  request.req_kind = REQ_DEBUG;
  TEMP_FAILURE_RETRY(write(__pthread_manager_request,
                           (char *) &request, sizeof(request)));
  return 0;
}
642
643/* Thread creation */
644
/* Create a new thread.  All actual work is delegated to the manager
   thread: we write a REQ_CREATE request down the pipe (starting the
   manager lazily if needed), then suspend until the manager restarts us
   with the result in our descriptor (p_retcode / p_retval).
   Returns 0 and stores the new id in *thread, or an errno value
   (EAGAIN if the manager could not be started). */
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void * (*start_routine)(void *), void *arg)
{
  pthread_descr self = thread_self();
  struct pthread_request request;
  if (__pthread_manager_request < 0) {
    if (__pthread_initialize_manager() < 0) return EAGAIN;
  }
  request.req_thread = self;
  request.req_kind = REQ_CREATE;
  request.req_args.create.attr = attr;
  request.req_args.create.fn = start_routine;
  request.req_args.create.arg = arg;
  /* Capture our current signal mask so the new thread inherits it. */
  sigprocmask(SIG_SETMASK, NULL, &request.req_args.create.mask);
  PDEBUG("write REQ_CREATE to manager thread\n");
  TEMP_FAILURE_RETRY(write(__pthread_manager_request,
                           (char *) &request, sizeof(request)));
  PDEBUG("before suspend(self)\n");
  /* Wait for the manager to process the request and wake us. */
  suspend(self);
  PDEBUG("after suspend(self)\n");
  if (THREAD_GETMEM(self, p_retcode) == 0)
    *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
  return THREAD_GETMEM(self, p_retcode);
}
669
670/* Simple operations on thread identifiers */
671
672pthread_t pthread_self(void)
673{
674 pthread_descr self = thread_self();
675 return THREAD_GETMEM(self, p_tid);
676}
677libpthread_hidden_def (pthread_self)
678
679int pthread_equal(pthread_t thread1, pthread_t thread2)
680{
681 return thread1 == thread2;
682}
683libpthread_hidden_def (pthread_equal)
684
685/* Helper function for thread_self in the case of user-provided stacks */
686
687#ifndef THREAD_SELF
688
/* Locate the calling thread's descriptor by scanning the handle table
   for the entry whose stack interval [h_bottom, h_descr] contains our
   current stack pointer.  NOTE(review): the scan has no upper bound --
   it assumes the caller's stack always belongs to some registered
   thread; if not, it walks off the end of __pthread_handles. */
pthread_descr __pthread_find_self(void)
{
  char * sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
     the manager threads handled specially in thread_self(), so start at 2 */
  h = __pthread_handles + 2;
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;

#ifdef DEBUG_PT
  if (h->h_descr == NULL) {
    printf("*** %s ERROR descriptor is NULL!!!!! ***\n\n", __FUNCTION__);
    _exit(1);
  }
#endif

  return h->h_descr;
}
708#else
709
/* Variant of __pthread_find_self used when THREAD_SELF exists but may
   not be trustworthy yet (e.g. in a signal handler before the thread
   register is initialized): identify the thread purely from the stack
   pointer, checking the manager's heap-allocated stack first. */
static pthread_descr thread_self_stack(void)
{
  char *sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
    return manager_thread;
  /* Slots 0 and 1 are the initial and manager threads; start at 2. */
  h = __pthread_handles + 2;
# if defined(USE_TLS) && USE_TLS
  while (h->h_descr == NULL
         || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
    h++;
# else
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
    h++;
# endif
  return h->h_descr;
}
728
729#endif
730
731/* Thread scheduling */
732
/* Set scheduling policy and parameters of a thread by applying
   sched_setscheduler() to its kernel process (each LinuxThreads thread
   is a separate clone()d process).  Returns 0, ESRCH for a dead/invalid
   thread, or the errno from sched_setscheduler(). */
int pthread_setschedparam(pthread_t thread, int policy,
                          const struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  /* Lock the handle so the descriptor cannot be recycled under us. */
  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  if (sched_setscheduler(th->p_pid, policy, param) == -1) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }
  /* SCHED_OTHER threads are tracked with priority 0. */
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  /* Keep the manager's priority above any user thread. */
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(th->p_priority);
  return 0;
}
libpthread_hidden_def(pthread_setschedparam)
756
/* Read back a thread's scheduling policy and parameters from the kernel.
   Only the pid is read under the handle lock; the sched_* queries run
   unlocked.  Returns 0, ESRCH for a dead/invalid thread, or the errno
   from the failing sched_* call. */
int pthread_getschedparam(pthread_t thread, int *policy,
                          struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  int pid, pol;

  __pthread_lock(&handle->h_lock, NULL);
  if (invalid_handle(handle, thread)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  pol = sched_getscheduler(pid);
  if (pol == -1) return errno;
  if (sched_getparam(pid, param) == -1) return errno;
  *policy = pol;
  return 0;
}
libpthread_hidden_def(pthread_getschedparam)
777
778/* Process-wide exit() request */
779
/* on_exit() hook registered by pthread_initialize(): ask the manager to
   kill all other threads (REQ_PROCESS_EXIT) so the process exits as a
   whole.  We then suspend; the manager restarts us once the other
   threads are gone. */
static void pthread_onexit_process(int retcode, void *arg attribute_unused)
{
  struct pthread_request request;
  pthread_descr self = thread_self();

  /* Nothing to do if the manager was never started. */
  if (__pthread_manager_request >= 0) {
    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
    TEMP_FAILURE_RETRY(write(__pthread_manager_request,
                             (char *) &request, sizeof(request)));
    suspend(self);
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
      /* Since all threads have been asynchronously terminated
       * (possibly holding locks), free cannot be used any more. */
      __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    }
  }
}
802
803/* The handler for the RESTART signal just records the signal received
804 in the thread descriptor, and optionally performs a siglongjmp
805 (for pthread_cond_timedwait). */
806
/* The handler for the RESTART signal just records the signal received
   in the thread descriptor, and optionally performs a siglongjmp
   (for pthread_cond_timedwait). */

static void pthread_handle_sigrestart(int sig)
{
  pthread_descr self = thread_self();
  THREAD_SETMEM(self, p_signal, sig);
  /* If the thread armed a jump buffer (p_signal_jmp), resume there
     instead of returning from the handler. */
  if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
    siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
}
814
815/* The handler for the CANCEL signal checks for cancellation
816 (in asynchronous mode), for process-wide exit and exec requests.
817 For the thread manager thread, redirect the signal to
818 __pthread_manager_sighandler. */
819
/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = thread_self();
  sigjmp_buf * jmpbuf;


  if (self == &__pthread_manager_thread)
    {
#ifdef THREAD_SELF
      /* A new thread might get a cancel signal before it is fully
         initialized, so that the thread register might still point to the
         manager thread. Double check that this is really the manager
         thread. */
      pthread_descr real_self = thread_self_stack();
      if (real_self == &__pthread_manager_thread)
        {
          __pthread_manager_sighandler(sig);
          return;
        }
      /* Oops, thread_self() isn't working yet.. */
      self = real_self;
# ifdef INIT_THREAD_SELF
      INIT_THREAD_SELF(self, self->p_nr);
# endif
#else
      __pthread_manager_sighandler(sig);
      return;
#endif
    }
  /* Process-wide exit in progress: terminate this thread now. */
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#if defined(USE_TLS) && USE_TLS
      waitpid(__pthread_manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  /* Honor a pending cancellation: exit immediately in asynchronous
     mode, otherwise jump to the cancellation point's jump buffer
     (set by sigwait/timedsuspend) if one is armed. */
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
872
873/* Handler for the DEBUG signal.
874 The debugging strategy is as follows:
875 On reception of a REQ_DEBUG request (sent by new threads created to
876 the thread manager under debugging mode), the thread manager throws
877 __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes the new threads into account, and continues execution
   of the thread manager by propagating the signal, since it does not
   know what the signal is specifically for.  In the current implementation,
881 the thread manager simply discards it. */
882
883static void pthread_handle_sigdebug(int sig attribute_unused)
884{
885 /* Nothing */
886}
887
888/* Reset the state of the thread machinery after a fork().
889 Close the pipe used for requests and set the main thread to the forked
890 thread.
891 Notice that we can't free the stack segments, as the forked thread
892 may hold pointers into them. */
893
894void __pthread_reset_main_thread(void)
895{
896 pthread_descr self = thread_self();
897
898 if (__pthread_manager_request != -1) {
899 /* Free the thread manager stack */
900 free(__pthread_manager_thread_bos);
901 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
902 /* Close the two ends of the pipe */
903 close(__pthread_manager_request);
904 close(__pthread_manager_reader);
905 __pthread_manager_request = __pthread_manager_reader = -1;
906 }
907
908 /* Update the pid of the main thread */
909 THREAD_SETMEM(self, p_pid, getpid());
910 /* Make the forked thread the main thread */
911 __pthread_main_thread = self;
912 THREAD_SETMEM(self, p_nextlive, self);
913 THREAD_SETMEM(self, p_prevlive, self);
914 /* Now this thread modifies the global variables. */
915 THREAD_SETMEM(self, p_errnop, &_errno);
916 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
917}
918
919/* Process-wide exec() request */
920
921void __pthread_kill_other_threads_np(void)
922{
923 struct sigaction sa;
924 /* Terminate all other threads and thread manager */
925 pthread_onexit_process(0, NULL);
926 /* Make current thread the main thread in case the calling thread
927 changes its mind, does not exec(), and creates new threads instead. */
928 __pthread_reset_main_thread();
929 /* Reset the signal handlers behaviour for the signals the
930 implementation uses since this would be passed to the new
931 process. */
932 memset(&sa, 0, sizeof(sa));
933 if (SIG_DFL) /* if it's constant zero, it's already done */
934 sa.sa_handler = SIG_DFL;
935 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
936 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
937 if (__pthread_sig_debug > 0)
938 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
939}
940weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
941
942/* Concurrency symbol level. */
943static int current_level;
944
945int __pthread_setconcurrency(int level)
946{
947 /* We don't do anything unless we have found a useful interpretation. */
948 current_level = level;
949 return 0;
950}
951weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
952
953int __pthread_getconcurrency(void)
954{
955 return current_level;
956}
957weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
958
959
960/* Primitives for controlling thread execution */
961
void __pthread_wait_for_restart_signal(pthread_descr self)
{
  /* Park the calling thread until the restart signal is delivered.
     p_signal is cleared BEFORE sleeping so that a restart arriving
     between the clear and the sigsuspend is still observed by the
     loop condition; the loop filters out wakeups caused by any other
     (unrelated) signal. */
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    sigsuspend(&mask);                   /* Wait for signal */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
975
976#ifndef __NR_rt_sigaction
977/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
978 signals.
979 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
980 Since the restart signal does not queue, we use an atomic counter to create
981 queuing semantics. This is needed to resolve a rare race condition in
982 pthread_cond_timedwait_relative. */
983
984void __pthread_restart_old(pthread_descr th)
985{
986 if (atomic_increment(&th->p_resume_count) == -1)
987 kill(th->p_pid, __pthread_sig_restart);
988}
989
990void __pthread_suspend_old(pthread_descr self)
991{
992 if (atomic_decrement(&self->p_resume_count) <= 0)
993 __pthread_wait_for_restart_signal(self);
994}
995
/* Suspend the calling thread until restarted or until the absolute
   deadline ABSTIME passes.  Returns 1 if a restart was consumed,
   0 if we timed out without consuming one (caller must resolve the
   resulting race).  Queuing semantics are emulated with the
   p_resume_count counter since the restart signal does not queue. */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  /* Only sleep if no resume token was already banked by a restart. */
  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      /* Arm the jump target so pthread_handle_sigrestart longjmps here. */
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      __sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
        struct timeval now;
        struct timespec reltime;

        /* Compute a time offset relative to now. */
        gettimeofday (&now, NULL);
        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
        /* Normalize: tv_nsec may have gone negative by at most one
           second's worth, so a single borrow suffices. */
        if (reltime.tv_nsec < 0) {
          reltime.tv_nsec += 1000000000;
          reltime.tv_sec -= 1;
        }

        /* Sleep for the required duration. If woken by a signal,
           resume waiting as required by Single Unix Specification. */
        if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
          break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* Reached via siglongjmp: a restart signal interrupted the sleep. */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider.  First, try to bump up the resume count
     back to zero.  If it goes to 1, it means restart() was
     invoked on this thread.  The signal must be consumed
     and the count bumped down and everything is cool.  We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
1072#endif /* __NR_rt_sigaction */
1073
1074
1075#ifdef __NR_rt_sigaction
/* Wake thread TH, which must be parked in (or headed for) one of the
   suspend functions, by delivering the restart signal. */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions.  The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}
1085
/* Suspend the calling thread until restarted or until the absolute
   deadline ABSTIME passes.  Returns 1 if the wakeup was caused by the
   restart signal, 0 on timeout or an unrelated signal (an ambiguous
   outcome the caller must resolve). */
int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    /* Arm the jump target so pthread_handle_sigrestart longjmps here. */
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    __sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now. */
      gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      /* Normalize: tv_nsec may have gone negative by at most one
         second's worth, so a single borrow suffices. */
      if (reltime.tv_nsec < 0) {
        reltime.tv_nsec += 1000000000;
        reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
         resume waiting as required by Single Unix Specification. */
      if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0)
        break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* Reached via siglongjmp: a restart signal interrupted the sleep. */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool.  We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart.  This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
1140#endif
1141
1142/* Debugging aid */
1143
1144#ifdef DEBUG_PT
1145#include <stdarg.h>
1146
/* Debug-only logging helper: format FMT/... prefixed with the pid and
   write the result to stderr in a single write() call.
   Fix: the original used sprintf() and then assumed the "%05d : "
   prefix was exactly 8 bytes.  Pids can exceed 5 digits (Linux allows
   up to 7), in which case vsnprintf(buffer + 8, ...) overwrote the
   tail of the prefix and garbled the output.  Use snprintf and its
   returned length instead. */
void __pthread_message(char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;

  len = snprintf(buffer, sizeof(buffer), "%05d : ", getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    len = 0; /* prefix formatting failed; emit the message alone */
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(write(2, buffer, strlen(buffer)));
}
1157
1158#endif
1159
1160
#ifndef __PIC__
/* We need a hook to force the cancellation wrappers to be linked in when
   static libpthread is used.  Taking the address of a symbol the wrapper
   objects provide makes the static linker pull those objects into the
   final binary; the variable itself is never read. */
extern const char __pthread_provide_wrappers;
static const char *const __pthread_require_wrappers =
  &__pthread_provide_wrappers;
#endif