/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

#ifndef _INTERNALS_H
#define _INTERNALS_H 1

/* Internal data structures */

/* Includes */

#include <bits/libc-tsd.h> /* for _LIBC_TSD_KEY_N */
#include <limits.h>
#include <setjmp.h>
#include <signal.h>
#include <unistd.h>
#include <bits/stackinfo.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "pt-machine.h"
#include "semaphore.h"
#include "../linuxthreads.old_db/thread_dbP.h"
#ifdef __UCLIBC_HAS_XLOCALE__
#include <bits/uClibc_locale.h>
#endif /* __UCLIBC_HAS_XLOCALE__ */
/* Use a funky version in a probably vain attempt at preventing gdb
 * from dlopen()'ing glibc's libthread_db library... */
#define VERSION __stringify(__UCLIBC_MAJOR__) "." __stringify(__UCLIBC_MINOR__) "." __stringify(__UCLIBC_SUBLEVEL__)

#ifndef THREAD_GETMEM
# define THREAD_GETMEM(descr, member) descr->member
#endif
#ifndef THREAD_GETMEM_NC
# define THREAD_GETMEM_NC(descr, member) descr->member
#endif
#ifndef THREAD_SETMEM
# define THREAD_SETMEM(descr, member, value) descr->member = (value)
#endif
#ifndef THREAD_SETMEM_NC
# define THREAD_SETMEM_NC(descr, member, value) descr->member = (value)
#endif
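
/* Note (added comment): the definitions above are only fall-backs; an
 * architecture's pt-machine.h may provide faster THREAD_GETMEM/THREAD_SETMEM
 * variants (for instance, ones that go through a dedicated thread register)
 * instead of these plain descriptor dereferences. */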

/* Arguments passed to thread creation routine */

struct pthread_start_args {
  void * (*start_routine)(void *); /* function to run */
  void * arg;                      /* its argument */
  sigset_t mask;                   /* initial signal mask for thread */
  int schedpolicy;                 /* initial scheduling policy (if any) */
  struct sched_param schedparam;   /* initial scheduling parameters (if any) */
};

/* We keep thread specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  This lets us implement a
   sparse array.  Each dynamic second-level array has
        PTHREAD_KEY_2NDLEVEL_SIZE
   entries.  This value shouldn't be too large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE 32

/* We need to address PTHREAD_KEYS_MAX keys with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)
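
/* Illustrative sketch (added comment, an assumption about the lookup path,
 * not part of the original source): given a key K, the thread-specific
 * value lives at
 *     p_specific[K / PTHREAD_KEY_2NDLEVEL_SIZE][K % PTHREAD_KEY_2NDLEVEL_SIZE]
 * in the owning thread's descriptor, with the second-level array allocated
 * lazily on first use, roughly:
 *
 *     unsigned int idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
 *     unsigned int idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
 *     void **level2 = THREAD_GETMEM_NC(self, p_specific[idx1st]);
 *     void *value = (level2 != NULL) ? level2[idx2nd] : NULL;
 */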

typedef void (*destr_function)(void *);

struct pthread_key_struct {
  int in_use;                   /* already allocated? */
  destr_function destr;         /* destruction routine */
};


#define PTHREAD_START_ARGS_INITIALIZER { NULL, NULL, {{0, }}, 0, { 0 } }

/* The type of thread descriptors */

typedef struct _pthread_descr_struct * pthread_descr;

/* Callback interface for removing the thread from waiting on an
   object if it is cancelled while waiting or about to wait.
   This holds a pointer to the object, and a pointer to a function
   which ``extricates'' the thread from its enqueued state.
   The function takes two arguments: a pointer to the wait object,
   and a pointer to the thread.  It returns 1 if an extrication
   actually occurred, and hence the thread must also be signalled.
   It returns 0 if the thread had already been extricated. */

typedef struct _pthread_extricate_struct {
    void *pu_object;
    int (*pu_extricate_func)(void *, pthread_descr);
} pthread_extricate_if;
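
/* Illustrative sketch (added comment, an assumption about typical use, not
 * part of the original source): a blocking primitive fills one of these in
 * before enqueueing itself, so cancellation can dequeue it again:
 *
 *     static int extricate_func(void *obj, pthread_descr th)
 *     {
 *       remove 'th' from the wait queue rooted at 'obj';
 *       return 1 if it was still queued, 0 if already removed;
 *     }
 *
 *     pthread_extricate_if extr;
 *     extr.pu_object = &waitqueue;
 *     extr.pu_extricate_func = extricate_func;
 */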

/* Atomic counter made possible by compare_and_swap */

struct pthread_atomic {
  long p_count;
  int p_spinlock;
};

/* Context info for read write locks. The pthread_rwlock_info structure
   is information about a lock that has been read-locked by the thread
   in whose list this structure appears. The pthread_rwlock_context
   is embedded in the thread context and contains a pointer to the
   head of the list of lock info structures, as well as a count of
   read locks that are untracked, because no info structure could be
   allocated for them. */

struct _pthread_rwlock_t;

typedef struct _pthread_rwlock_info {
  struct _pthread_rwlock_info *pr_next;
  struct _pthread_rwlock_t *pr_lock;
  int pr_lock_count;
} pthread_readlock_info;

struct _pthread_descr_struct {
  pthread_descr p_nextlive, p_prevlive;
                                /* Double chaining of active threads */
  pthread_descr p_nextwaiting;  /* Next element in the queue holding the thread */
  pthread_descr p_nextlock;     /* can be on a queue and waiting on a lock */
  pthread_t p_tid;              /* Thread identifier */
  int p_pid;                    /* PID of Unix process */
  int p_priority;               /* Thread priority (== 0 if not realtime) */
  struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
  int p_signal;                 /* last signal received */
  sigjmp_buf * p_signal_jmp;    /* where to siglongjmp on a signal or NULL */
  sigjmp_buf * p_cancel_jmp;    /* where to siglongjmp on a cancel or NULL */
  char p_terminated;            /* true if terminated e.g. by pthread_exit */
  char p_detached;              /* true if detached */
  char p_exited;                /* true if the assoc. process terminated */
  void * p_retval;              /* placeholder for return value */
  int p_retcode;                /* placeholder for return code */
  pthread_descr p_joining;      /* thread joining on that thread or NULL */
  struct _pthread_cleanup_buffer * p_cleanup; /* cleanup functions */
  char p_cancelstate;           /* cancellation state */
  char p_canceltype;            /* cancellation type (deferred/async) */
  char p_canceled;              /* cancellation request pending */
  int * p_errnop;               /* pointer to used errno variable */
  int p_errno;                  /* error returned by last system call */
  int * p_h_errnop;             /* pointer to used h_errno variable */
  int p_h_errno;                /* error returned by last netdb function */
  char * p_in_sighandler;       /* stack address of sighandler, or NULL */
  char p_sigwaiting;            /* true if a sigwait() is in progress */
  struct pthread_start_args p_start_args; /* arguments for thread creation */
  void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE]; /* thread-specific data */
  void * p_libc_specific[_LIBC_TSD_KEY_N]; /* thread-specific data for libc */
  int p_userstack;              /* nonzero if the user provided the stack */
  void *p_guardaddr;            /* address of guard area or NULL */
  size_t p_guardsize;           /* size of guard area */
  pthread_descr p_self;         /* Pointer to this structure */
  int p_nr;                     /* Index of descriptor in __pthread_handles */
  int p_report_events;          /* Nonzero if events must be reported. */
  td_eventbuf_t p_eventbuf;     /* Data for event. */
  struct pthread_atomic p_resume_count; /* number of times restart() was
                                           called on thread */
  char p_woken_by_cancel;       /* cancellation performed wakeup */
  char p_condvar_avail;         /* flag if condition variable became avail */
  char p_sem_avail;             /* flag if semaphore became available */
  pthread_extricate_if *p_extricate; /* See above */
  pthread_readlock_info *p_readlock_list; /* List of readlock info structs */
  pthread_readlock_info *p_readlock_free; /* Free list of structs */
  int p_untracked_readlock_count;       /* Readlocks not tracked by list */
  /* New elements must be added at the end. */
#ifdef __UCLIBC_HAS_XLOCALE__
  __locale_t locale; /* thread-specific locale from uselocale() only! */
#endif /* __UCLIBC_HAS_XLOCALE__ */
} __attribute__ ((aligned(32))); /* We need to align the structure so that
                                    doubles are aligned properly.  This is 8
                                    bytes on MIPS and 16 bytes on MIPS64.
                                    32 bytes might give better cache
                                    utilization. */

/* The type of thread handles. */

typedef struct pthread_handle_struct * pthread_handle;

struct pthread_handle_struct {
  struct _pthread_fastlock h_lock; /* Fast lock for synchronized access */
  pthread_descr h_descr;        /* Thread descriptor or NULL if invalid */
  char * h_bottom;              /* Lowest address in the thread's stack */
};

/* The type of messages sent to the thread manager thread */

struct pthread_request {
  pthread_descr req_thread;     /* Thread doing the request */
  enum {                        /* Request kind */
    REQ_CREATE, REQ_FREE, REQ_PROCESS_EXIT, REQ_MAIN_THREAD_EXIT,
    REQ_POST, REQ_DEBUG, REQ_KICK
  } req_kind;
  union {                       /* Arguments for request */
    struct {                    /* For REQ_CREATE: */
      const pthread_attr_t * attr; /* thread attributes */
      void * (*fn)(void *);     /* start function */
      void * arg;               /* argument to start function */
      sigset_t mask;            /* signal mask */
    } create;
    struct {                    /* For REQ_FREE: */
      pthread_t thread_id;      /* identifier of thread to free */
    } free;
    struct {                    /* For REQ_PROCESS_EXIT: */
      int code;                 /* exit status */
    } exit;
    void * post;                /* For REQ_POST: the semaphore */
  } req_args;
};
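
/* Illustrative sketch (added comment, an assumption about the request path,
 * not part of the original source): a request is handed to the manager
 * thread by writing the whole struct down the request pipe, roughly:
 *
 *     struct pthread_request request;
 *     request.req_thread = thread_self();
 *     request.req_kind = REQ_CREATE;
 *     fill in request.req_args.create, then:
 *     write(__pthread_manager_request, (char *) &request, sizeof(request));
 */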


/* Signals used for suspend/restart and for cancellation notification. */

extern int __pthread_sig_restart;
extern int __pthread_sig_cancel;

/* Signal used for interfacing with gdb */

extern int __pthread_sig_debug;

/* Global array of thread handles, used for validating a thread id
   and retrieving the corresponding thread descriptor. Also used for
   mapping the available stack segments. */

extern struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX];

/* Descriptor of the initial thread */

extern struct _pthread_descr_struct __pthread_initial_thread;

/* Descriptor of the manager thread */

extern struct _pthread_descr_struct __pthread_manager_thread;

/* Descriptor of the main thread */

extern pthread_descr __pthread_main_thread;

/* Limit between the stack of the initial thread (above) and the
   stacks of other threads (below). Aligned on a STACK_SIZE boundary.
   Initially 0, meaning that the current thread is (by definition)
   the initial thread. */

extern char *__pthread_initial_thread_bos;
#ifndef __ARCH_USE_MMU__
/* For non-MMU systems, we have no idea of the bounds of the initial thread
 * stack, so we have to track them on the fly relative to other stacks.  Do
 * so by scaling back our assumptions on the limits of the bos/tos relative
 * to the known mid point.  See also the comments in pthread_initialize(). */
extern char *__pthread_initial_thread_tos, *__pthread_initial_thread_mid;
#define NOMMU_INITIAL_THREAD_BOUNDS(tos,bos) \
	do { \
		char *__tos = (tos); \
		char *__bos = (bos); \
		if (__tos >= __pthread_initial_thread_bos && \
		    __bos < __pthread_initial_thread_tos) { \
			if (__bos < __pthread_initial_thread_mid) \
				__pthread_initial_thread_bos = __tos; \
			else \
				__pthread_initial_thread_tos = __bos; \
		} \
	} while (0)
#else
#define NOMMU_INITIAL_THREAD_BOUNDS(tos,bos) /* empty */
#endif /* __ARCH_USE_MMU__ */
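
/* Illustrative note (added comment, an assumption about usage, not part of
 * the original source): the stack allocation code is expected to invoke this
 * macro with the bounds of each newly allocated thread stack so that the
 * initial thread's assumed bounds shrink away from it, e.g.:
 *
 *     NOMMU_INITIAL_THREAD_BOUNDS(new_thread_top, new_thread_bottom);
 */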


/* Indicate whether at least one thread has a user-defined stack (if 1),
   or all threads have stacks supplied by LinuxThreads (if 0). */

extern int __pthread_nonstandard_stacks;

/* File descriptor for sending requests to the thread manager.
   Initially -1, meaning that __pthread_initialize_manager must be called. */

extern int __pthread_manager_request;

/* Other end of the pipe for sending requests to the thread manager. */

extern int __pthread_manager_reader;

/* Limits of the thread manager stack. */

extern char *__pthread_manager_thread_bos;
extern char *__pthread_manager_thread_tos;

/* Pending request for a process-wide exit */

extern int __pthread_exit_requested, __pthread_exit_code;

/* Set to 1 by gdb if we're debugging */

extern volatile int __pthread_threads_debug;

/* Globally enabled events. */
extern volatile td_thr_events_t __pthread_threads_events;

/* Pointer to descriptor of thread with last event. */
extern volatile pthread_descr __pthread_last_event;

/* Return the handle corresponding to a thread id */

static __inline__ pthread_handle thread_handle(pthread_t id)
{
  return &__pthread_handles[id % PTHREAD_THREADS_MAX];
}

/* Validate a thread handle. Must have acquired h->h_lock before. */

static __inline__ int invalid_handle(pthread_handle h, pthread_t id)
{
  return h->h_descr == NULL || h->h_descr->p_tid != id;
}
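
/* Illustrative sketch (added comment, an assumption about the usual calling
 * pattern, not part of the original source): functions taking a pthread_t
 * look it up and validate it roughly like this:
 *
 *     pthread_handle handle = thread_handle(th);
 *     __pthread_lock(&handle->h_lock, NULL);
 *     if (invalid_handle(handle, th)) {
 *       __pthread_unlock(&handle->h_lock);
 *       return ESRCH;
 *     }
 *     use handle->h_descr while still holding h_lock ...
 */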

/* Fill in defaults left unspecified by pt-machine.h. */

/* The page size we can get from the system.  This should likely not be
   changed by the machine file, but you never know. */
extern size_t __pagesize;
#include <bits/uClibc_page.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE (sysconf (_SC_PAGESIZE))
#endif

/* The max size of the thread stack segments.  If the default
   THREAD_SELF implementation is used, this must be a power of two and
   a multiple of PAGE_SIZE. */
#ifndef STACK_SIZE
#ifdef __ARCH_USE_MMU__
#define STACK_SIZE (2 * 1024 * 1024)
#else
#define STACK_SIZE (4 * __pagesize)
#endif
#endif

/* The initial size of the thread stack.  Must be a multiple of PAGE_SIZE. */
#ifndef INITIAL_STACK_SIZE
#define INITIAL_STACK_SIZE (4 * __pagesize)
#endif

/* Size of the thread manager stack.  The "- 32" avoids wasting space
   with some malloc() implementations. */
#ifndef THREAD_MANAGER_STACK_SIZE
#define THREAD_MANAGER_STACK_SIZE (2 * __pagesize - 32)
#endif

/* The base of the "array" of thread stacks.  The array will grow down from
   here.  Defaults to the calculated bottom of the initial application
   stack. */
#ifndef THREAD_STACK_START_ADDRESS
#define THREAD_STACK_START_ADDRESS __pthread_initial_thread_bos
#endif

/* Get some notion of the current stack.  Need not be exactly the top
   of the stack, just something somewhere in the current frame. */
#ifndef CURRENT_STACK_FRAME
#define CURRENT_STACK_FRAME ({ char __csf; &__csf; })
#endif

/* If MEMORY_BARRIER isn't defined in pt-machine.h, assume the
   architecture doesn't need a memory barrier instruction (e.g. Intel
   x86).  Still we need the compiler to respect the barrier and emit
   all outstanding operations which modify memory.  Some architectures
   distinguish between full, read and write barriers. */
#ifndef MEMORY_BARRIER
#define MEMORY_BARRIER() __asm__ ("" : : : "memory")
#endif
#ifndef READ_MEMORY_BARRIER
#define READ_MEMORY_BARRIER() MEMORY_BARRIER()
#endif
#ifndef WRITE_MEMORY_BARRIER
#define WRITE_MEMORY_BARRIER() MEMORY_BARRIER()
#endif

/* Recover thread descriptor for the current thread */

extern pthread_descr __pthread_find_self (void) __attribute__ ((const));

static __inline__ pthread_descr thread_self (void) __attribute__ ((const));
static __inline__ pthread_descr thread_self (void)
{
#ifdef THREAD_SELF
  return THREAD_SELF;
#else
  char *sp = CURRENT_STACK_FRAME;
#ifdef __ARCH_USE_MMU__
  if (sp >= __pthread_initial_thread_bos)
    return &__pthread_initial_thread;
  else if (sp >= __pthread_manager_thread_bos
           && sp < __pthread_manager_thread_tos)
    return &__pthread_manager_thread;
  else if (__pthread_nonstandard_stacks)
    return __pthread_find_self();
  else
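    /* Explanatory note (added comment, an assumption about the default
       stack layout, not part of the original source): each thread stack
       occupies its own STACK_SIZE-aligned, STACK_SIZE-sized segment with
       the descriptor at the very top, so rounding SP up to the next
       STACK_SIZE boundary and stepping back one descriptor yields the
       calling thread's descriptor. */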
    return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
#else
  /* For non-MMU systems we need to be more careful about the initial thread
   * stack.  We refine the initial thread stack bounds dynamically as we
   * allocate the other stacks, so that they never overlap with it.  Then
   * we can be sure to pick the right thread according to the current SP. */

  /* Since we allow other stacks to lie above or below, we need to treat
   * this case specially.  When pthread_initialize() hasn't been called yet,
   * only the initial thread exists. */
  if (__pthread_initial_thread_bos == NULL) {
    return &__pthread_initial_thread;
  }
  else if (sp >= __pthread_initial_thread_bos
           && sp < __pthread_initial_thread_tos) {
    return &__pthread_initial_thread;
  }
  else if (sp >= __pthread_manager_thread_bos
           && sp < __pthread_manager_thread_tos) {
    return &__pthread_manager_thread;
  }
  else {
    return __pthread_find_self();
  }
#endif /* __ARCH_USE_MMU__ */
#endif
}

/* Max number of times we must spin on a spinlock calling sched_yield().
   After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */

#ifndef MAX_SPIN_COUNT
#define MAX_SPIN_COUNT 50
#endif

/* Duration of sleep (in nanoseconds) when we can't acquire a spinlock
   after MAX_SPIN_COUNT iterations of sched_yield().
   With the 2.0 and 2.1 kernels, this MUST BE > 2ms.
   (Otherwise the kernel does busy-waiting for realtime threads,
   giving other threads no chance to run.) */

#ifndef SPIN_SLEEP_DURATION
#define SPIN_SLEEP_DURATION 2000001
#endif
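
/* Illustrative sketch (added comment, an assumption about how the spinlock
 * slow path uses these constants, not part of the original source):
 *
 *     int cnt = 0;
 *     struct timespec tm = { .tv_sec = 0, .tv_nsec = SPIN_SLEEP_DURATION };
 *     while (testandset(spinlock)) {
 *       if (cnt < MAX_SPIN_COUNT) {
 *         sched_yield();
 *         cnt++;
 *       } else {
 *         nanosleep(&tm, NULL);
 *         cnt = 0;
 *       }
 *     }
 */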

/* Defined and used in libc.so. */
extern int __libc_multiple_threads attribute_hidden;
extern int __librt_multiple_threads;

/* Internal global functions */

void __pthread_do_exit (void *retval, char *currentframe)
     __attribute__ ((__noreturn__));
void __pthread_destroy_specifics(void);
void __pthread_perform_cleanup(char *currentframe);
int __pthread_initialize_manager(void);
void __pthread_message(char * fmt, ...)
     __attribute__ ((__format__ (printf, 1, 2)));
int __pthread_manager(void *reqfd);
int __pthread_manager_event(void *reqfd);
void __pthread_manager_sighandler(int sig);
void __pthread_reset_main_thread(void);
void __fresetlockfiles(void);
void __pthread_manager_adjust_prio(int thread_prio);
void __pthread_initialize_minimal (void);

extern void __pthread_exit (void *retval)
#if defined NOT_IN_libc && defined IS_IN_libpthread
	attribute_noreturn
#endif
	;

extern int __pthread_attr_setguardsize __P ((pthread_attr_t *__attr,
					     size_t __guardsize));
extern int __pthread_attr_getguardsize __P ((__const pthread_attr_t *__attr,
					     size_t *__guardsize));
extern int __pthread_attr_setstackaddr __P ((pthread_attr_t *__attr,
					     void *__stackaddr));
extern int __pthread_attr_getstackaddr __P ((__const pthread_attr_t *__attr,
					     void **__stackaddr));
extern int __pthread_attr_setstacksize __P ((pthread_attr_t *__attr,
					     size_t __stacksize));
extern int __pthread_attr_getstacksize __P ((__const pthread_attr_t *__attr,
					     size_t *__stacksize));
extern int __pthread_getconcurrency __P ((void));
extern int __pthread_setconcurrency __P ((int __level));
extern void __pthread_kill_other_threads_np __P ((void));

extern void __pthread_restart_old(pthread_descr th);
extern void __pthread_suspend_old(pthread_descr self);
extern int __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime);

extern void __pthread_restart_new(pthread_descr th);
extern void __pthread_suspend_new(pthread_descr self);
extern int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime);

extern void __pthread_wait_for_restart_signal(pthread_descr self);

/* Global pointers to old or new suspend functions */

extern void (*__pthread_restart)(pthread_descr);
extern void (*__pthread_suspend)(pthread_descr);

#if defined NOT_IN_libc && defined IS_IN_libpthread
extern __typeof(pthread_mutex_init) __pthread_mutex_init attribute_hidden;
extern __typeof(pthread_mutex_destroy) __pthread_mutex_destroy attribute_hidden;
extern __typeof(pthread_mutex_lock) __pthread_mutex_lock attribute_hidden;
extern __typeof(pthread_mutex_trylock) __pthread_mutex_trylock attribute_hidden;
extern __typeof(pthread_mutex_unlock) __pthread_mutex_unlock attribute_hidden;
#endif

/* Prototypes for some of the new semaphore functions. */
extern int __new_sem_post (sem_t * sem);

/* TSD. */
extern int __pthread_internal_tsd_set (int key, const void * pointer);
extern void * __pthread_internal_tsd_get (int key);
extern void ** __attribute__ ((__const__))
__pthread_internal_tsd_address (int key);

/* The functions called to signal thread events. */
extern void __linuxthreads_create_event (void);
extern void __linuxthreads_death_event (void);
extern void __linuxthreads_reap_event (void);

#include <pthread-functions.h>

extern int * __libc_pthread_init (const struct pthread_functions *functions);

#endif /* internals.h */