/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  Initialized to 1 to account for the
   initial (main) thread.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* createthread.c defines this function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread is obliged to initialize PD->stopped_start.  It
   should be true if the STOPPED_START parameter is true, or if
   create_thread needs the new thread to synchronize at startup for
   some other implementation reason.  If PD->stopped_start will be
   true, then create_thread is obliged to perform the operation
   "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   cancelled before calling user code (*PD->start_routine), in which
   case it is responsible for doing its own cleanup.  */

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
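                /* This second-level block was never allocated, so there
                   are no destructors to run; just skip over the key
                   indexes it would have covered.  */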
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times; POSIX allows
         an implementation to give up after PTHREAD_DESTRUCTOR_ITERATIONS
         rounds.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
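  /* (The futex was set to a negative value while the thread was being
     created to delay setxid requests; a value of -2 here means such a
     request arrived in the meantime and its issuer is blocked on the
     futex, so it must be woken.)  */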
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

#ifdef SIGCANCEL
  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
#endif

  /* This is where the try/finally block should be created.  For
     compilers without that support, we use setjmp instead.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
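  /* The initial call to setjmp returns zero.  A nonzero value means we
     returned here via longjmp from the unwinding code, i.e. the thread
     was cancelled or called pthread_exit, so the start routine must not
     be run (again).  */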
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
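  /* (In the static build, __call_tls_dtors is presumably only a weak
     reference that may be unresolved if no TLS destructor support was
     linked in; the address test guards the call.)  */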
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread, we terminate the process now.  We do
     not notify the debugger; it would only be irritated if there is no
     thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since kernel support for those is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

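          /* Mark the mutex as owned by a dead thread: a waiter woken
             below acquires it with EOWNERDEAD and can then make the
             protected state consistent again.  */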
          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
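  /* (Presumably PTHREAD_STACK_MIN bytes are held back as headroom for
     the code that still runs on this stack until __exit_thread below.)  */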
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case, wait until we have done so.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}


/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}


int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
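  /* (STACK_VARIABLES expands to the machine-specific local variable(s),
     typically just a stack pointer, through which ALLOCATE_STACK below
     hands the new thread's stack over to create_thread.)  */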
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  bool thread_ran = false;

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      /* Create the thread.  We always create the thread stopped
         so that it does not get far before we tell the debugger.  */
      retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS,
                              &thread_ran);
      if (retval == 0)
        {
          /* create_thread should have set this so that the logic below can
             test it.  */
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent)
                 != 0);

          /* Now call the function which signals the event.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS,
                            &thread_ran);

  if (__glibc_unlikely (retval != 0))
    {
      /* If thread creation "failed", that might mean that the thread got
         created and ran a little--short of running user code--but then
         create_thread cancelled it.  In that case, the thread will do all
         its own cleanup just like a normal thread exit after a successful
         creation would do.  */

      if (thread_ran)
        assert (pd->stopped_start);
      else
        {
          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      if (pd->stopped_start)
        /* The thread blocked on this lock either because we're doing TD_CREATE
           event reporting, or for some other reason that create_thread chose.
           Now let it run free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;
      new_attr.cpusetsize = 0;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)