/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _DESCR_H
#define _DESCR_H	1

#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/types.h>
#include <hp-timing.h>
#define __need_list_t
#include <list.h>
#include <lowlevellock.h>
#include <pthreaddef.h>
#include <dl-sysdep.h>
#include "../nptl_db/thread_db.h"
#include <tls.h>
#include <unwind.h>
#define __need_res_state
#include <resolv.h>
#include <kernel-features.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT	sizeof (double)
#endif


/* We keep thread specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  So we can implement a
   sparse array.  Each dynamic second-level array has
        PTHREAD_KEY_2NDLEVEL_SIZE
   entries.  This value shouldn't be too large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE       32

/* We need to address PTHREAD_KEYS_MAX keys with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)
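
/* Illustrative sketch, not part of the original header: how a key number
   is split across the two levels.  The helper below is hypothetical and
   only meant to make the layout concrete; with
   PTHREAD_KEY_2NDLEVEL_SIZE == 32, key 70 lands in subarray 70 / 32 == 2
   at slot 70 % 32 == 6.  Subarray 0 is the statically allocated
   specific_1stblock member of struct pthread; the others are created on
   demand and reached through the specific[] pointer array.  */
#if 0	/* Example only, never compiled.  */
#include <stddef.h>

static inline void
__example_key_indices (unsigned int key, size_t *idx1st, size_t *idx2nd)
{
  *idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;	/* which subarray */
  *idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;	/* slot inside it */
}
#endif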



/* Internal version of the buffer to store cancellation handler
   information.  */
struct pthread_unwind_buf
{
  struct
  {
    __jmp_buf jmp_buf;
    int mask_was_saved;
  } cancel_jmp_buf[1];

  union
  {
    /* This is the placeholder of the public version.  */
    void *pad[4];

    struct
    {
      /* Pointer to the previous cleanup buffer.  */
      struct pthread_unwind_buf *prev;

      /* Backward compatibility: state of the old-style cleanup
	 handler at the time of the previous new-style cleanup handler
	 installment.  */
      struct _pthread_cleanup_buffer *cleanup;

      /* Cancellation type before the push call.  */
      int canceltype;
    } data;
  } priv;
};
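
/* Illustrative sketch, not part of the original header: the public
   counterpart of this structure (__pthread_unwind_buf_t in pthread.h)
   exposes only a void *__pad[4] member where this internal version keeps
   its private data, so the private struct must not outgrow the padding.
   A hypothetical compile-time check for that invariant:  */
#if 0	/* Example only, never compiled.  */
_Static_assert (sizeof (((struct pthread_unwind_buf *) 0)->priv.data)
		<= sizeof (((struct pthread_unwind_buf *) 0)->priv.pad),
		"private unwind data must fit in the public padding");
#endif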


/* Opcodes and data types for communication with the signal handler to
   change user/group IDs.  */
struct xid_command
{
  int syscall_no;
  long int id[3];
  volatile int cntr;
  volatile int error; /* -1: no call yet, 0: success seen, >0: error seen.  */
};
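
/* Illustrative sketch, not part of the original header: setuid() and
   friends record the syscall number and its arguments in an xid_command,
   signal every thread, and each thread replays the call so the ID change
   takes effect process-wide.  Simplified, hypothetical view of what a
   thread does with the command (the real handler uses glibc-internal
   syscall macros and atomic operations on cntr):  */
#if 0	/* Example only, never compiled.  */
#include <errno.h>
#include <unistd.h>

static void
__example_apply_xid (struct xid_command *cmd)
{
  long int result = syscall (cmd->syscall_no,
			     cmd->id[0], cmd->id[1], cmd->id[2]);
  cmd->error = result == 0 ? 0 : errno;	/* 0: success, >0: errno value */
}
#endif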


/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};
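
/* Illustrative sketch, not part of the original header: each thread
   registers its robust_list_head with the kernel once through the
   set_robust_list system call; on thread exit the kernel walks the list
   and marks any robust mutex the thread still held so waiters can
   recover it.  Hypothetical registration, roughly what the thread
   start-up path does:  */
#if 0	/* Example only, never compiled.  */
#include <unistd.h>
#include <sys/syscall.h>

static int
__example_register_robust_list (struct robust_list_head *head,
				long int futex_offset)
{
  head->list = head;			/* empty (circular) list */
  head->futex_offset = futex_offset;	/* list entry -> futex word */
  head->list_op_pending = NULL;
  return syscall (SYS_set_robust_list, head, sizeof (*head));
}
#endif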


/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int priomax;
  unsigned int priomap[];
};
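
/* Illustrative sketch, not part of the original header: priomap[] is a
   flexible array member, so the structure is allocated with room for one
   counter per priority level.  Hypothetical allocation for "prio_levels"
   levels:  */
#if 0	/* Example only, never compiled.  */
#include <stdlib.h>

static struct priority_protection_data *
__example_alloc_tpp (size_t prio_levels)
{
  /* calloc leaves priomax and every priomap[] slot zero-initialized.  */
  return calloc (1, sizeof (struct priority_protection_data)
		    + prio_levels * sizeof (unsigned int));
}
#endif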


/* Thread descriptor data structure.  */
struct pthread
{
  union
  {
#if !TLS_DTV_AT_TP
    /* This overlaps the TCB as used for TLS without threads (see tls.h).  */
    tcbhead_t header;
#else
    struct
    {
      /* multiple_threads is enabled either when the process has spawned at
	 least one thread or when a single-threaded process cancels itself.
	 This enables additional code to introduce locking before doing some
	 compare_and_exchange operations and also enables cancellation points.
	 The concepts of multiple threads and cancellation points ideally
	 should be separate, since it is not necessary for multiple threads to
	 have been created for cancellation points to be enabled, as is the
	 case when a single-threaded process cancels itself.

	 Since enabling multiple_threads enables additional code in
	 cancellation points and compare_and_exchange operations, there is a
	 potential for an unneeded performance hit when it is enabled in a
	 single-threaded, self-canceling process.  This is OK though, since a
	 single-threaded process will enable async cancellation only when it
	 looks to cancel itself and is hence going to end anyway.  */
      int multiple_threads;
      int gscope_flag;
# ifndef __ASSUME_PRIVATE_FUTEX
      int private_futex;
# endif
    } header;
#endif

    /* This extra padding has no special purpose, and this structure layout
       is private and subject to change without affecting the official ABI.
       We just have it here in case it might be convenient for some
       implementation-specific instrumentation hack or suchlike.  */
    void *__padding[24];
  };

  /* This descriptor's link on the `stack_used' or `__stack_user' list.  */
  list_t list;

  /* Thread ID - which is also a 'is this thread descriptor (and
     therefore stack) used' flag.  */
  pid_t tid;

  /* Process ID - thread group ID in kernel speak.  */
  pid_t pid;

  /* List of robust mutexes the thread is holding.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly-linked list,
     but the pointer to the next/previous element of the list points
     into the middle of the object, at the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val)					      \
  do {									      \
    __pthread_list_t *next = (__pthread_list_t *)			      \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul)  \
       - QUEUE_PTR_ADJUST);						      \
    next->__prev = (void *) &mutex->__data.__list.__next;		      \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF,		      \
						 robust_head.list);	      \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head;	      \
    THREAD_SETMEM (THREAD_SELF, robust_head.list,			      \
		   (void *) (((uintptr_t) &mutex->__data.__list.__next)      \
			     | val));					      \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do {									      \
    __pthread_list_t *next = (__pthread_list_t *)			      \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul)	      \
       - QUEUE_PTR_ADJUST);						      \
    next->__prev = mutex->__data.__list.__prev;				      \
    __pthread_list_t *prev = (__pthread_list_t *)			      \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul)	      \
       - QUEUE_PTR_ADJUST);						      \
    prev->__next = mutex->__data.__list.__next;				      \
    mutex->__data.__list.__prev = NULL;					      \
    mutex->__data.__list.__next = NULL;					      \
  } while (0)
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val)					      \
  do {									      \
    mutex->__data.__list.__next						      \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next);		      \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next,			      \
		   (void *) (((uintptr_t) &mutex->__data.__list) | val));    \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do {									      \
    __pthread_slist_t *runp = (__pthread_slist_t *)			      \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
    if (runp == &mutex->__data.__list)					      \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next);	      \
    else								      \
      {									      \
	__pthread_slist_t *next = (__pthread_slist_t *)			      \
	  (((uintptr_t) runp->__next) & ~1ul);				      \
	while (next != &mutex->__data.__list)				      \
	  {								      \
	    runp = next;						      \
	    next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
	  }								      \
									      \
	runp->__next = next->__next;					      \
	mutex->__data.__list.__next = NULL;				      \
      }									      \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)
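
  /* Illustrative sketch, not part of the original header: the robust-mutex
     list stores tagged pointers.  ENQUEUE_MUTEX_PI passes val == 1, so the
     least significant bit of the stored address marks a
     priority-inheritance mutex, which is why the macros above mask with
     ~1ul before dereferencing.  Hypothetical helpers showing the
     pattern:  */
#if 0	/* Example only, never compiled.  */
#include <stdint.h>

static inline void *
__example_untag (void *tagged)
{
  return (void *) ((uintptr_t) tagged & ~(uintptr_t) 1);
}

static inline int
__example_is_pi (void *tagged)
{
  return ((uintptr_t) tagged & 1) != 0;
}
#endif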

  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

  /* Unwind information.  */
  struct pthread_unwind_buf *cleanup_jmp_buf;
#define HAVE_CLEANUP_JMP_BUF

  /* Flags determining processing of cancellation.  */
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT		0
#define CANCELSTATE_BITMASK	(0x01 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT		1
#define CANCELTYPE_BITMASK	(0x01 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT		2
#define CANCELING_BITMASK	(0x01 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT		3
#define CANCELED_BITMASK	(0x01 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT		4
#define EXITING_BITMASK		(0x01 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT		5
#define TERMINATED_BITMASK	(0x01 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT		6
#define SETXID_BITMASK		(0x01 << SETXID_BIT)
  /* Mask for the rest.  Helps the compiler to optimize.  */
#define CANCEL_RESTMASK		0xffffff80

#define CANCEL_ENABLED_AND_CANCELED(value) \
  (((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK      \
	       | CANCEL_RESTMASK | TERMINATED_BITMASK)) == CANCELED_BITMASK)
#define CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS(value) \
  (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK   \
	       | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK))    \
   == (CANCELTYPE_BITMASK | CANCELED_BITMASK))
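
  /* Illustrative sketch, not part of the original header: a freshly
     created thread starts with cancelhandling == 0 (cancellation enabled,
     deferred type).  The hypothetical helper below restates the
     CANCEL_ENABLED_AND_CANCELED test in long form, except that it omits
     the CANCEL_RESTMASK bits the real macro also requires to be clear.  */
#if 0	/* Example only, never compiled.  */
static inline int
__example_should_act_on_cancel (int cancelhandling)
{
  return (cancelhandling & CANCELSTATE_BITMASK) == 0	    /* enabled */
	 && (cancelhandling & CANCELED_BITMASK) != 0	    /* canceled */
	 && (cancelhandling & (EXITING_BITMASK
			       | TERMINATED_BITMASK)) == 0; /* still alive */
}
#endif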

  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
  {
    /* Sequence number.  We use uintptr_t to not require padding on
       32- and 64-bit machines.  On 64-bit machines it helps to avoid
       wrapping, too.  */
    uintptr_t seq;

    /* Data pointer.  */
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

  /* True if the user provided the stack.  */
  bool user_stack;

  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* The parent's cancel handling at the time of the pthread_create
     call.  This might be needed to undo the effects of a cancellation.  */
  int parent_cancelhandling;

  /* Lock to synchronize access to the descriptor.  */
  int lock;

  /* Lock for synchronizing setxid calls.  */
  unsigned int setxid_futex;

#if HP_TIMING_AVAIL
  /* Offset of the CPU clock at thread start time.  */
  hp_timing_t cpuclock_offset;
#endif

  /* If the thread waits to join another one the ID of the latter is
     stored here.

     In case a thread is detached this field contains a pointer to the
     TCB of the thread itself.  This is something which cannot happen
     in normal operation.  */
  struct pthread *joinid;
  /* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))
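
  /* Illustrative sketch, not part of the original header: joinid encodes
     three states: NULL (joinable, nobody waiting yet), the descriptor of
     the thread performing the join, or the descriptor itself (detached),
     which is what IS_DETACHED tests.  A conceptual pthread_detach is then
     the hypothetical helper below; the real code uses an atomic
     compare-and-exchange so it does not race with a concurrent join.  */
#if 0	/* Example only, never compiled.  */
static inline void
__example_mark_detached (struct pthread *pd)
{
  pd->joinid = pd;	/* self-reference == detached */
}
#endif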

  /* The result of the thread function.  */
  void *result;

  /* Scheduling parameters for the new thread.  */
  struct sched_param schedparam;
  int schedpolicy;

  /* Start position of the code to be executed and the argument passed
     to the function.  */
  void *(*start_routine) (void *);
  void *arg;

  /* Debug state.  */
  td_eventbuf_t eventbuf;
  /* Next descriptor with a pending event.  */
  struct pthread *nextevent;

  /* Machine-specific unwind info.  */
  struct _Unwind_Exception exc;

  /* If nonzero, pointer to the area allocated for the stack, and its
     size.  */
  void *stackblock;
  size_t stackblock_size;
  /* Size of the included guard area.  */
  size_t guardsize;
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;

  /* This member must be last.  */
  char end_padding[];

#define PTHREAD_STRUCT_END_PADDING \
  (sizeof (struct pthread) - offsetof (struct pthread, end_padding))
} __attribute ((aligned (TCB_ALIGNMENT)));


#endif	/* descr.h */