xf.li | bfc6e71 | 2025-02-07 01:54:34 -0800 | [diff] [blame^] | 1 | /* Helper code for POSIX timer implementation on NPTL. |
| 2 | Copyright (C) 2000-2016 Free Software Foundation, Inc. |
| 3 | This file is part of the GNU C Library. |
| 4 | Contributed by Kaz Kylheku <kaz@ashi.footprints.net>. |
| 5 | |
| 6 | The GNU C Library is free software; you can redistribute it and/or |
| 7 | modify it under the terms of the GNU Lesser General Public License as |
| 8 | published by the Free Software Foundation; either version 2.1 of the |
| 9 | License, or (at your option) any later version. |
| 10 | |
| 11 | The GNU C Library is distributed in the hope that it will be useful, |
| 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | Lesser General Public License for more details. |
| 15 | |
| 16 | You should have received a copy of the GNU Lesser General Public |
| 17 | License along with the GNU C Library; see the file COPYING.LIB. If |
| 18 | not, see <http://www.gnu.org/licenses/>. */ |
| 19 | |
| 20 | #include <assert.h> |
| 21 | #include <errno.h> |
| 22 | #include <pthread.h> |
| 23 | #include <stddef.h> |
| 24 | #include <stdlib.h> |
| 25 | #include <string.h> |
| 26 | #include <sysdep.h> |
| 27 | #include <time.h> |
| 28 | #include <unistd.h> |
| 29 | #include <sys/syscall.h> |
| 30 | |
| 31 | #include "posix-timer.h" |
| 32 | #include <nptl/pthreadP.h> |
| 33 | |
| 34 | |
/* Number of threads used.  */
#define THREAD_MAXNODES 16

/* Array containing the descriptors for the used threads.  */
static struct thread_node thread_array[THREAD_MAXNODES];

/* Static array with the structures for all the timers.  */
struct timer_node __timer_array[TIMER_MAX];

/* Global lock to protect operation on the lists.  */
pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Variable to protect initialization.  */
pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;

/* Nonzero if initialization of timer implementation failed.  */
int __timer_init_failed;

/* Node for the thread used to deliver signals.  */
struct thread_node __timer_signal_thread_rclk;

/* Lists to keep free and used timers and threads.  Circular,
   head-node lists manipulated with the inline helpers below.  */
static struct list_head timer_free_list;
static struct list_head thread_free_list;
static struct list_head thread_active_list;


#ifdef __NR_rt_sigqueueinfo
extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
#endif
| 65 | |
| 66 | |
| 67 | /* List handling functions. */ |
| 68 | static inline void |
| 69 | list_append (struct list_head *list, struct list_head *newp) |
| 70 | { |
| 71 | newp->prev = list->prev; |
| 72 | newp->next = list; |
| 73 | list->prev->next = newp; |
| 74 | list->prev = newp; |
| 75 | } |
| 76 | |
/* Insert NEWP immediately before the node LIST.  Because these lists
   are circular and list_append inserts just before its first argument,
   insertion-before is exactly the append operation.  */
static inline void
list_insbefore (struct list_head *list, struct list_head *newp)
{
  list_append (list, newp);
}
| 82 | |
/*
 * Remove LIST from whatever chain it is on by splicing its neighbors
 * together.  Like list_unlink_ip, except that calling it on a node that
 * is already unlinked is disastrous rather than a noop.
 */

static inline void
list_unlink (struct list_head *list)
{
  struct list_head *lnext = list->next, *lprev = list->prev;

  lnext->prev = lprev;
  lprev->next = lnext;
}
| 96 | |
/* Return the first element of LIST; when the list is empty this is the
   head node itself (compare against list_null to detect that).  */
static inline struct list_head *
list_first (struct list_head *list)
{
  return list->next;
}
| 102 | |
/* Return the sentinel that terminates iteration over LIST: the head
   node itself, since the lists are circular.  */
static inline struct list_head *
list_null (struct list_head *list)
{
  return list;
}
| 108 | |
/* Return the successor of node LIST within its chain.  */
static inline struct list_head *
list_next (struct list_head *list)
{
  return list->next;
}
| 114 | |
/* Return nonzero if LIST holds no elements, i.e. the head node points
   back at itself.  */
static inline int
list_isempty (struct list_head *list)
{
  return list->next == list;
}
| 120 | |
| 121 | |
/* Functions build on top of the list functions.  */

/* Recover the enclosing thread_node from a pointer to its embedded
   `links' list node (container-of idiom via offsetof).  */
static inline struct thread_node *
thread_links2ptr (struct list_head *list)
{
  return (struct thread_node *) ((char *) list
				 - offsetof (struct thread_node, links));
}
| 129 | |
/* Recover the enclosing timer_node from a pointer to its embedded
   `links' list node.  */
static inline struct timer_node *
timer_links2ptr (struct list_head *list)
{
  return (struct timer_node *) ((char *) list
				- offsetof (struct timer_node, links));
}
| 136 | |
| 137 | |
/* Initialize a newly allocated thread structure.  If ATTR is non-null
   the caller's attributes are copied; otherwise default attributes with
   the detached state set are used, since timer threads are never
   joined.  */
static void
thread_init (struct thread_node *thread, const pthread_attr_t *attr, clockid_t clock_id)
{
  if (attr != NULL)
    thread->attr = *attr;
  else
    {
      pthread_attr_init (&thread->attr);
      pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
    }

  thread->exists = 0;		/* No kernel thread running for this node yet.  */
  INIT_LIST_HEAD (&thread->timer_queue);
  pthread_cond_init (&thread->cond, 0);
  thread->current_timer = 0;	/* Not currently expiring any timer.  */
  thread->captured = pthread_self ();
  thread->clock_id = clock_id;
}
| 157 | |
| 158 | |
/* Initialize the global lists, and acquire global resources.  Error
   reporting is done by storing a non-zero value to the global variable
   timer_init_failed.  */
static void
init_module (void)
{
  int i;

  INIT_LIST_HEAD (&timer_free_list);
  INIT_LIST_HEAD (&thread_free_list);
  INIT_LIST_HEAD (&thread_active_list);

  /* All timer descriptors start out on the free list.  */
  for (i = 0; i < TIMER_MAX; ++i)
    {
      list_append (&timer_free_list, &__timer_array[i].links);
      __timer_array[i].inuse = TIMER_FREE;
    }

  /* Likewise all thread descriptors.  */
  for (i = 0; i < THREAD_MAXNODES; ++i)
    list_append (&thread_free_list, &thread_array[i].links);

  /* Set up the dedicated signal-delivery thread node, bound to
     CLOCK_REALTIME.  */
  thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
}
| 182 | |
| 183 | |
/* This is a handler executed in a child process after a fork()
   occurs.  It reinitializes the module, resetting all of the data
   structures to their initial state.  The mutex is initialized in
   case it was locked in the parent process.  */
static void
reinit_after_fork (void)
{
  init_module ();
  pthread_mutex_init (&__timer_mutex, 0);
}
| 194 | |
| 195 | |
/* Called once from pthread_once in timer_init.  This initializes the
   module and ensures that reinit_after_fork will be executed in any
   child process.  */
void
__timer_init_once (void)
{
  init_module ();
  pthread_atfork (0, 0, reinit_after_fork);
}
| 205 | |
| 206 | |
/* Deinitialize a thread that is about to be deallocated.  The thread
   must no longer have any timers queued.  */
static void
thread_deinit (struct thread_node *thread)
{
  assert (list_isempty (&thread->timer_queue));
  pthread_cond_destroy (&thread->cond);
}
| 214 | |
| 215 | |
| 216 | /* Allocate a thread structure from the global free list. Global |
| 217 | mutex lock must be held by caller. The thread is moved to |
| 218 | the active list. */ |
| 219 | struct thread_node * |
| 220 | __timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id) |
| 221 | { |
| 222 | struct list_head *node = list_first (&thread_free_list); |
| 223 | |
| 224 | if (node != list_null (&thread_free_list)) |
| 225 | { |
| 226 | struct thread_node *thread = thread_links2ptr (node); |
| 227 | list_unlink (node); |
| 228 | thread_init (thread, desired_attr, clock_id); |
| 229 | list_append (&thread_active_list, node); |
| 230 | return thread; |
| 231 | } |
| 232 | |
| 233 | return 0; |
| 234 | } |
| 235 | |
| 236 | |
/* Return a thread structure to the global free list.  Global lock
   must be held by caller.  The node is torn down first, then moved
   from the active list to the free list.  */
void
__timer_thread_dealloc (struct thread_node *thread)
{
  thread_deinit (thread);
  list_unlink (&thread->links);
  list_append (&thread_free_list, &thread->links);
}
| 246 | |
| 247 | |
/* Each of our threads which terminates executes this cleanup
   handler.  We never terminate threads ourselves; if a thread gets here
   it means that the evil application has killed it.  If the thread has
   timers, these require servicing and so we must hire a replacement
   thread right away.  We must also unblock another thread that may
   have been waiting for this thread to finish servicing a timer (see
   timer_delete()).  */

static void
thread_cleanup (void *val)
{
  if (val != NULL)
    {
      struct thread_node *thread = val;

      /* How did the signal thread get killed?  */
      assert (thread != &__timer_signal_thread_rclk);

      pthread_mutex_lock (&__timer_mutex);

      thread->exists = 0;

      /* We are no longer processing a timer event.  */
      thread->current_timer = 0;

      /* With an empty queue the node can go straight back to the free
	 list; otherwise hire a replacement thread to keep servicing
	 the remaining timers.  */
      if (list_isempty (&thread->timer_queue))
	__timer_thread_dealloc (thread);
      else
	(void) __timer_thread_start (thread);

      pthread_mutex_unlock (&__timer_mutex);

      /* Unblock potentially blocked timer_delete().  */
      pthread_cond_broadcast (&thread->cond);
    }
}
| 284 | |
| 285 | |
/* Handle a timer which is supposed to go off now.  Called with the
   global mutex held; the mutex is dropped for the duration of the
   notification delivery (so other timer operations can proceed) and
   re-acquired before returning.  current_timer marks this timer as
   busy in the meantime.  */
static void
thread_expire_timer (struct thread_node *self, struct timer_node *timer)
{
  self->current_timer = timer; /* Lets timer_delete know timer is running.  */

  pthread_mutex_unlock (&__timer_mutex);

  switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
    {
    case SIGEV_NONE:
      /* Nothing to deliver.  */
      break;

    case SIGEV_SIGNAL:
#ifdef __NR_rt_sigqueueinfo
      {
	siginfo_t info;

	/* First, clear the siginfo_t structure, so that we don't pass our
	   stack content to other tasks.  */
	memset (&info, 0, sizeof (siginfo_t));
	/* We must pass the information about the data in a siginfo_t
	   value.  */
	info.si_signo = timer->event.sigev_signo;
	info.si_code = SI_TIMER;
	info.si_pid = timer->creator_pid;
	info.si_uid = getuid ();
	info.si_value = timer->event.sigev_value;

	INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
      }
#else
      /* No rt_sigqueueinfo syscall available: fall back to
	 pthread_kill, first against the thread captured at
	 initialization time, then against this thread itself.  */
      if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
	{
	  if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
	    abort ();
	}
#endif
      break;

    case SIGEV_THREAD:
      /* Run the user's notification callback in this servicing thread.  */
      timer->event.sigev_notify_function (timer->event.sigev_value);
      break;

    default:
      assert (! "unknown event");
      break;
    }

  pthread_mutex_lock (&__timer_mutex);

  self->current_timer = 0;

  /* Wake any timer_delete() waiting for this expiry to finish.  */
  pthread_cond_broadcast (&self->cond);
}
| 341 | |
| 342 | |
/* Thread function; executed by each timer thread.  The job of this
   function is to wait on the thread's timer queue and expire the
   timers in chronological order as close to their scheduled time as
   possible.  Runs forever with the global mutex held except while
   sleeping on the condition variable or delivering an expiry.  */
static void
__attribute__ ((noreturn))
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks).  */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_head *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
	{
	  struct timespec now;

	  timer = timer_links2ptr (first);

	  /* This assumes that the elements of the list of one thread
	     are all for the same clock.  */
	  clock_gettime (timer->clock, &now);

	  while (1)
	    {
	      /* If the timer is due or overdue, remove it from the queue.
		 If it's a periodic timer, re-compute its new time and
		 requeue it.  Either way, perform the timer expiry.  */
	      if (timespec_compare (&now, &timer->expirytime) < 0)
		break;

	      list_unlink_ip (first);

	      if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
		  || timer->value.it_interval.tv_nsec != 0)
		{
		  /* Periodic timer: advance the expiry time by whole
		     intervals until it lies in the future, counting
		     each skipped interval as an overrun (capped at
		     DELAYTIMER_MAX), then put it back on the queue.  */
		  timer->overrun_count = 0;
		  timespec_add (&timer->expirytime, &timer->expirytime,
				&timer->value.it_interval);
		  while (timespec_compare (&timer->expirytime, &now) < 0)
		    {
		      timespec_add (&timer->expirytime, &timer->expirytime,
				    &timer->value.it_interval);
		      if (timer->overrun_count < DELAYTIMER_MAX)
			++timer->overrun_count;
		    }
		  __timer_thread_queue_timer (self, timer);
		}

	      /* Deliver the notification; drops and re-acquires the
		 global mutex internally.  */
	      thread_expire_timer (self, timer);

	      first = list_first (&self->timer_queue);
	      if (first == list_null (&self->timer_queue))
		break;

	      timer = timer_links2ptr (first);
	    }
	}

      /* If the queue is not empty, wait until the expiry time of the
	 first node.  Otherwise wait indefinitely.  Insertions at the
	 head of the queue must wake up the thread by broadcasting
	 this condition variable.  */
      if (timer != NULL)
	pthread_cond_timedwait (&self->cond, &__timer_mutex,
				&timer->expirytime);
      else
	pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This macro will never be executed since the while loop loops
     forever - but we have to add it for proper nesting.  */
  pthread_cleanup_pop (1);
}
| 428 | |
| 429 | |
| 430 | /* Enqueue a timer in wakeup order in the thread's timer queue. |
| 431 | Returns 1 if the timer was inserted at the head of the queue, |
| 432 | causing the queue's next wakeup time to change. */ |
| 433 | |
| 434 | int |
| 435 | __timer_thread_queue_timer (struct thread_node *thread, |
| 436 | struct timer_node *insert) |
| 437 | { |
| 438 | struct list_head *iter; |
| 439 | int athead = 1; |
| 440 | |
| 441 | for (iter = list_first (&thread->timer_queue); |
| 442 | iter != list_null (&thread->timer_queue); |
| 443 | iter = list_next (iter)) |
| 444 | { |
| 445 | struct timer_node *timer = timer_links2ptr (iter); |
| 446 | |
| 447 | if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0) |
| 448 | break; |
| 449 | athead = 0; |
| 450 | } |
| 451 | |
| 452 | list_insbefore (iter, &insert->links); |
| 453 | return athead; |
| 454 | } |
| 455 | |
| 456 | |
| 457 | /* Start a thread and associate it with the given thread node. Global |
| 458 | lock must be held by caller. */ |
| 459 | int |
| 460 | __timer_thread_start (struct thread_node *thread) |
| 461 | { |
| 462 | int retval = 1; |
| 463 | |
| 464 | assert (!thread->exists); |
| 465 | thread->exists = 1; |
| 466 | |
| 467 | if (pthread_create (&thread->id, &thread->attr, |
| 468 | (void *(*) (void *)) thread_func, thread) != 0) |
| 469 | { |
| 470 | thread->exists = 0; |
| 471 | retval = -1; |
| 472 | } |
| 473 | |
| 474 | return retval; |
| 475 | } |
| 476 | |
| 477 | |
/* Wake up a timer thread blocked on its condition variable, e.g.
   after its queue has been modified.  */
void
__timer_thread_wakeup (struct thread_node *thread)
{
  pthread_cond_broadcast (&thread->cond);
}
| 483 | |
| 484 | |
/* Compare two pthread_attr_t thread attributes for exact equality.
   Returns 1 if they are equal, otherwise zero if they are not equal
   or contain illegal values.  This version is NPTL-specific for
   performance reason.  One could use the access functions to get the
   values of all the fields of the attribute structure.
   NOTE: this peeks directly at NPTL's internal struct pthread_attr
   layout, so it must be kept in sync with nptl/pthreadP.h.  */
static int
thread_attr_compare (const pthread_attr_t *left, const pthread_attr_t *right)
{
  struct pthread_attr *ileft = (struct pthread_attr *) left;
  struct pthread_attr *iright = (struct pthread_attr *) right;

  /* CPU affinity sets are equal when both are absent, or both are
     present with identical size and contents.  */
  return (ileft->flags == iright->flags
	  && ileft->schedpolicy == iright->schedpolicy
	  && (ileft->schedparam.sched_priority
	      == iright->schedparam.sched_priority)
	  && ileft->guardsize == iright->guardsize
	  && ileft->stackaddr == iright->stackaddr
	  && ileft->stacksize == iright->stacksize
	  && ((ileft->cpuset == NULL && iright->cpuset == NULL)
	      || (ileft->cpuset != NULL && iright->cpuset != NULL
		  && ileft->cpusetsize == iright->cpusetsize
		  && memcmp (ileft->cpuset, iright->cpuset,
			     ileft->cpusetsize) == 0)));
}
| 509 | |
| 510 | |
| 511 | /* Search the list of active threads and find one which has matching |
| 512 | attributes. Global mutex lock must be held by caller. */ |
| 513 | struct thread_node * |
| 514 | __timer_thread_find_matching (const pthread_attr_t *desired_attr, |
| 515 | clockid_t desired_clock_id) |
| 516 | { |
| 517 | struct list_head *iter = list_first (&thread_active_list); |
| 518 | |
| 519 | while (iter != list_null (&thread_active_list)) |
| 520 | { |
| 521 | struct thread_node *candidate = thread_links2ptr (iter); |
| 522 | |
| 523 | if (thread_attr_compare (desired_attr, &candidate->attr) |
| 524 | && desired_clock_id == candidate->clock_id) |
| 525 | return candidate; |
| 526 | |
| 527 | iter = list_next (iter); |
| 528 | } |
| 529 | |
| 530 | return NULL; |
| 531 | } |
| 532 | |
| 533 | |
| 534 | /* Grab a free timer structure from the global free list. The global |
| 535 | lock must be held by the caller. */ |
| 536 | struct timer_node * |
| 537 | __timer_alloc (void) |
| 538 | { |
| 539 | struct list_head *node = list_first (&timer_free_list); |
| 540 | |
| 541 | if (node != list_null (&timer_free_list)) |
| 542 | { |
| 543 | struct timer_node *timer = timer_links2ptr (node); |
| 544 | list_unlink_ip (node); |
| 545 | timer->inuse = TIMER_INUSE; |
| 546 | timer->refcount = 1; |
| 547 | return timer; |
| 548 | } |
| 549 | |
| 550 | return NULL; |
| 551 | } |
| 552 | |
| 553 | |
/* Return a timer structure to the global free list.  The global lock
   must be held by the caller.  The timer's reference count must have
   dropped to zero.  */
void
__timer_dealloc (struct timer_node *timer)
{
  assert (timer->refcount == 0);
  timer->thread = NULL;	/* Break association between timer and thread.  */
  timer->inuse = TIMER_FREE;
  list_append (&timer_free_list, &timer->links);
}
| 564 | |
| 565 | |
/* Thread cancellation handler which unlocks a mutex.  Registered with
   pthread_cleanup_push so a cancelled thread does not leave ARG (a
   pthread_mutex_t *) locked.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}