/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

/* We need to build this function with optimization to avoid
 * lll_timedlock erroring out with
 * error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’
 */
int
attribute_optimize("Os")
pthread_mutex_timedlock (
    pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime need not contain a valid value and must not be checked.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

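      /* Adaptive mutex: on SMP machines, spin in user space for a
         bounded number of trylock attempts before falling back to the
         futex-based timed lock below.  */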
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

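          /* Update the spin estimate; this is an exponentially weighted
             moving average of the number of iterations the lock took to
             acquire.  */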
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
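      /* Robust mutexes: record the mutex we are about to acquire in
         list_op_pending so that, if this thread dies in the middle of
         the operation, the robust-mutex cleanup can still find it.  */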
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately return here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

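        /* Try to acquire the uncontended mutex entirely in user space by
           atomically replacing 0 with our TID in the lock word.  */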
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes an absolute timeout, so
               abstime can be passed to the kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately return here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
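        /* The lock word encodes the priority ceiling in
           PTHREAD_MUTEX_PRIO_CEILING_MASK and the lock state in the low
           bits: ceilval means unlocked, ceilval | 1 locked without
           waiters, and ceilval | 2 locked with possible waiters.  */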
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}