/* Read-write lock implementation.
   Copyright (C) 1998 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Xavier Leroy <Xavier.Leroy@inria.fr>
   and Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "internals.h"
#include "queue.h"
#include "spinlock.h"
#include "restart.h"

/*
 * Check whether the calling thread already owns one or more read locks on
 * the specified lock.  If so, return a pointer to the read lock info
 * structure corresponding to that lock.
 */

static pthread_readlock_info *
rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info;

  for (info = self->p_readlock_list; info != NULL; info = info->pr_next)
    {
      if (info->pr_lock == rwlock)
        return info;
    }

  return NULL;
}

/*
 * Add a new lock to the thread's list of locks for which it has a read
 * lock.  A new info node is allocated for this, taken from the thread's
 * free list or, if that is empty, from malloc.  If malloc fails, a null
 * pointer is returned.  Otherwise the lock info structure is initialized
 * and pushed onto the thread's list.
 */

static pthread_readlock_info *
rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info = self->p_readlock_free;

  if (info != NULL)
    self->p_readlock_free = info->pr_next;
  else
    info = malloc(sizeof *info);

  if (info == NULL)
    return NULL;

  info->pr_lock_count = 1;
  info->pr_lock = rwlock;
  info->pr_next = self->p_readlock_list;
  self->p_readlock_list = info;

  return info;
}

/*
 * If the thread owns a read lock over the given pthread_rwlock_t, and
 * this read lock is tracked in the thread's lock list, this function
 * returns a pointer to the info node in that list.  It also decrements
 * the lock count within that node; if the count reaches zero, the node
 * is removed from the list.  If nothing is found, a null pointer is
 * returned.
 */

static pthread_readlock_info *
rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info **pinfo;

  for (pinfo = &self->p_readlock_list; *pinfo != NULL;
       pinfo = &(*pinfo)->pr_next)
    {
      if ((*pinfo)->pr_lock == rwlock)
        {
          pthread_readlock_info *info = *pinfo;
          if (--info->pr_lock_count == 0)
            *pinfo = info->pr_next;
          return info;
        }
    }

  return NULL;
}

/*
 * Check whether the conditions are right to place a read lock.  Return 1
 * if so, otherwise 0.  The rwlock's internal lock must be held on entry.
 */

static int
rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
{
  /* Can't read-lock; it is write-locked.  */
  if (rwlock->__rw_writer != NULL)
    return 0;

  /* Lock prefers readers; get it.  */
  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
    return 1;

  /* Lock prefers writers, but none are waiting.  */
  if (queue_is_empty(&rwlock->__rw_write_waiting))
    return 1;

  /* Writers are waiting, but this thread already has a read lock.  */
  if (have_lock_already)
    return 1;

  /* Writers are waiting, and this is a new lock.  */
  return 0;
}

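/* Illustrative summary of the checks above (derived from this function's
   logic, not additional behavior).  Any kind other than
   PTHREAD_RWLOCK_PREFER_READER_NP falls through to the waiting-writers
   check, so it behaves like writer preference here:

     __rw_writer | __rw_kind         | writers queued | have_lock_already => result
     ------------+-------------------+----------------+-------------------+-------
     non-NULL    | any               | any            | any               => 0
     NULL        | PREFER_READER_NP  | any            | any               => 1
     NULL        | otherwise         | no             | any               => 1
     NULL        | otherwise         | yes            | yes               => 1
     NULL        | otherwise         | yes            | no                => 0
*/
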
/*
 * This function helps support the brain-damaged recursive read locking
 * semantics required by Unix 98, while maintaining write priority.  It
 * determines whether the calling thread already holds a read lock on the
 * given rwlock.  It returns 1 if so, otherwise 0.
 *
 * If the thread has any "untracked read locks", it assumes, to be safe,
 * that this lock is among them, and returns 1.
 *
 * Also, if it finds the thread's lock in the list, it sets the pointer
 * referenced by pexisting to refer to the list entry.
 *
 * If the thread has no untracked locks, and the lock is not found in its
 * list, then it is added to the list.  If this fails, *pout_of_mem is
 * set to 1.
 */

static int
rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
                    pthread_readlock_info **pexisting, int *pout_of_mem)
{
  pthread_readlock_info *existing = NULL;
  int out_of_mem = 0, have_lock_already = 0;
  pthread_descr self = *pself;

  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
    {
      if (!self)
        self = thread_self();

      existing = rwlock_is_in_list(self, rwlock);

      if (existing != NULL || self->p_untracked_readlock_count > 0)
        have_lock_already = 1;
      else
        {
          existing = rwlock_add_to_list(self, rwlock);
          if (existing == NULL)
            out_of_mem = 1;
        }
    }

  *pout_of_mem = out_of_mem;
  *pexisting = existing;
  *pself = self;

  return have_lock_already;
}

int
pthread_rwlock_init (pthread_rwlock_t *rwlock,
                     const pthread_rwlockattr_t *attr)
{
  __pthread_init_lock(&rwlock->__rw_lock);
  rwlock->__rw_readers = 0;
  rwlock->__rw_writer = NULL;
  rwlock->__rw_read_waiting = NULL;
  rwlock->__rw_write_waiting = NULL;

  if (attr == NULL)
    {
      rwlock->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
      rwlock->__rw_pshared = PTHREAD_PROCESS_PRIVATE;
    }
  else
    {
      rwlock->__rw_kind = attr->__lockkind;
      rwlock->__rw_pshared = attr->__pshared;
    }

  return 0;
}

int
pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
  int readers;
  _pthread_descr writer;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  readers = rwlock->__rw_readers;
  writer = rwlock->__rw_writer;
  __pthread_unlock (&rwlock->__rw_lock);

  if (readers > 0 || writer != NULL)
    return EBUSY;

  return 0;
}

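/* Usage sketch (illustrative only, not part of this file): the expected
   lifecycle of the two functions above.  The names map_lock, map_setup
   and map_teardown are hypothetical caller code.

     static pthread_rwlock_t map_lock;

     int map_setup (void)
     {
       return pthread_rwlock_init (&map_lock, NULL);  // default attributes
     }

     int map_teardown (void)
     {
       // Fails with EBUSY while any reader or a writer still holds it.
       return pthread_rwlock_destroy (&map_lock);
     }
*/
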
int
pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;

  have_lock_already = rwlock_have_already(&self, rwlock,
                                          &existing, &out_of_mem);

  for (;;)
    {
      if (self == NULL)
        self = thread_self ();

      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock_can_rdlock(rwlock, have_lock_already))
        break;

      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point.  */
    }

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
        existing->pr_lock_count++;
      else
        self->p_untracked_readlock_count++;
    }

  return 0;
}

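/* Usage sketch (illustrative only).  Per the Unix 98 semantics handled by
   rwlock_have_already above, a thread may re-acquire a read lock it
   already holds, even when writers are queued; each successful rdlock
   must be balanced by one unlock.  lookup and lookup_pair are
   hypothetical caller code.

     int lookup (pthread_rwlock_t *lock)
     {
       pthread_rwlock_rdlock (lock);
       // ... read shared data ...
       return pthread_rwlock_unlock (lock);
     }

     int lookup_pair (pthread_rwlock_t *lock)
     {
       pthread_rwlock_rdlock (lock);   // outer read lock
       lookup (lock);                  // recursive rdlock does not deadlock
       return pthread_rwlock_unlock (lock);
     }
*/
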
int
pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self();
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  int retval = EBUSY;

  have_lock_already = rwlock_have_already(&self, rwlock,
                                          &existing, &out_of_mem);

  __pthread_lock (&rwlock->__rw_lock, self);

  /* 0 is passed here instead of have_lock_already.  This meets Single
     Unix Specification requirements: if writers are waiting,
     pthread_rwlock_tryrdlock does not acquire a read lock, even if the
     caller already holds one or more read locks.  */

  if (rwlock_can_rdlock(rwlock, 0))
    {
      ++rwlock->__rw_readers;
      retval = 0;
    }

  __pthread_unlock (&rwlock->__rw_lock);

  if (retval == 0)
    {
      if (have_lock_already || out_of_mem)
        {
          if (existing != NULL)
            existing->pr_lock_count++;
          else
            self->p_untracked_readlock_count++;
        }
    }

  return retval;
}

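/* Usage sketch (illustrative only).  Because tryrdlock passes 0 for
   have_lock_already (see the comment above), it can return EBUSY even
   for a thread that already holds a read lock, so callers must be
   prepared to fall back.  try_peek is hypothetical caller code.

     int try_peek (pthread_rwlock_t *lock)
     {
       if (pthread_rwlock_tryrdlock (lock) == EBUSY)
         return -1;   // write-locked, or writers queued under writer preference
       // ... read shared data ...
       pthread_rwlock_unlock (lock);
       return 0;
     }
*/
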
int
pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self ();

  while (1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
          rwlock->__rw_writer = self;
          __pthread_unlock (&rwlock->__rw_lock);
          return 0;
        }

      /* Suspend ourselves, then try again.  */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point.  */
    }
}

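/* Usage sketch (illustrative only).  A writer blocks until there are no
   readers and no other writer, then has exclusive access until it
   unlocks.  update is hypothetical caller code.

     void update (pthread_rwlock_t *lock)
     {
       pthread_rwlock_wrlock (lock);   // may suspend behind readers
       // ... modify shared data exclusively ...
       pthread_rwlock_unlock (lock);   // wakes one writer or all readers
     }
*/
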
int
pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
    {
      rwlock->__rw_writer = thread_self ();
      result = 0;
    }
  __pthread_unlock (&rwlock->__rw_lock);

  return result;
}

int
pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  pthread_descr torestart;
  pthread_descr th;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_writer != NULL)
    {
      /* Unlocking a write lock.  */
      if (rwlock->__rw_writer != thread_self ())
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }
      rwlock->__rw_writer = NULL;

      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
          || (th = dequeue (&rwlock->__rw_write_waiting)) == NULL)
        {
          /* Restart all waiting readers.  */
          torestart = rwlock->__rw_read_waiting;
          rwlock->__rw_read_waiting = NULL;
          __pthread_unlock (&rwlock->__rw_lock);
          while ((th = dequeue (&torestart)) != NULL)
            restart (th);
        }
      else
        {
          /* Restart one waiting writer.  */
          __pthread_unlock (&rwlock->__rw_lock);
          restart (th);
        }
    }
  else
    {
      /* Unlocking a read lock.  */
      if (rwlock->__rw_readers == 0)
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }

      --rwlock->__rw_readers;
      if (rwlock->__rw_readers == 0)
        /* Restart one waiting writer, if any.  */
        th = dequeue (&rwlock->__rw_write_waiting);
      else
        th = NULL;

      __pthread_unlock (&rwlock->__rw_lock);
      if (th != NULL)
        restart (th);

      /* Recursive lock fixup.  */

      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
        {
          pthread_descr self = thread_self();
          pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);

          if (victim != NULL)
            {
              if (victim->pr_lock_count == 0)
                {
                  victim->pr_next = self->p_readlock_free;
                  self->p_readlock_free = victim;
                }
            }
          else
            {
              if (self->p_untracked_readlock_count > 0)
                self->p_untracked_readlock_count--;
            }
        }
    }

  return 0;
}

int
pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
{
  attr->__lockkind = 0;
  attr->__pshared = 0;

  return 0;
}

int
pthread_rwlockattr_destroy (pthread_rwlockattr_t *attr attribute_unused)
{
  return 0;
}

int
pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
{
  *pshared = attr->__pshared;
  return 0;
}

int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
    return EINVAL;

  attr->__pshared = pshared;

  return 0;
}

int
pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *attr, int *pref)
{
  *pref = attr->__lockkind;
  return 0;
}

int
pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref)
{
  if (pref != PTHREAD_RWLOCK_PREFER_READER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NP
      && pref != PTHREAD_RWLOCK_DEFAULT_NP)
    return EINVAL;

  attr->__lockkind = pref;

  return 0;
}
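
/* Usage sketch (illustrative only).  Selecting writer preference via the
   attribute functions above; make_writer_pref_lock is hypothetical
   caller code.

     int make_writer_pref_lock (pthread_rwlock_t *lock)
     {
       pthread_rwlockattr_t attr;
       int err;

       pthread_rwlockattr_init (&attr);
       pthread_rwlockattr_setkind_np (&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
       err = pthread_rwlock_init (lock, &attr);
       pthread_rwlockattr_destroy (&attr);
       return err;
     }
*/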