/*
 * Queue read/write lock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/*
 * Writer states & reader shift and bias.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
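
/*
 * With the values above, the lock word in ->cnts decomposes as:
 *
 *	bits 0-7  : writer locked byte (_QW_LOCKED)
 *	bit  8    : a writer is waiting (_QW_WAITING)
 *	bits 9-31 : reader count, incremented in steps of _QR_BIAS
 *
 * _QW_LOCKED covers the whole low byte, so a writer owns that byte
 * exclusively and can release the lock with a single byte store
 * (see queued_write_unlock() below).
 */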

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		/*
		 * Speculatively add a reader; if a writer slipped in
		 * between the read and the add, back the increment out
		 * and fail.
		 */
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
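
/*
 * Illustrative usage sketch (not part of this header's API, and my_lock
 * is a hypothetical struct qrwlock instance): callers normally reach
 * these routines through the arch_*() wrappers defined at the end of
 * this file, e.g.:
 *
 *	if (arch_read_trylock(&my_lock)) {
 *		... read-side critical section ...
 *		arch_read_unlock(&my_lock);
 *	}
 */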

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	/*
	 * The lock word was observed to be 0 (no readers, no writer);
	 * the cmpxchg succeeds only if that is still the case.
	 */
	return likely(atomic_cmpxchg_acquire(&lock->cnts,
					     cnts, cnts | _QW_LOCKED) == cnts);
}
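
/*
 * Note that both trylock fastpaths use acquire semantics on success, so
 * the critical section cannot be reordered before the lock is taken;
 * they pair with the release operations in the unlock functions below.
 */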

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}
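
/*
 * A minimal read-side pairing sketch (my_lock is hypothetical, shown
 * only to make the acquire/release pairing concrete):
 *
 *	queued_read_lock(&my_lock);
 *	... read data protected by my_lock ...
 *	queued_read_unlock(&my_lock);
 */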

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	/* Optimize for the uncontended case: 0 -> _QW_LOCKED. */
	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
		return;

	queued_write_lock_slowpath(lock);
}
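
/*
 * And the write-side twin of the sketch above (my_lock again
 * hypothetical):
 *
 *	queued_write_lock(&my_lock);
 *	... update data protected by my_lock ...
 *	queued_write_unlock(&my_lock);
 */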

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	/*
	 * The writer owns the entire low byte (_QW_LOCKED == 0xff), so
	 * a release store of 0 to ->wlocked is enough to drop the lock.
	 */
	smp_store_release(&lock->wlocked, 0);
}

/*
 * Remap rwlock architecture-specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)	queued_read_lock(l)
#define arch_write_lock(l)	queued_write_lock(l)
#define arch_read_trylock(l)	queued_read_trylock(l)
#define arch_write_trylock(l)	queued_write_trylock(l)
#define arch_read_unlock(l)	queued_read_unlock(l)
#define arch_write_unlock(l)	queued_write_unlock(l)

#endif /* __ASM_GENERIC_QRWLOCK_H */