#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block, but they may have to retry if a writer is in progress.
 * Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear, but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on the x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when the caller has its own locking that protects
 * updates, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
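
/*
 * Example initialization ('foo_seq' and 'bar' are placeholder names):
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO;
 *
 * or, for a counter embedded in a dynamically allocated structure:
 *
 *	seqcount_init(&bar->seq);
 */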

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
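
/*
 * Masking off the low bit above means that if a writer was active (odd
 * sequence), the value we return can never match s->sequence at retry
 * time, so the section fails its retry instead of spinning here.
 * Minimal illustrative reader ('foo' is a placeholder):
 *
 *	do {
 *		seq = raw_seqcount_begin(&foo);
 *		... speculatively load the protected data ...
 *	} while (read_seqcount_retry(&foo, seq));
 */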

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
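
/*
 * Typical seqcount read side, mirroring the seqlock usage at the top of
 * this file ('foo' and the copied data are placeholders):
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo);
 *		... copy the protected data out ...
 *	} while (read_seqcount_retry(&foo, seq));
 */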

/*
 * The sequence counter only version assumes that callers provide their
 * own mutual exclusion around updates.
 */
static inline void __write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	preempt_disable_rt();
	__write_seqcount_begin(s);
}

static inline void __write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

static inline void write_seqcount_end(seqcount_t *s)
{
	__write_seqcount_end(s);
	preempt_enable_rt();
}
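
/*
 * Typical write side under caller-provided locking, per the contract
 * above ('foo' and 'foo_lock' are placeholders; any lock that
 * serializes writers will do):
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&foo);
 *	... update the protected data ...
 *	write_seqcount_end(&foo);
 *	spin_unlock(&foo_lock);
 */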

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
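
/*
 * Bumping the count by two keeps it even, so readers do not spin as if
 * a writer were active, yet any reader that began before the barrier
 * will fail its retry and restart. Illustrative use after installing a
 * new version of the data ('foo' is a placeholder):
 *
 *	... publish the new data ...
 *	write_seqcount_barrier(&foo);
 */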

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO,		\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
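
/*
 * Example declarations ('foo' and 'bar' are placeholder names):
 *
 *	static DEFINE_SEQLOCK(foo);
 *
 * or, for a seqlock embedded in another structure:
 *
 *	seqlock_init(&bar->lock);
 */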

/*
 * Read side functions for starting and finalizing a read side section.
 */
#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}
#else
/*
 * Starvation safe read side for RT
 */
static inline unsigned read_seqbegin(seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = sl->seqcount.sequence;
	if (unlikely(ret & 1)) {
		/*
		 * Take the lock and let the writer proceed (i.e. possibly
		 * boost its priority), otherwise we could loop here forever.
		 */
		spin_lock(&sl->lock);
		spin_unlock(&sl->lock);
		goto repeat;
	}
	return ret;
}
#endif

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * No extra preempt_disable() is needed; spin_lock already provides it.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	__write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	__write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
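
/*
 * A writer paired with the reader loop from the top of this file
 * ('foo' is a placeholder):
 *
 *	write_seqlock(&foo);
 *	... update the protected data ...
 *	write_sequnlock(&foo);
 */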

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	__write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	__write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	__write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	__write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	__write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)
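
/*
 * Usage sketch for the irqsave variant, which must pair with
 * write_sequnlock_irqrestore() below ('foo' is a placeholder):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo, flags);
 *	... update the protected data ...
 *	write_sequnlock_irqrestore(&foo, flags);
 */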

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	__write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */