/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
#define raw_read_can_lock(l)	read_can_lock(l)
#define raw_write_can_lock(l)	write_can_lock(l)
/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)	\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)	\
{	\
	for (;;) {	\
		preempt_disable();	\
		if (likely(do_raw_##op##_trylock(lock)))	\
			break;	\
		preempt_enable();	\
	\
		if (!(lock)->break_lock)	\
			(lock)->break_lock = 1;	\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);	\
	}	\
	(lock)->break_lock = 0;	\
}	\
	\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{	\
	unsigned long flags;	\
	\
	for (;;) {	\
		preempt_disable();	\
		local_irq_save(flags);	\
		if (likely(do_raw_##op##_trylock(lock)))	\
			break;	\
		local_irq_restore(flags);	\
		preempt_enable();	\
	\
		if (!(lock)->break_lock)	\
			(lock)->break_lock = 1;	\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);	\
	}	\
	(lock)->break_lock = 0;	\
	return flags;	\
}	\
	\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)	\
{	\
	_raw_##op##_lock_irqsave(lock);	\
}	\
	\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)	\
{	\
	unsigned long flags;	\
	\
	/*	\
	 * Careful: we must exclude softirqs too, hence the	\
	 * irq-disabling. We use the generic preemption-aware	\
	 * function:	\
	 */	\
	flags = _raw_##op##_lock_irqsave(lock);	\
	local_bh_disable();	\
	local_irq_restore(flags);	\
}	\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);

#ifndef CONFIG_PREEMPT_RT_FULL
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif

#endif
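
/*
 * Example (illustrative sketch only, not a definition in this file): a
 * caller that needs local interrupts off could pair the irqsave/irqrestore
 * entry points built above roughly like this, assuming a raw_spinlock_t
 * named example_lock that is defined elsewhere:
 *
 *	unsigned long flags;
 *
 *	flags = _raw_spin_lock_irqsave(&example_lock);
 *	... critical section: preemption and local IRQs are disabled ...
 *	_raw_spin_unlock_irqrestore(&example_lock, flags);
 */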

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_PREEMPT_RT_FULL

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#endif /* !PREEMPT_RT_FULL */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
				do_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
					struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif
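
/*
 * Example (illustrative sketch only, not a definition in this file): the
 * _nested and _nest_lock entry points above exist so lockdep can be told
 * that two locks of the same class are intentionally taken in a fixed
 * order. A hypothetical caller, where parent and child each embed a
 * raw_spinlock_t named lock:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	... both locks held ...
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 */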

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
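
/*
 * Example (illustrative sketch only, not a definition in this file):
 * architecture profile_pc() implementations typically use
 * in_lock_functions() to decide whether a sampled program counter landed
 * inside the __lockfunc text, so that profiler hits are charged to the
 * lock's caller rather than to these helpers. Roughly:
 *
 *	unsigned long pc = instruction_pointer(regs);
 *
 *	if (in_lock_functions(pc))
 *		pc = ...return address of the lock function's caller...;
 */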