/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
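
/*
 * Illustrative sketch, not part of this file: a lockdep-based
 * "is rcu_read_lock() held?" check (the real holder checks live in
 * the RCU headers) would consult the gate above first, failing open
 * when lockdep cannot give a trustworthy answer:
 *
 *	if (!debug_lockdep_rcu_enabled())
 *		return 1;	(assume held: lockdep cannot tell)
 *	return lock_is_held(&rcu_lock_map);
 */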

#ifndef CONFIG_PREEMPT_RT_FULL
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU case and the !CONFIG_PROVE_RCU case.  Note that if
 * someone uses rcu_read_lock_bh(), but then later enables BH, lockdep
 * (if enabled) will show the situation.  This is useful for debug checks
 * in functions that require that they be called within an RCU read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that RCU read-side critical sections (including RCU-bh) are
 * disallowed if the CPU is either idle or offline from an RCU
 * perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif
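
/*
 * Illustrative usage, a hedged sketch (example_bh_reader and struct foo
 * are hypothetical, not part of the kernel): a function that must run
 * within an RCU-bh read-side critical section can assert so:
 *
 *	static void example_bh_reader(struct foo *p)
 *	{
 *		WARN_ON_ONCE(!rcu_read_lock_bh_held());
 *		... dereference RCU-bh-protected pointers reachable from p ...
 *	}
 */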

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
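
/*
 * For illustration (a sketch; the actual synchronize_*() definitions
 * live in the flavor-specific RCU implementations): each blocking
 * grace-period primitive can be built on wait_rcu_gp() by passing in
 * the matching callback-registration function:
 *
 *	wait_rcu_gp(call_rcu);		for synchronize_rcu()
 *	wait_rcu_gp(call_rcu_bh);	for synchronize_rcu_bh()
 *	wait_rcu_gp(call_rcu_sched);	for synchronize_sched()
 */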

#ifdef CONFIG_PROVE_RCU
/*
 * Wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_init(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
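
/*
 * Typical pairing, as in wait_rcu_gp() above (example_cb is a
 * hypothetical callback, shown only for illustration): initialize the
 * on-stack rcu_head, queue it, wait until the callback has run, and
 * only then let the stack frame go away:
 *
 *	struct rcu_head head;
 *
 *	init_rcu_head_on_stack(&head);
 *	call_rcu(&head, example_cb);
 *	... block until example_cb() has been invoked ...
 *	destroy_rcu_head_on_stack(&head);
 */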

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_init = rcuhead_fixup_init,
	.fixup_activate = rcuhead_fixup_activate,
	.fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
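
/*
 * What the fixups above catch, illustratively (p and example_cb are
 * hypothetical): queueing an rcu_head via call_rcu() while a previous
 * call_rcu() on the same head is still pending leaves the object in
 * ODEBUG_STATE_ACTIVE at activation time, so rcuhead_fixup_activate()
 * fires instead of the callback list being silently corrupted:
 *
 *	call_rcu(&p->rh, example_cb);
 *	call_rcu(&p->rh, example_cb);	second call on a still-queued head
 */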
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
{
	trace_rcu_torture_read(rcutorturename, rhp);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif