/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

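/*
 * Dyntick-idle nesting count.  Zero means that this CPU is idle from
 * RCU's point of view (an extended quiescent state); nonzero means that
 * RCU must still pay attention to this CPU.
 */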
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
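	/*
	 * If this is the outermost process-level entry into idle, drop
	 * the nesting count to zero so that this CPU enters an extended
	 * quiescent state; otherwise just remove one level of nesting.
	 */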
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		rcu_dynticks_nesting = 0;
	else
		rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting--;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
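	/*
	 * If there is still process-level nesting, just add one level back;
	 * otherwise we are fully exiting idle, so restore the exit-idle value.
	 */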
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Called with irqs disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
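	/*
	 * If callbacks are queued and some are not yet ready to invoke,
	 * advance ->donetail so that all currently queued callbacks become
	 * ready, and return 1 to tell the caller to invoke them.
	 */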
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      rcu_is_callbacks_kthread()));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
				      is_idle_task(current),
				      rcu_is_callbacks_kthread()));
}

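/*
 * Process any ready callbacks for each flavor of RCU: rcu_sched, rcu_bh,
 * and (via the plugin) rcu_preempt.  Runs as a softirq action.
 */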
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
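	/* Enqueue the new callback at the tail of the list and advance ->curtail. */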
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */