/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

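/*
 * watchdog_enabled tracks whether the watchdog is running on at least
 * one CPU; watchdog_thresh is the hard-lockup threshold in seconds
 * (the soft-lockup threshold is twice that, see get_softlockup_thresh()).
 */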
int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

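/* "nmi_watchdog=" accepts "panic", "nopanic" or "0" (disable entirely) */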
static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a fixed factor: the soft-lockup threshold is twice
 * the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}

static u64 get_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns. The divide by 5
         * gives the hrtimer several chances (two or three with the
         * current relation between the soft and hard thresholds) to
         * increment before the hardlockup detector generates a warning.
         */
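        /* e.g. the default watchdog_thresh of 10 gives 20 * (1s / 5) = 4s */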
        return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        int this_cpu = smp_processor_id();

        __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

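/*
 * A touch timestamp of 0 tells watchdog_timer_fn() to skip the stall
 * check and simply re-arm on its next tick.
 */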
void touch_softlockup_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        if (watchdog_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
                        if (per_cpu(watchdog_nmi_touch, cpu) != true)
                                per_cpu(watchdog_nmi_touch, cpu) = true;
                }
        }
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* watchdog detector functions */
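/*
 * Called from NMI context: if the hrtimer interrupt count has not moved
 * since the previous NMI, timer interrupts have been starved for a full
 * sample period, which we treat as a hard lockup.
 */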
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif

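/* Returns the stall duration in seconds, or 0 if there is no soft lockup. */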
static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp(smp_processor_id());

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI

static DEFINE_RAW_SPINLOCK(watchdog_output_lock);

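/*
 * The hard-lockup detector uses a pinned, initially disabled CPU-cycles
 * perf event; its sample period is sized so that a healthy CPU services
 * several hrtimer interrupts between consecutive NMIs.
 */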
static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing. The timer interrupt should have fired multiple
         * times before this counter overflowed; if it hasn't, that is a
         * good indication the cpu is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                /*
                 * If early-printk is enabled then make sure we do not
                 * lock up in printk() and kill console logging:
                 */
                printk_kill();

                if (hardlockup_panic) {
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                } else {
                        raw_spin_lock(&watchdog_output_lock);
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                        raw_spin_unlock(&watchdog_output_lock);
                }

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
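/*
 * Runs once per sample period on each CPU: it counts the interrupt for
 * the hardlockup detector, wakes the per-CPU watchdog thread, and checks
 * the thread's last touch timestamp against the soft-lockup threshold.
 */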
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure a high priority task is
         * being scheduled. The task touches the watchdog to indicate it
         * is getting cpu time. If it hasn't, that is a good indication
         * some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}


/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
        struct sched_param param = { .sched_priority = 0 };
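        /* priority 0 is only used to drop back to SCHED_NORMAL on exit */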
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* initialize timestamp */
        __touch_watchdog();

        /* kick off the timer for the hardlockup detector */
        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly (kicked by the hrtimer callback function) once per
         * sample period (4 seconds by default) to reset the softlockup
         * timestamp. If this gets delayed for more than 2*watchdog_thresh
         * seconds then the debug-printout triggers in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        /*
         * Drop the policy/priority elevation during thread exit to avoid a
         * scheduling latency spike.
         */
        __set_current_state(TASK_RUNNING);
        sched_setscheduler(current, SCHED_NORMAL, &param);
        return 0;
}


#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static int watchdog_nmi_enable(int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                 watchdog_overflow_callback, NULL);
        if (!IS_ERR(event)) {
                pr_info("enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                           cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */

/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
        per_cpu(watchdog_nmi_touch, cpu) = true;
#endif
}

static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        int err = 0;

        /* enable the perf event */
        err = watchdog_nmi_enable(cpu);

        /* Regardless of err above, fall through and start softlockup */

        /* create the watchdog thread */
        if (!p) {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu),
                                           "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        pr_err("softlockup watchdog for %i failed\n", cpu);
                        if (!err) {
                                /* if hardlockup hasn't already set this */
                                err = PTR_ERR(p);
                                /* and disable the perf event */
                                watchdog_nmi_disable(cpu);
                        }
                        goto out;
                }
                sched_setscheduler(p, SCHED_FIFO, &param);
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

out:
        return err;
}

static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
        int cpu;

        watchdog_enabled = 0;

        for_each_online_cpu(cpu)
                if (!watchdog_enable(cpu))
                        /*
                         * if any cpu succeeds, watchdog is considered
                         * enabled for the system
                         */
                        watchdog_enabled = 1;

        if (!watchdog_enabled)
                pr_err("failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                watchdog_disable(cpu);

        /* if all watchdogs are disabled, then they are disabled for the system */
        watchdog_enabled = 0;
}


/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret || !write)
                goto out;

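        /* a write toggles the detectors on every online CPU at once */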
        if (watchdog_enabled && watchdog_thresh)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

out:
        return ret;
}
#endif /* CONFIG_SYSCTL */


/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enabled)
                        watchdog_enable(hotcpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                watchdog_disable(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }

        /*
         * hardlockup and softlockup are not important enough
         * to block cpu bring up. Just always succeed and
         * rely on printk output to flag problems.
         */
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

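/*
 * Bring up the watchdog on the boot CPU by hand (the hotplug notifier is
 * not registered yet), then let cpu_callback() handle all later CPUs.
 */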
void __init lockup_detector_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(notifier_to_errno(err));

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        return;
}