/*
 * kernel/trace/latency_hist.c
 *
 * Add support for histograms of preemption-off latency and
 * interrupt-off latency and wakeup latency, it depends on
 * Real-Time Preemption Support.
 *
 * Copyright (C) 2005 MontaVista Software, Inc.
 * Yi Yang <yyang@ch.mvista.com>
 *
 * Converted to work with the new latency tracer.
 * Copyright (C) 2008 Red Hat, Inc.
 * Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/div64.h>

#include "trace.h"
#include <trace/events/sched.h>

/* Histogram buckets are in microseconds; clocks report nanoseconds. */
#define NSECS_PER_USECS 1000L

/* Instantiate the tracepoints declared in <trace/events/hist.h>. */
#define CREATE_TRACE_POINTS
#include <trace/events/hist.h>
34
/*
 * Latency classes handled by this file. Each value selects a set of
 * per-CPU histograms and the corresponding debugfs directory.
 */
enum {
	IRQSOFF_LATENCY = 0,		/* interrupts-off intervals */
	PREEMPTOFF_LATENCY,		/* preemption-off intervals */
	PREEMPTIRQSOFF_LATENCY,		/* both preemption and irqs off */
	WAKEUP_LATENCY,			/* wakeup-to-schedule latency */
	WAKEUP_LATENCY_SHAREDPRIO,	/* same, when priorities are shared */
	MISSED_TIMER_OFFSETS,		/* late hrtimer expiry offsets */
	TIMERANDWAKEUP_LATENCY,		/* timer offset + wakeup latency */
	MAX_LATENCY_TYPE,		/* count sentinel, not a real type */
};
45
/* Number of one-microsecond buckets per histogram. */
#define MAX_ENTRY_NUM 10240

/*
 * One histogram instance (one per CPU and latency type). hist_array[]
 * is indexed by latency in microseconds shifted by 'offset' so that
 * bipolar scales (negative latencies) can be represented.
 */
struct hist_data {
	atomic_t hist_mode; /* 0 don't log, 1 log (paused while reading) */
	long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
	long min_lat;				/* smallest sample seen */
	long max_lat;				/* largest sample seen */
	unsigned long long below_hist_bound_samples; /* underflow count */
	unsigned long long above_hist_bound_samples; /* overflow count */
	long long accumulate_lat;		/* sum of samples, for average */
	unsigned long long total_samples;
	unsigned long long hist_array[MAX_ENTRY_NUM];
};
59
/* State behind each debugfs "enable/<type>" file. */
struct enable_data {
	int latency_type;	/* which enum value this file controls */
	int enabled;		/* non-zero once the probes are registered */
};

/* Top-level debugfs directory name. */
static char *latency_hist_dir_root = "latency_hist";
66
/* Per-CPU histogram, start timestamp and "currently timing" flag for
 * each compiled-in latency class. */
#ifdef CONFIG_INTERRUPT_OFF_HIST
static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
static char *irqsoff_hist_dir = "irqsoff";
static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
static DEFINE_PER_CPU(int, hist_irqsoff_counting);
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
static char *preemptoff_hist_dir = "preemptoff";
static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
static DEFINE_PER_CPU(int, hist_preemptoff_counting);
#endif

#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
static char *preemptirqsoff_hist_dir = "preemptirqsoff";
static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
#endif

#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
static struct enable_data preemptirqsoff_enabled_data = {
	.latency_type = PREEMPTIRQSOFF_LATENCY,
	.enabled = 0,
};
#endif

/*
 * Snapshot of the tasks involved in the worst latency seen so far,
 * exported through the per-CPU "max_latency-CPUx" debugfs files.
 */
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
struct maxlatproc_data {
	char comm[FIELD_SIZEOF(struct task_struct, comm)];	 /* woken task */
	char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; /* waker */
	int pid;
	int current_pid;
	int prio;
	int current_prio;
	long latency;		/* worst-case latency, microseconds */
	long timeroffset;	/* timer expiry offset for timer types */
	cycle_t timestamp;	/* when the worst case was recorded */
};
#endif

#ifdef CONFIG_WAKEUP_LATENCY_HIST
static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
static char *wakeup_latency_hist_dir = "wakeup";
static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
static notrace void probe_wakeup_latency_hist_start(void *v,
	struct task_struct *p);
static notrace void probe_wakeup_latency_hist_stop(void *v,
	struct task_struct *prev, struct task_struct *next);
static notrace void probe_sched_migrate_task(void *,
	struct task_struct *task, int cpu);
static struct enable_data wakeup_latency_enabled_data = {
	.latency_type = WAKEUP_LATENCY,
	.enabled = 0,
};
static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
/* Task currently being timed from wakeup to switch-in (refcounted). */
static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
static DEFINE_PER_CPU(int, wakeup_sharedprio);
static unsigned long wakeup_pid;	/* optional pid filter, 0 = off */
#endif

#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
static char *missed_timer_offsets_dir = "missed_timer_offsets";
static notrace void probe_hrtimer_interrupt(void *v, int cpu,
	long long offset, struct task_struct *curr, struct task_struct *task);
static struct enable_data missed_timer_offsets_enabled_data = {
	.latency_type = MISSED_TIMER_OFFSETS,
	.enabled = 0,
};
static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
static unsigned long missed_timer_offsets_pid;	/* pid filter, 0 = off */
#endif

#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
static struct enable_data timerandwakeup_enabled_data = {
	.latency_type = TIMERANDWAKEUP_LATENCY,
	.enabled = 0,
};
static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
#endif
156
/*
 * latency_hist - fold one latency sample into the selected histogram.
 * @latency_type: one of the enum values above; selects the histogram set
 * @cpu:	  CPU whose per-cpu histogram is updated
 * @latency:	  sample in microseconds, before the bipolar offset
 * @timeroffset:  timer expiry offset, stored with a new maximum sample
 * @stop:	  end timestamp of the sample
 * @p:		  task the sample belongs to (NULL for irq/preempt types)
 *
 * Called from tracepoint probes, hence notrace; must not sleep.
 */
void notrace latency_hist(int latency_type, int cpu, long latency,
			  long timeroffset, cycle_t stop,
			  struct task_struct *p)
{
	struct hist_data *my_hist;
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	struct maxlatproc_data *mp = NULL;
#endif

	if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
	    latency_type >= MAX_LATENCY_TYPE)
		return;

	/* Map the type to its per-cpu histogram and, for the task-based
	 * types, the max-latency process record. */
	switch (latency_type) {
#ifdef CONFIG_INTERRUPT_OFF_HIST
	case IRQSOFF_LATENCY:
		my_hist = &per_cpu(irqsoff_hist, cpu);
		break;
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
	case PREEMPTOFF_LATENCY:
		my_hist = &per_cpu(preemptoff_hist, cpu);
		break;
#endif
#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
	case PREEMPTIRQSOFF_LATENCY:
		my_hist = &per_cpu(preemptirqsoff_hist, cpu);
		break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
	case WAKEUP_LATENCY:
		my_hist = &per_cpu(wakeup_latency_hist, cpu);
		mp = &per_cpu(wakeup_maxlatproc, cpu);
		break;
	case WAKEUP_LATENCY_SHAREDPRIO:
		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
		mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
		break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
	case MISSED_TIMER_OFFSETS:
		my_hist = &per_cpu(missed_timer_offsets, cpu);
		mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
		break;
#endif
#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	case TIMERANDWAKEUP_LATENCY:
		my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
		mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
		break;
#endif

	default:
		/* Type unknown or compiled out. */
		return;
	}

	/* Shift by the offset so bipolar scales index from zero. */
	latency += my_hist->offset;

	/* hist_mode == 0: logging paused by a reader or a reset. */
	if (atomic_read(&my_hist->hist_mode) == 0)
		return;

	/* Out-of-range samples are only counted, not binned. */
	if (latency < 0 || latency >= MAX_ENTRY_NUM) {
		if (latency < 0)
			my_hist->below_hist_bound_samples++;
		else
			my_hist->above_hist_bound_samples++;
	} else
		my_hist->hist_array[latency]++;

	/* New maximum (or very first sample): remember which tasks were
	 * involved for the task-based latency types. */
	if (unlikely(latency > my_hist->max_lat ||
	    my_hist->min_lat == LONG_MAX)) {
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
		if (latency_type == WAKEUP_LATENCY ||
		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
		    latency_type == MISSED_TIMER_OFFSETS ||
		    latency_type == TIMERANDWAKEUP_LATENCY) {
			/* task comm fields are NUL-terminated by the kernel,
			 * so strncpy cannot leave mp->comm unterminated */
			strncpy(mp->comm, p->comm, sizeof(mp->comm));
			strncpy(mp->current_comm, current->comm,
			    sizeof(mp->current_comm));
			mp->pid = task_pid_nr(p);
			mp->current_pid = task_pid_nr(current);
			mp->prio = p->prio;
			mp->current_prio = current->prio;
			mp->latency = latency;
			mp->timeroffset = timeroffset;
			mp->timestamp = stop;
		}
#endif
		my_hist->max_lat = latency;
	}
	if (unlikely(latency < my_hist->min_lat))
		my_hist->min_lat = latency;
	my_hist->total_samples++;
	my_hist->accumulate_lat += latency;
}
255
/*
 * l_start - seq_file start: print the summary header at index 0 and
 * return a heap-allocated cursor holding the current bucket index.
 *
 * Logging into the histogram is paused here (hist_mode decremented) and
 * only re-enabled by l_next() once the last bucket has been visited.
 * NOTE(review): if the reader closes the file before the end, the
 * matching atomic_inc() never runs and logging stays disabled until a
 * reset - verify against the seq_file lifecycle.
 */
static void *l_start(struct seq_file *m, loff_t *pos)
{
	loff_t *index_ptr = NULL;
	loff_t index = *pos;
	struct hist_data *my_hist = m->private;

	if (index == 0) {
		char minstr[32], avgstr[32], maxstr[32];

		/* Pause logging while the snapshot is printed. */
		atomic_dec(&my_hist->hist_mode);

		if (likely(my_hist->total_samples)) {
			/* Signed 64-bit division: average latency. */
			long avg = (long) div64_s64(my_hist->accumulate_lat,
			    my_hist->total_samples);
			snprintf(minstr, sizeof(minstr), "%ld",
			    my_hist->min_lat - my_hist->offset);
			snprintf(avgstr, sizeof(avgstr), "%ld",
			    avg - my_hist->offset);
			snprintf(maxstr, sizeof(maxstr), "%ld",
			    my_hist->max_lat - my_hist->offset);
		} else {
			strcpy(minstr, "<undef>");
			strcpy(avgstr, minstr);
			strcpy(maxstr, minstr);
		}

		seq_printf(m, "#Minimum latency: %s microseconds\n"
			   "#Average latency: %s microseconds\n"
			   "#Maximum latency: %s microseconds\n"
			   "#Total samples: %llu\n"
			   "#There are %llu samples lower than %ld"
			   " microseconds.\n"
			   "#There are %llu samples greater or equal"
			   " than %ld microseconds.\n"
			   "#usecs\t%16s\n",
			   minstr, avgstr, maxstr,
			   my_hist->total_samples,
			   my_hist->below_hist_bound_samples,
			   -my_hist->offset,
			   my_hist->above_hist_bound_samples,
			   MAX_ENTRY_NUM - my_hist->offset,
			   "samples");
	}
	if (index < MAX_ENTRY_NUM) {
		/* Cursor is freed in l_stop(); NULL ends the iteration. */
		index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
		if (index_ptr)
			*index_ptr = index;
	}

	return index_ptr;
}
307
308static void *l_next(struct seq_file *m, void *p, loff_t *pos)
309{
310 loff_t *index_ptr = p;
311 struct hist_data *my_hist = m->private;
312
313 if (++*pos >= MAX_ENTRY_NUM) {
314 atomic_inc(&my_hist->hist_mode);
315 return NULL;
316 }
317 *index_ptr = *pos;
318 return index_ptr;
319}
320
/* l_stop - release the cursor allocated by l_start(); kfree(NULL) is ok. */
static void l_stop(struct seq_file *m, void *p)
{
	kfree(p);
}
325
326static int l_show(struct seq_file *m, void *p)
327{
328 int index = *(loff_t *) p;
329 struct hist_data *my_hist = m->private;
330
331 seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
332 my_hist->hist_array[index]);
333 return 0;
334}
335
/* seq_file iterator over one histogram's buckets. */
static struct seq_operations latency_hist_seq_op = {
	.start = l_start,
	.next = l_next,
	.stop = l_stop,
	.show = l_show
};
342
343static int latency_hist_open(struct inode *inode, struct file *file)
344{
345 int ret;
346
347 ret = seq_open(file, &latency_hist_seq_op);
348 if (!ret) {
349 struct seq_file *seq = file->private_data;
350 seq->private = inode->i_private;
351 }
352 return ret;
353}
354
/* Read-only file operations for the per-CPU histogram files. */
static struct file_operations latency_hist_fops = {
	.open = latency_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
361
362#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
363 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
364static void clear_maxlatprocdata(struct maxlatproc_data *mp)
365{
366 mp->comm[0] = mp->current_comm[0] = '\0';
367 mp->prio = mp->current_prio = mp->pid = mp->current_pid =
368 mp->latency = mp->timeroffset = -1;
369 mp->timestamp = 0;
370}
371#endif
372
373static void hist_reset(struct hist_data *hist)
374{
375 atomic_dec(&hist->hist_mode);
376
377 memset(hist->hist_array, 0, sizeof(hist->hist_array));
378 hist->below_hist_bound_samples = 0ULL;
379 hist->above_hist_bound_samples = 0ULL;
380 hist->min_lat = LONG_MAX;
381 hist->max_lat = LONG_MIN;
382 hist->total_samples = 0ULL;
383 hist->accumulate_lat = 0LL;
384
385 atomic_inc(&hist->hist_mode);
386}
387
388static ssize_t
389latency_hist_reset(struct file *file, const char __user *a,
390 size_t size, loff_t *off)
391{
392 int cpu;
393 struct hist_data *hist = NULL;
394#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
395 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
396 struct maxlatproc_data *mp = NULL;
397#endif
398 off_t latency_type = (off_t) file->private_data;
399
400 for_each_online_cpu(cpu) {
401
402 switch (latency_type) {
403#ifdef CONFIG_PREEMPT_OFF_HIST
404 case PREEMPTOFF_LATENCY:
405 hist = &per_cpu(preemptoff_hist, cpu);
406 break;
407#endif
408#ifdef CONFIG_INTERRUPT_OFF_HIST
409 case IRQSOFF_LATENCY:
410 hist = &per_cpu(irqsoff_hist, cpu);
411 break;
412#endif
413#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
414 case PREEMPTIRQSOFF_LATENCY:
415 hist = &per_cpu(preemptirqsoff_hist, cpu);
416 break;
417#endif
418#ifdef CONFIG_WAKEUP_LATENCY_HIST
419 case WAKEUP_LATENCY:
420 hist = &per_cpu(wakeup_latency_hist, cpu);
421 mp = &per_cpu(wakeup_maxlatproc, cpu);
422 break;
423 case WAKEUP_LATENCY_SHAREDPRIO:
424 hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
425 mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
426 break;
427#endif
428#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
429 case MISSED_TIMER_OFFSETS:
430 hist = &per_cpu(missed_timer_offsets, cpu);
431 mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
432 break;
433#endif
434#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
435 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
436 case TIMERANDWAKEUP_LATENCY:
437 hist = &per_cpu(timerandwakeup_latency_hist, cpu);
438 mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
439 break;
440#endif
441 }
442
443 hist_reset(hist);
444#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
445 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
446 if (latency_type == WAKEUP_LATENCY ||
447 latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
448 latency_type == MISSED_TIMER_OFFSETS ||
449 latency_type == TIMERANDWAKEUP_LATENCY)
450 clear_maxlatprocdata(mp);
451#endif
452 }
453
454 return size;
455}
456
457#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
458 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
459static ssize_t
460show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
461{
462 char buf[64];
463 int r;
464 unsigned long *this_pid = file->private_data;
465
466 r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
467 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
468}
469
470static ssize_t do_pid(struct file *file, const char __user *ubuf,
471 size_t cnt, loff_t *ppos)
472{
473 char buf[64];
474 unsigned long pid;
475 unsigned long *this_pid = file->private_data;
476
477 if (cnt >= sizeof(buf))
478 return -EINVAL;
479
480 if (copy_from_user(&buf, ubuf, cnt))
481 return -EFAULT;
482
483 buf[cnt] = '\0';
484
485 if (strict_strtoul(buf, 10, &pid))
486 return(-EINVAL);
487
488 *this_pid = pid;
489
490 return cnt;
491}
492#endif
493
494#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
495 defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
/*
 * show_maxlatproc - read handler for "max_latency-CPUx": one line with
 * pid, prio (converted to user-visible RT priority), latency,
 * timer offset, comm of the woken task, then the same for the waker,
 * and the timestamp in seconds.microseconds.
 */
static ssize_t
show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int r;
	struct maxlatproc_data *mp = file->private_data;
	/* Worst-case line length: two comms plus eight numeric fields. */
	int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
	unsigned long long t;
	unsigned long usecs, secs;
	char *buf;

	/* -1 pids mean the record was cleared and nothing logged since. */
	if (mp->pid == -1 || mp->current_pid == -1) {
		buf = "(none)\n";
		return simple_read_from_buffer(ubuf, cnt, ppos, buf,
		    strlen(buf));
	}

	buf = kmalloc(strmaxlen, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Split the microsecond timestamp into seconds + microseconds. */
	t = ns2usecs(mp->timestamp);
	usecs = do_div(t, USEC_PER_SEC);
	secs = (unsigned long) t;
	r = snprintf(buf, strmaxlen,
	    "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
	    MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
	    mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
	    secs, usecs);
	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	kfree(buf);
	return r;
}
528#endif
529
530static ssize_t
531show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
532{
533 char buf[64];
534 struct enable_data *ed = file->private_data;
535 int r;
536
537 r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
538 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
539}
540
/*
 * do_enable - write handler for the "enable/<type>" files. Parses a
 * boolean; on 0 -> 1 registers the tracepoint probes for the type (with
 * rollback of earlier registrations on partial failure), on 1 -> 0
 * unregisters them and clears the per-CPU in-flight state.
 */
static ssize_t
do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	char buf[64];
	long enable;
	struct enable_data *ed = file->private_data;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strict_strtol(buf, 10, &enable))
		return(-EINVAL);

	/* No state change requested: nothing to do. */
	if ((enable && ed->enabled) || (!enable && !ed->enabled))
		return cnt;

	if (enable) {
		int ret;

		switch (ed->latency_type) {
#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
		case PREEMPTIRQSOFF_LATENCY:
			ret = register_trace_preemptirqsoff_hist(
			    probe_preemptirqsoff_hist, NULL);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_preemptirqsoff_hist "
				    "to trace_preemptirqsoff_hist\n");
				return ret;
			}
			break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
		case WAKEUP_LATENCY:
			/* Four probes; each failure path unregisters all
			 * probes registered before it. */
			ret = register_trace_sched_wakeup(
			    probe_wakeup_latency_hist_start, NULL);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_wakeup_latency_hist_start "
				    "to trace_sched_wakeup\n");
				return ret;
			}
			ret = register_trace_sched_wakeup_new(
			    probe_wakeup_latency_hist_start, NULL);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_wakeup_latency_hist_start "
				    "to trace_sched_wakeup_new\n");
				unregister_trace_sched_wakeup(
				    probe_wakeup_latency_hist_start, NULL);
				return ret;
			}
			ret = register_trace_sched_switch(
			    probe_wakeup_latency_hist_stop, NULL);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_wakeup_latency_hist_stop "
				    "to trace_sched_switch\n");
				unregister_trace_sched_wakeup(
				    probe_wakeup_latency_hist_start, NULL);
				unregister_trace_sched_wakeup_new(
				    probe_wakeup_latency_hist_start, NULL);
				return ret;
			}
			ret = register_trace_sched_migrate_task(
			    probe_sched_migrate_task, NULL);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_sched_migrate_task "
				    "to trace_sched_migrate_task\n");
				unregister_trace_sched_wakeup(
				    probe_wakeup_latency_hist_start, NULL);
				unregister_trace_sched_wakeup_new(
				    probe_wakeup_latency_hist_start, NULL);
				unregister_trace_sched_switch(
				    probe_wakeup_latency_hist_stop, NULL);
				return ret;
			}
			break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
		case MISSED_TIMER_OFFSETS:
			ret = register_trace_hrtimer_interrupt(
			    probe_hrtimer_interrupt, NULL);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_hrtimer_interrupt "
				    "to trace_hrtimer_interrupt\n");
				return ret;
			}
			break;
#endif
#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
		case TIMERANDWAKEUP_LATENCY:
			/* Combined type only works when both source types
			 * are already enabled - no probes of its own. */
			if (!wakeup_latency_enabled_data.enabled ||
			    !missed_timer_offsets_enabled_data.enabled)
				return -EINVAL;
			break;
#endif
		default:
			break;
		}
	} else {
		switch (ed->latency_type) {
#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
		case PREEMPTIRQSOFF_LATENCY:
		{
			int cpu;

			unregister_trace_preemptirqsoff_hist(
			    probe_preemptirqsoff_hist, NULL);
			/* Drop any timing still in flight on each CPU. */
			for_each_online_cpu(cpu) {
#ifdef CONFIG_INTERRUPT_OFF_HIST
				per_cpu(hist_irqsoff_counting,
				    cpu) = 0;
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
				per_cpu(hist_preemptoff_counting,
				    cpu) = 0;
#endif
#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
				per_cpu(hist_preemptirqsoff_counting,
				    cpu) = 0;
#endif
			}
		}
			break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
		case WAKEUP_LATENCY:
		{
			int cpu;

			unregister_trace_sched_wakeup(
			    probe_wakeup_latency_hist_start, NULL);
			unregister_trace_sched_wakeup_new(
			    probe_wakeup_latency_hist_start, NULL);
			unregister_trace_sched_switch(
			    probe_wakeup_latency_hist_stop, NULL);
			unregister_trace_sched_migrate_task(
			    probe_sched_migrate_task, NULL);

			for_each_online_cpu(cpu) {
				per_cpu(wakeup_task, cpu) = NULL;
				per_cpu(wakeup_sharedprio, cpu) = 0;
			}
		}
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
			/* Combined type depends on this one. */
			timerandwakeup_enabled_data.enabled = 0;
#endif
			break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
		case MISSED_TIMER_OFFSETS:
			unregister_trace_hrtimer_interrupt(
			    probe_hrtimer_interrupt, NULL);
#ifdef CONFIG_WAKEUP_LATENCY_HIST
			/* Combined type depends on this one. */
			timerandwakeup_enabled_data.enabled = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}
	ed->enabled = enable;
	return cnt;
}
715
/* Write-only "reset" files (private_data = latency type). */
static const struct file_operations latency_hist_reset_fops = {
	.open = tracing_open_generic,
	.write = latency_hist_reset,
};

/* "enable/<type>" files (private_data = struct enable_data). */
static const struct file_operations enable_fops = {
	.open = tracing_open_generic,
	.read = show_enable,
	.write = do_enable,
};

#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
/* "pid" filter files (private_data = unsigned long *). */
static const struct file_operations pid_fops = {
	.open = tracing_open_generic,
	.read = show_pid,
	.write = do_pid,
};

/* Read-only "max_latency-CPUx" files. */
static const struct file_operations maxlatproc_fops = {
	.open = tracing_open_generic,
	.read = show_maxlatproc,
};
#endif
740
741#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
/*
 * probe_preemptirqsoff_hist - tracepoint probe for irqs/preempt on-off
 * transitions.
 * @reason:    which transition fired (IRQS_OFF/ON, PREEMPT_OFF/ON,
 *             TRACE_START/STOP)
 * @starthist: non-zero when a critical section begins, zero when it ends
 *
 * On start, records a per-cpu start timestamp for each histogram that is
 * not already counting; on stop, computes the elapsed time in
 * microseconds and feeds it to latency_hist(). 'time_set' makes sure
 * ftrace_now() is called at most once per invocation.
 */
static notrace void probe_preemptirqsoff_hist(void *v, int reason,
	int starthist)
{
	int cpu = raw_smp_processor_id();
	int time_set = 0;

	if (starthist) {
		cycle_t uninitialized_var(start);

		/* Nothing to time if neither preemption nor irqs are off. */
		if (!preempt_count() && !irqs_disabled())
			return;

#ifdef CONFIG_INTERRUPT_OFF_HIST
		if ((reason == IRQS_OFF || reason == TRACE_START) &&
		    !per_cpu(hist_irqsoff_counting, cpu)) {
			per_cpu(hist_irqsoff_counting, cpu) = 1;
			start = ftrace_now(cpu);
			time_set++;
			per_cpu(hist_irqsoff_start, cpu) = start;
		}
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
		if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
		    !per_cpu(hist_preemptoff_counting, cpu)) {
			per_cpu(hist_preemptoff_counting, cpu) = 1;
			if (!(time_set++))
				start = ftrace_now(cpu);
			per_cpu(hist_preemptoff_start, cpu) = start;
		}
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
		/* Combined histogram starts once both are counting. */
		if (per_cpu(hist_irqsoff_counting, cpu) &&
		    per_cpu(hist_preemptoff_counting, cpu) &&
		    !per_cpu(hist_preemptirqsoff_counting, cpu)) {
			per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
			if (!time_set)
				start = ftrace_now(cpu);
			per_cpu(hist_preemptirqsoff_start, cpu) = start;
		}
#endif
	} else {
		cycle_t uninitialized_var(stop);

#ifdef CONFIG_INTERRUPT_OFF_HIST
		if ((reason == IRQS_ON || reason == TRACE_STOP) &&
		    per_cpu(hist_irqsoff_counting, cpu)) {
			cycle_t start = per_cpu(hist_irqsoff_start, cpu);

			stop = ftrace_now(cpu);
			time_set++;
			if (start) {
				long latency = ((long) (stop - start)) /
				    NSECS_PER_USECS;

				latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
				    stop, NULL);
			}
			per_cpu(hist_irqsoff_counting, cpu) = 0;
		}
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
		if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
		    per_cpu(hist_preemptoff_counting, cpu)) {
			cycle_t start = per_cpu(hist_preemptoff_start, cpu);

			if (!(time_set++))
				stop = ftrace_now(cpu);
			if (start) {
				long latency = ((long) (stop - start)) /
				    NSECS_PER_USECS;

				latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
				    0, stop, NULL);
			}
			per_cpu(hist_preemptoff_counting, cpu) = 0;
		}
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
		/* Combined section ends as soon as either source ends. */
		if ((!per_cpu(hist_irqsoff_counting, cpu) ||
		     !per_cpu(hist_preemptoff_counting, cpu)) &&
		    per_cpu(hist_preemptirqsoff_counting, cpu)) {
			cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);

			if (!time_set)
				stop = ftrace_now(cpu);
			if (start) {
				long latency = ((long) (stop - start)) /
				    NSECS_PER_USECS;

				latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
				    latency, 0, stop, NULL);
			}
			per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
		}
#endif
	}
}
843#endif
844
845#ifdef CONFIG_WAKEUP_LATENCY_HIST
/* Protects the per-cpu wakeup_task pointers and their refcounts. */
static DEFINE_RAW_SPINLOCK(wakeup_lock);

/*
 * probe_sched_migrate_task - if the task currently being timed on its
 * old CPU migrates, move the timing (and the task_struct reference)
 * to the destination CPU so the stop probe can still find it.
 */
static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
	int cpu)
{
	int old_cpu = task_cpu(task);

	if (cpu != old_cpu) {
		unsigned long flags;
		struct task_struct *cpu_wakeup_task;

		raw_spin_lock_irqsave(&wakeup_lock, flags);

		cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
		if (task == cpu_wakeup_task) {
			/* Drop the old CPU's reference, take one for the
			 * new CPU's slot. */
			put_task_struct(cpu_wakeup_task);
			per_cpu(wakeup_task, old_cpu) = NULL;
			cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
			get_task_struct(cpu_wakeup_task);
		}

		raw_spin_unlock_irqrestore(&wakeup_lock, flags);
	}
}
869
/*
 * probe_wakeup_latency_hist_start - sched_wakeup/_new probe: start
 * timing @p if it is the most interesting task woken on its CPU.
 *
 * With a pid filter set, only the filtered pid is timed (but shared-prio
 * detection still applies to everyone). Without a filter, only RT tasks
 * of equal or higher priority than both the current task and any task
 * already being timed are considered.
 */
static notrace void probe_wakeup_latency_hist_start(void *v,
	struct task_struct *p)
{
	unsigned long flags;
	struct task_struct *curr = current;
	int cpu = task_cpu(p);
	struct task_struct *cpu_wakeup_task;

	raw_spin_lock_irqsave(&wakeup_lock, flags);

	cpu_wakeup_task = per_cpu(wakeup_task, cpu);

	if (wakeup_pid) {
		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
		    p->prio == curr->prio)
			per_cpu(wakeup_sharedprio, cpu) = 1;
		if (likely(wakeup_pid != task_pid_nr(p)))
			goto out;
	} else {
		/* Lower prio value = higher priority; skip tasks that are
		 * not strictly the most eligible. */
		if (likely(!rt_task(p)) ||
		    (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
		    p->prio > curr->prio)
			goto out;
		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
		    p->prio == curr->prio)
			per_cpu(wakeup_sharedprio, cpu) = 1;
	}

	/* Replace any previously timed task with @p (refcounted). */
	if (cpu_wakeup_task)
		put_task_struct(cpu_wakeup_task);
	cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
	get_task_struct(cpu_wakeup_task);
	cpu_wakeup_task->preempt_timestamp_hist =
		ftrace_now(raw_smp_processor_id());
out:
	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
}
907
/*
 * probe_wakeup_latency_hist_stop - sched_switch probe: when the task
 * being timed is finally switched in, compute wakeup->run latency and
 * log it in the wakeup (or sharedprio, or timerandwakeup) histogram.
 */
static notrace void probe_wakeup_latency_hist_stop(void *v,
	struct task_struct *prev, struct task_struct *next)
{
	unsigned long flags;
	int cpu = task_cpu(next);
	long latency;
	cycle_t stop;
	struct task_struct *cpu_wakeup_task;

	raw_spin_lock_irqsave(&wakeup_lock, flags);

	cpu_wakeup_task = per_cpu(wakeup_task, cpu);

	if (cpu_wakeup_task == NULL)
		goto out;

	/* Already running? */
	if (unlikely(current == cpu_wakeup_task))
		goto out_reset;

	if (next != cpu_wakeup_task) {
		/* A higher-prio task won instead: abandon this timing. */
		if (next->prio < cpu_wakeup_task->prio)
			goto out_reset;

		if (next->prio == cpu_wakeup_task->prio)
			per_cpu(wakeup_sharedprio, cpu) = 1;

		/* Keep waiting for the timed task. */
		goto out;
	}

	if (current->prio == cpu_wakeup_task->prio)
		per_cpu(wakeup_sharedprio, cpu) = 1;

	/*
	 * The task we are waiting for is about to be switched to.
	 * Calculate latency and store it in histogram.
	 */
	stop = ftrace_now(raw_smp_processor_id());

	latency = ((long) (stop - next->preempt_timestamp_hist)) /
	    NSECS_PER_USECS;

	if (per_cpu(wakeup_sharedprio, cpu)) {
		latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
		    next);
		per_cpu(wakeup_sharedprio, cpu) = 0;
	} else {
		latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
		if (timerandwakeup_enabled_data.enabled) {
			latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
			    next->timer_offset + latency, next->timer_offset,
			    stop, next);
		}
#endif
	}

out_reset:
	/* Timing finished or abandoned: drop the reference. */
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
	next->timer_offset = 0;
#endif
	put_task_struct(cpu_wakeup_task);
	per_cpu(wakeup_task, cpu) = NULL;
out:
	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
}
974#endif
975
976#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
/*
 * probe_hrtimer_interrupt - tracepoint probe for late hrtimer expiry.
 * @latency_ns: expiry offset; <= 0 means the timer fired late by that
 *              amount (sign convention per the hist tracepoint)
 *
 * Logs the (positive, microsecond) lateness for RT tasks that would
 * preempt the current task, optionally filtered by pid, and remembers
 * it in task->timer_offset for the combined timerandwakeup histogram.
 */
static notrace void probe_hrtimer_interrupt(void *v, int cpu,
	long long latency_ns, struct task_struct *curr, struct task_struct *task)
{
	if (latency_ns <= 0 && task != NULL && rt_task(task) &&
	    (task->prio < curr->prio ||
	    (task->prio == curr->prio &&
	    !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
		long latency;
		cycle_t now;

		if (missed_timer_offsets_pid) {
			if (likely(missed_timer_offsets_pid !=
			    task_pid_nr(task)))
				return;
		}

		now = ftrace_now(cpu);
		/* Negate: a negative offset means "late by that much". */
		latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
		latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
		    task);
#ifdef CONFIG_WAKEUP_LATENCY_HIST
		task->timer_offset = latency;
#endif
	}
}
1002#endif
1003
/*
 * latency_hist_init - build the debugfs tree:
 *   latency_hist/<type>/CPUx        per-CPU histogram (read-only)
 *   latency_hist/<type>/reset       clear all CPUs' histograms
 *   latency_hist/<type>/pid         pid filter (wakeup / timer types)
 *   latency_hist/<type>/max_latency-CPUx  worst-case process record
 *   latency_hist/enable/<type>      probe on/off switch
 * and initialize every histogram (logging enabled, min_lat = LONG_MAX
 * so the first sample sets both extremes).
 */
static __init int latency_hist_init(void)
{
	struct dentry *latency_hist_root = NULL;
	struct dentry *dentry;
#ifdef CONFIG_WAKEUP_LATENCY_HIST
	struct dentry *dentry_sharedprio;
#endif
	struct dentry *entry;
	struct dentry *enable_root;
	int i = 0;
	struct hist_data *my_hist;
	char name[64];
	char *cpufmt = "CPU%d";
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	char *cpufmt_maxlatproc = "max_latency-CPU%d";
	struct maxlatproc_data *mp = NULL;
#endif

	dentry = tracing_init_dentry();
	latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
	enable_root = debugfs_create_dir("enable", latency_hist_root);

#ifdef CONFIG_INTERRUPT_OFF_HIST
	dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(irqsoff_hist, i), &latency_hist_fops);
		my_hist = &per_cpu(irqsoff_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
	dentry = debugfs_create_dir(preemptoff_hist_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(preemptoff_hist, i), &latency_hist_fops);
		my_hist = &per_cpu(preemptoff_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
	dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
		my_hist = &per_cpu(preemptirqsoff_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
	entry = debugfs_create_file("preemptirqsoff", 0644,
	    enable_root, (void *)&preemptirqsoff_enabled_data,
	    &enable_fops);
#endif

#ifdef CONFIG_WAKEUP_LATENCY_HIST
	dentry = debugfs_create_dir(wakeup_latency_hist_dir,
	    latency_hist_root);
	dentry_sharedprio = debugfs_create_dir(
	    wakeup_latency_hist_dir_sharedprio, dentry);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);

		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(wakeup_latency_hist, i),
		    &latency_hist_fops);
		my_hist = &per_cpu(wakeup_latency_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;

		entry = debugfs_create_file(name, 0444, dentry_sharedprio,
		    &per_cpu(wakeup_latency_hist_sharedprio, i),
		    &latency_hist_fops);
		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;

		sprintf(name, cpufmt_maxlatproc, i);

		mp = &per_cpu(wakeup_maxlatproc, i);
		entry = debugfs_create_file(name, 0444, dentry, mp,
		    &maxlatproc_fops);
		clear_maxlatprocdata(mp);

		mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
		entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
		    &maxlatproc_fops);
		clear_maxlatprocdata(mp);
	}
	entry = debugfs_create_file("pid", 0644, dentry,
	    (void *)&wakeup_pid, &pid_fops);
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
	entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
	    (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
	entry = debugfs_create_file("wakeup", 0644,
	    enable_root, (void *)&wakeup_latency_enabled_data,
	    &enable_fops);
#endif

#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
	dentry = debugfs_create_dir(missed_timer_offsets_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
		my_hist = &per_cpu(missed_timer_offsets, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;

		sprintf(name, cpufmt_maxlatproc, i);
		mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
		entry = debugfs_create_file(name, 0444, dentry, mp,
		    &maxlatproc_fops);
		clear_maxlatprocdata(mp);
	}
	entry = debugfs_create_file("pid", 0644, dentry,
	    (void *)&missed_timer_offsets_pid, &pid_fops);
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
	entry = debugfs_create_file("missed_timer_offsets", 0644,
	    enable_root, (void *)&missed_timer_offsets_enabled_data,
	    &enable_fops);
#endif

#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(timerandwakeup_latency_hist, i),
		    &latency_hist_fops);
		my_hist = &per_cpu(timerandwakeup_latency_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = LONG_MAX;

		sprintf(name, cpufmt_maxlatproc, i);
		mp = &per_cpu(timerandwakeup_maxlatproc, i);
		entry = debugfs_create_file(name, 0444, dentry, mp,
		    &maxlatproc_fops);
		clear_maxlatprocdata(mp);
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
	entry = debugfs_create_file("timerandwakeup", 0644,
	    enable_root, (void *)&timerandwakeup_enabled_data,
	    &enable_fops);
#endif
	return 0;
}
1175
1176__initcall(latency_hist_init);