/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
				    unsigned long parent_ip)
{
	struct ftrace_ops *op;

	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
		return;

	trace_recursion_set(TRACE_GLOBAL_BIT);
	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
	while (op != &ftrace_list_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next); /*see above*/
	}
	trace_recursion_clear(TRACE_GLOBAL_BIT);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be some lag before all callers actually
 * stop being traced.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function_delay = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end)
		func = ftrace_global_list->func;
	else
		func = ftrace_global_list_func;

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

	update_function_graph_func();

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
	/* do not update till all functions have been modified */
	__ftrace_trace_function_delay = func;
#else
	__ftrace_trace_function = func;
#endif
	ftrace_trace_function =
		(func == ftrace_stub) ? func : ftrace_test_stop_func;
#endif
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	/* We don't support both control and global flags set. */
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_list_ops(&ftrace_global_list,
					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
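
/*
 * Worked example of the sizing above (illustrative; the exact numbers
 * depend on arch and config): with a 4096-byte page, a 16-byte page
 * header, and a 48-byte struct ftrace_profile (64-bit kernel with the
 * graph tracer fields), PROFILE_RECORDS_SIZE is 4080 bytes and
 * PROFILES_PER_PAGE is 4080 / 48 = 85 records per page.
 */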

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
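
/*
 * Worked example of the averaging above (illustrative numbers): with
 * counter = 4 hits, time = 4000 and time_squared = 4020000 (in ns and
 * ns^2), avg = 4000 / 4 = 1000, and
 *
 *	stddev = (4020000 - 4 * 1000 * 1000) / ((4 - 1) * 1000)
 *	       = 20000 / 3000 = 6
 *
 * in the partially scaled units that trace_print_graph_duration()
 * divides by 1000 once more when printing.
 */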

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
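
/*
 * These handlers back the "function_profile_enabled" debugfs file
 * created below. Typical use from user space (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 */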

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 1;

	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
};

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tp, *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	free_ftrace_hash(ops->filter_hash);
	free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_node *tp;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tp, *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	unsigned long key;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable(ops, enable);

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		free_ftrace_hash_rcu(*dst);
		rcu_assign_pointer(*dst, EMPTY_HASH);
		/* still need to update the function records */
		ret = 0;
		goto out;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	ret = -ENOMEM;
	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		goto out;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
			if (bits > 0)
				key = hash_long(entry->ip, bits);
			else
				key = 0;
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	ret = 0;
 out:
	/*
	 * Enable regardless of ret:
	 * On success, we enable the new hash.
	 * On failure, we re-enable the original hash.
	 */
	ftrace_hash_rec_enable(ops, enable);

	return ret;
}
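
/*
 * Worked example of the sizing logic above: moving a src hash with
 * 600 entries starts at size = 300 after the halving, and the loop
 * shifts 300 down to zero in 9 steps, so bits = 9 and the new hash
 * gets 2^9 = 512 buckets (just over one entry per bucket). A count
 * large enough to want more than FTRACE_HASH_MAX_BITS (12) is
 * clamped to 4096 buckets.
 */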

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw(ops->filter_hash);
	notrace_hash = rcu_dereference_raw(ops->notrace_hash);

	if ((ftrace_hash_empty(filter_hash) ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (ftrace_hash_empty(notrace_hash) ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}
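
/*
 * The effect of the test above, spelled out (illustrative):
 *
 *	filter_hash empty, ip not in notrace_hash  -> trace (match all)
 *	ip in filter_hash, ip not in notrace_hash  -> trace
 *	filter_hash non-empty, ip not in it        -> skip
 *	ip in notrace_hash                         -> skip, always
 */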

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
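
/*
 * Illustrative use of the macros above; note the goto rather than a
 * break to leave both loops ('target' and 'found' are hypothetical):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 */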


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *reca = a;
	const struct dyn_ftrace *recb = b;

	if (reca->ip > recb->ip)
		return 1;
	if (reca->ip < recb->ip)
		return -1;
	return 0;
}
/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns 1 if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_location(unsigned long ip)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = ip;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return 1;
	}

	return 0;
}
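
/*
 * Illustrative use (sketch): a subsystem such as kprobes could ask
 * whether an address is an mcount call site owned by ftrace before
 * poking at it, with whatever policy suits the caller:
 *
 *	if (ftrace_location(addr))
 *		return -EBUSY;
 */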

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   is inverted.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * For the filter_hash, a record matches if it is in
			 * this hash and not in the notrace hash. For the
			 * notrace_hash, a record matches if it is in this
			 * hash and is actually traced by the ops (it is in
			 * the filter hash, or the filter hash is empty).
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	if (ftrace_pages->index == ftrace_pages->size) {
		/* We should have allocated enough */
		if (WARN_ON(!ftrace_pages->next))
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
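
/*
 * Illustrative call site for ftrace_bug() in arch patching code
 * (sketch; the patching helper name is arch-specific and hypothetical
 * here):
 *
 *	ret = arch_patch_call_site(ip, old_code, new_code);
 *	if (ret)
 *		ftrace_bug(ret, ip);
 */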


/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();
	return 0;
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && (rec->flags & ~FTRACE_FL_MASK))
		flag = FTRACE_FL_ENABLED;

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		if (update)
			rec->flags |= FTRACE_FL_ENABLED;
		return FTRACE_UPDATE_MAKE_CALL;
	}

	if (update)
		rec->flags &= ~FTRACE_FL_ENABLED;

	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

static void ftrace_replace_code(int update)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, update);
		if (failed) {
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
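
/*
 * Together the three functions above form a forward-only iterator.
 * An illustrative walk over every record (sketch; callers hold
 * ftrace_lock since the iterator state is static):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		(use rec->ip here)
 *	}
 */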

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;
	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine.
	 */
	function_trace_stop++;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/*
	 * For archs that call ftrace_test_stop_func(), we must
	 * wait till after we update all the function callers
	 * before we update the callback. This keeps different
	 * ops that record different functions from corrupting
	 * each other.
	 */
	__ftrace_trace_function = __ftrace_trace_function_delay;
#endif
	function_trace_stop--;

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static int global_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}

static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of imbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	ftrace_run_update_code(command);

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}

static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->filter_hash) &&
		ftrace_hash_empty(ops->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 0;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->filter_hash) &&
	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
		return 0;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}

static int ftrace_update_code(struct module *mod)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;
	bool test = false;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	for (pg = ftrace_new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			int cnt = ref;

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			if (test)
				cnt += referenced_filters(p);
			p->flags = cnt;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			ftrace_update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is an
			 * inherent check in ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Doing the NOP conversion
			 * first puts the module into the correct state, thus
			 * passing the ftrace_make_call check.
2217 */
2218 if (ftrace_start_up && cnt) {
2219 int failed = __ftrace_replace_code(p, 1);
2220 if (failed)
2221 ftrace_bug(failed, p->ip);
2222 }
2223 }
2224 }
2225
2226 ftrace_new_pgs = NULL;
2227
2228 stop = ftrace_now(raw_smp_processor_id());
2229 ftrace_update_time = stop - start;
2230 ftrace_update_tot_cnt += ftrace_update_cnt;
2231
2232 return 0;
2233}
2234
2235static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2236{
2237 int order;
2238 int cnt;
2239
2240 if (WARN_ON(!count))
2241 return -EINVAL;
2242
2243 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2244
2245 /*
2246	 * We want to fill as much as possible, so that no more than
2247	 * one page's worth of entries is left empty.
2248 */
2249 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2250 order--;
2251
2252 again:
2253 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2254
2255 if (!pg->records) {
2256 /* if we can't allocate this size, try something smaller */
2257 if (!order)
2258 return -ENOMEM;
2259 order >>= 1;
2260 goto again;
2261 }
2262
2263 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2264 pg->size = cnt;
2265
2266 if (cnt > count)
2267 cnt = count;
2268
2269 return cnt;
2270}
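/*
 * Worked example of the sizing above (illustrative only; assumes 4K
 * pages and a 16-byte struct dyn_ftrace, i.e. ENTRIES_PER_PAGE == 256):
 *
 * For count == 1025, DIV_ROUND_UP(1025, 256) == 5 and
 * get_count_order(5) == 3, so we start at 8 pages (2048 entries).
 * Since 2048 >= 1025 + 256, the loop drops to order 2 (1024 entries),
 * which can no longer waste a whole page. 1024 is returned and the
 * caller allocates another page for the one remaining record.
 */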
2271
2272static struct ftrace_page *
2273ftrace_allocate_pages(unsigned long num_to_init)
2274{
2275 struct ftrace_page *start_pg;
2276 struct ftrace_page *pg;
2277 int order;
2278 int cnt;
2279
2280 if (!num_to_init)
2281		return NULL;
2282
2283 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2284 if (!pg)
2285 return NULL;
2286
2287 /*
2288	 * Try to allocate as much as possible in one contiguous
2289 * location that fills in all of the space. We want to
2290 * waste as little space as possible.
2291 */
2292 for (;;) {
2293 cnt = ftrace_allocate_records(pg, num_to_init);
2294 if (cnt < 0)
2295 goto free_pages;
2296
2297 num_to_init -= cnt;
2298 if (!num_to_init)
2299 break;
2300
2301 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2302 if (!pg->next)
2303 goto free_pages;
2304
2305 pg = pg->next;
2306 }
2307
2308 return start_pg;
2309
2310 free_pages:
2311	/* walk every page from the start; pg->records may be NULL here */
2312	for (pg = start_pg; pg; pg = start_pg) {
2313	order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2314	free_pages((unsigned long)pg->records, order);
2315	start_pg = pg->next;
2316	kfree(pg);
2317 }
2318 pr_info("ftrace: FAILED to allocate memory for functions\n");
2319 return NULL;
2320}
2321
2322static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2323{
2324 int cnt;
2325
2326 if (!num_to_init) {
2327 pr_info("ftrace: No functions to be traced?\n");
2328 return -1;
2329 }
2330
2331 cnt = num_to_init / ENTRIES_PER_PAGE;
2332 pr_info("ftrace: allocating %ld entries in %d pages\n",
2333 num_to_init, cnt + 1);
2334
2335 return 0;
2336}
2337
2338#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2339
2340struct ftrace_iterator {
2341 loff_t pos;
2342 loff_t func_pos;
2343 struct ftrace_page *pg;
2344 struct dyn_ftrace *func;
2345 struct ftrace_func_probe *probe;
2346 struct trace_parser parser;
2347 struct ftrace_hash *hash;
2348 struct ftrace_ops *ops;
2349 int hidx;
2350 int idx;
2351 unsigned flags;
2352};
2353
2354static void *
2355t_hash_next(struct seq_file *m, loff_t *pos)
2356{
2357 struct ftrace_iterator *iter = m->private;
2358 struct hlist_node *hnd = NULL;
2359 struct hlist_head *hhd;
2360
2361 (*pos)++;
2362 iter->pos = *pos;
2363
2364 if (iter->probe)
2365 hnd = &iter->probe->node;
2366 retry:
2367 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2368 return NULL;
2369
2370 hhd = &ftrace_func_hash[iter->hidx];
2371
2372 if (hlist_empty(hhd)) {
2373 iter->hidx++;
2374 hnd = NULL;
2375 goto retry;
2376 }
2377
2378 if (!hnd)
2379 hnd = hhd->first;
2380 else {
2381 hnd = hnd->next;
2382 if (!hnd) {
2383 iter->hidx++;
2384 goto retry;
2385 }
2386 }
2387
2388 if (WARN_ON_ONCE(!hnd))
2389 return NULL;
2390
2391 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2392
2393 return iter;
2394}
2395
2396static void *t_hash_start(struct seq_file *m, loff_t *pos)
2397{
2398 struct ftrace_iterator *iter = m->private;
2399 void *p = NULL;
2400 loff_t l;
2401
2402 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2403 return NULL;
2404
2405 if (iter->func_pos > *pos)
2406 return NULL;
2407
2408 iter->hidx = 0;
2409 for (l = 0; l <= (*pos - iter->func_pos); ) {
2410 p = t_hash_next(m, &l);
2411 if (!p)
2412 break;
2413 }
2414 if (!p)
2415 return NULL;
2416
2417 /* Only set this if we have an item */
2418 iter->flags |= FTRACE_ITER_HASH;
2419
2420 return iter;
2421}
2422
2423static int
2424t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2425{
2426 struct ftrace_func_probe *rec;
2427
2428 rec = iter->probe;
2429 if (WARN_ON_ONCE(!rec))
2430 return -EIO;
2431
2432 if (rec->ops->print)
2433 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2434
2435 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2436
2437 if (rec->data)
2438 seq_printf(m, ":%p", rec->data);
2439 seq_putc(m, '\n');
2440
2441 return 0;
2442}
2443
2444static void *
2445t_next(struct seq_file *m, void *v, loff_t *pos)
2446{
2447 struct ftrace_iterator *iter = m->private;
2448 struct ftrace_ops *ops = iter->ops;
2449 struct dyn_ftrace *rec = NULL;
2450
2451 if (unlikely(ftrace_disabled))
2452 return NULL;
2453
2454 if (iter->flags & FTRACE_ITER_HASH)
2455 return t_hash_next(m, pos);
2456
2457 (*pos)++;
2458 iter->pos = iter->func_pos = *pos;
2459
2460 if (iter->flags & FTRACE_ITER_PRINTALL)
2461 return t_hash_start(m, pos);
2462
2463 retry:
2464 if (iter->idx >= iter->pg->index) {
2465 if (iter->pg->next) {
2466 iter->pg = iter->pg->next;
2467 iter->idx = 0;
2468 goto retry;
2469 }
2470 } else {
2471 rec = &iter->pg->records[iter->idx++];
2472 if (((iter->flags & FTRACE_ITER_FILTER) &&
2473 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2474
2475 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2476 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2477
2478 ((iter->flags & FTRACE_ITER_ENABLED) &&
2479 !(rec->flags & ~FTRACE_FL_MASK))) {
2480
2481 rec = NULL;
2482 goto retry;
2483 }
2484 }
2485
2486 if (!rec)
2487 return t_hash_start(m, pos);
2488
2489 iter->func = rec;
2490
2491 return iter;
2492}
2493
2494static void reset_iter_read(struct ftrace_iterator *iter)
2495{
2496 iter->pos = 0;
2497 iter->func_pos = 0;
2498 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2499}
2500
2501static void *t_start(struct seq_file *m, loff_t *pos)
2502{
2503 struct ftrace_iterator *iter = m->private;
2504 struct ftrace_ops *ops = iter->ops;
2505 void *p = NULL;
2506 loff_t l;
2507
2508 mutex_lock(&ftrace_lock);
2509
2510 if (unlikely(ftrace_disabled))
2511 return NULL;
2512
2513 /*
2514 * If an lseek was done, then reset and start from beginning.
2515 */
2516 if (*pos < iter->pos)
2517 reset_iter_read(iter);
2518
2519 /*
2520 * For set_ftrace_filter reading, if we have the filter
2521	 * off, we can take a shortcut and just print out that all
2522 * functions are enabled.
2523 */
2524 if (iter->flags & FTRACE_ITER_FILTER &&
2525 ftrace_hash_empty(ops->filter_hash)) {
2526 if (*pos > 0)
2527 return t_hash_start(m, pos);
2528 iter->flags |= FTRACE_ITER_PRINTALL;
2529 /* reset in case of seek/pread */
2530 iter->flags &= ~FTRACE_ITER_HASH;
2531 return iter;
2532 }
2533
2534 if (iter->flags & FTRACE_ITER_HASH)
2535 return t_hash_start(m, pos);
2536
2537 /*
2538 * Unfortunately, we need to restart at ftrace_pages_start
2539	 * every time we let go of ftrace_lock. This is because
2540 * those pointers can change without the lock.
2541 */
2542 iter->pg = ftrace_pages_start;
2543 iter->idx = 0;
2544 for (l = 0; l <= *pos; ) {
2545 p = t_next(m, p, &l);
2546 if (!p)
2547 break;
2548 }
2549
2550 if (!p)
2551 return t_hash_start(m, pos);
2552
2553 return iter;
2554}
2555
2556static void t_stop(struct seq_file *m, void *p)
2557{
2558 mutex_unlock(&ftrace_lock);
2559}
2560
2561static int t_show(struct seq_file *m, void *v)
2562{
2563 struct ftrace_iterator *iter = m->private;
2564 struct dyn_ftrace *rec;
2565
2566 if (iter->flags & FTRACE_ITER_HASH)
2567 return t_hash_show(m, iter);
2568
2569 if (iter->flags & FTRACE_ITER_PRINTALL) {
2570 seq_printf(m, "#### all functions enabled ####\n");
2571 return 0;
2572 }
2573
2574 rec = iter->func;
2575
2576 if (!rec)
2577 return 0;
2578
2579 seq_printf(m, "%ps", (void *)rec->ip);
2580 if (iter->flags & FTRACE_ITER_ENABLED)
2581 seq_printf(m, " (%ld)",
2582 rec->flags & ~FTRACE_FL_MASK);
2583 seq_printf(m, "\n");
2584
2585 return 0;
2586}
2587
2588static const struct seq_operations show_ftrace_seq_ops = {
2589 .start = t_start,
2590 .next = t_next,
2591 .stop = t_stop,
2592 .show = t_show,
2593};
2594
2595static int
2596ftrace_avail_open(struct inode *inode, struct file *file)
2597{
2598 struct ftrace_iterator *iter;
2599 int ret;
2600
2601 if (unlikely(ftrace_disabled))
2602 return -ENODEV;
2603
2604 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2605 if (!iter)
2606 return -ENOMEM;
2607
2608 iter->pg = ftrace_pages_start;
2609 iter->ops = &global_ops;
2610
2611 ret = seq_open(file, &show_ftrace_seq_ops);
2612 if (!ret) {
2613 struct seq_file *m = file->private_data;
2614
2615 m->private = iter;
2616 } else {
2617 kfree(iter);
2618 }
2619
2620 return ret;
2621}
2622
2623static int
2624ftrace_enabled_open(struct inode *inode, struct file *file)
2625{
2626 struct ftrace_iterator *iter;
2627 int ret;
2628
2629 if (unlikely(ftrace_disabled))
2630 return -ENODEV;
2631
2632 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2633 if (!iter)
2634 return -ENOMEM;
2635
2636 iter->pg = ftrace_pages_start;
2637 iter->flags = FTRACE_ITER_ENABLED;
2638 iter->ops = &global_ops;
2639
2640 ret = seq_open(file, &show_ftrace_seq_ops);
2641 if (!ret) {
2642 struct seq_file *m = file->private_data;
2643
2644 m->private = iter;
2645 } else {
2646 kfree(iter);
2647 }
2648
2649 return ret;
2650}
2651
2652static void ftrace_filter_reset(struct ftrace_hash *hash)
2653{
2654 mutex_lock(&ftrace_lock);
2655 ftrace_hash_clear(hash);
2656 mutex_unlock(&ftrace_lock);
2657}
2658
2659/**
2660 * ftrace_regex_open - initialize function tracer filter files
2661 * @ops: The ftrace_ops that hold the hash filters
2662 * @flag: The type of filter to process
2663 * @inode: The inode, usually passed in to your open routine
2664 * @file: The file, usually passed in to your open routine
2665 *
2666 * ftrace_regex_open() initializes the filter files for the
2667 * @ops. Depending on @flag it may process the filter hash or
2668 * the notrace hash of @ops. With this called from the open
2669 * routine, you can use ftrace_filter_write() for the write
2670 * routine if @flag has FTRACE_ITER_FILTER set, or
2671 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2672 * ftrace_filter_lseek() should be used as the lseek routine, and
2673 * release must call ftrace_regex_release().
2674 */
2675int
2676ftrace_regex_open(struct ftrace_ops *ops, int flag,
2677 struct inode *inode, struct file *file)
2678{
2679 struct ftrace_iterator *iter;
2680 struct ftrace_hash *hash;
2681 int ret = 0;
2682
2683 if (unlikely(ftrace_disabled))
2684 return -ENODEV;
2685
2686 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2687 if (!iter)
2688 return -ENOMEM;
2689
2690 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2691 kfree(iter);
2692 return -ENOMEM;
2693 }
2694
2695 if (flag & FTRACE_ITER_NOTRACE)
2696 hash = ops->notrace_hash;
2697 else
2698 hash = ops->filter_hash;
2699
2700 iter->ops = ops;
2701 iter->flags = flag;
2702
2703 if (file->f_mode & FMODE_WRITE) {
2704 mutex_lock(&ftrace_lock);
2705 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2706 mutex_unlock(&ftrace_lock);
2707
2708 if (!iter->hash) {
2709 trace_parser_put(&iter->parser);
2710 kfree(iter);
2711 return -ENOMEM;
2712 }
2713 }
2714
2715 mutex_lock(&ftrace_regex_lock);
2716
2717 if ((file->f_mode & FMODE_WRITE) &&
2718 (file->f_flags & O_TRUNC))
2719 ftrace_filter_reset(iter->hash);
2720
2721 if (file->f_mode & FMODE_READ) {
2722 iter->pg = ftrace_pages_start;
2723
2724 ret = seq_open(file, &show_ftrace_seq_ops);
2725 if (!ret) {
2726 struct seq_file *m = file->private_data;
2727 m->private = iter;
2728 } else {
2729 /* Failed */
2730 free_ftrace_hash(iter->hash);
2731 trace_parser_put(&iter->parser);
2732 kfree(iter);
2733 }
2734 } else
2735 file->private_data = iter;
2736 mutex_unlock(&ftrace_regex_lock);
2737
2738 return ret;
2739}
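/*
 * A minimal sketch of the wiring the kernel-doc above describes; it
 * mirrors ftrace_filter_fops further down, with "my_ops" standing in
 * for a caller's ftrace_ops:
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *					 inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= my_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= ftrace_filter_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 */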
2740
2741static int
2742ftrace_filter_open(struct inode *inode, struct file *file)
2743{
2744 return ftrace_regex_open(&global_ops,
2745 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2746 inode, file);
2747}
2748
2749static int
2750ftrace_notrace_open(struct inode *inode, struct file *file)
2751{
2752 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2753 inode, file);
2754}
2755
2756static int ftrace_match(char *str, char *regex, int len, int type)
2757{
2758 int matched = 0;
2759 int slen;
2760
2761 switch (type) {
2762 case MATCH_FULL:
2763 if (strcmp(str, regex) == 0)
2764 matched = 1;
2765 break;
2766 case MATCH_FRONT_ONLY:
2767 if (strncmp(str, regex, len) == 0)
2768 matched = 1;
2769 break;
2770 case MATCH_MIDDLE_ONLY:
2771 if (strstr(str, regex))
2772 matched = 1;
2773 break;
2774 case MATCH_END_ONLY:
2775 slen = strlen(str);
2776 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2777 matched = 1;
2778 break;
2779 }
2780
2781 return matched;
2782}
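/*
 * For reference, filter_parse_regex() maps user globs onto these types
 * (the glob text is illustrative):
 *
 *	"schedule"	-> MATCH_FULL		strcmp() the whole name
 *	"sched_*"	-> MATCH_FRONT_ONLY	strncmp() the prefix
 *	"*_lock"	-> MATCH_END_ONLY	memcmp() the suffix
 *	"*spin*"	-> MATCH_MIDDLE_ONLY	strstr() anywhere
 */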
2783
2784static int
2785enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2786{
2787 struct ftrace_func_entry *entry;
2788 int ret = 0;
2789
2790 entry = ftrace_lookup_ip(hash, rec->ip);
2791 if (not) {
2792 /* Do nothing if it doesn't exist */
2793 if (!entry)
2794 return 0;
2795
2796 free_hash_entry(hash, entry);
2797 } else {
2798 /* Do nothing if it exists */
2799 if (entry)
2800 return 0;
2801
2802 ret = add_hash_entry(hash, rec->ip);
2803 }
2804 return ret;
2805}
2806
2807static int
2808ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2809 char *regex, int len, int type)
2810{
2811 char str[KSYM_SYMBOL_LEN];
2812 char *modname;
2813
2814 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2815
2816 if (mod) {
2817 /* module lookup requires matching the module */
2818 if (!modname || strcmp(modname, mod))
2819 return 0;
2820
2821 /* blank search means to match all funcs in the mod */
2822 if (!len)
2823 return 1;
2824 }
2825
2826 return ftrace_match(str, regex, len, type);
2827}
2828
2829static int
2830match_records(struct ftrace_hash *hash, char *buff,
2831 int len, char *mod, int not)
2832{
2833 unsigned search_len = 0;
2834 struct ftrace_page *pg;
2835 struct dyn_ftrace *rec;
2836 int type = MATCH_FULL;
2837 char *search = buff;
2838 int found = 0;
2839 int ret;
2840
2841 if (len) {
2842 type = filter_parse_regex(buff, len, &search, &not);
2843 search_len = strlen(search);
2844 }
2845
2846 mutex_lock(&ftrace_lock);
2847
2848 if (unlikely(ftrace_disabled))
2849 goto out_unlock;
2850
2851 do_for_each_ftrace_rec(pg, rec) {
2852 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2853 ret = enter_record(hash, rec, not);
2854 if (ret < 0) {
2855 found = ret;
2856 goto out_unlock;
2857 }
2858 found = 1;
2859 }
2860 } while_for_each_ftrace_rec();
2861 out_unlock:
2862 mutex_unlock(&ftrace_lock);
2863
2864 return found;
2865}
2866
2867static int
2868ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2869{
2870 return match_records(hash, buff, len, NULL, 0);
2871}
2872
2873static int
2874ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2875{
2876 int not = 0;
2877
2878 /* blank or '*' mean the same */
2879 if (strcmp(buff, "*") == 0)
2880 buff[0] = 0;
2881
2882 /* handle the case of 'dont filter this module' */
2883 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2884 buff[0] = 0;
2885 not = 1;
2886 }
2887
2888 return match_records(hash, buff, strlen(buff), mod, not);
2889}
2890
2891/*
2892 * We register the module command as a template to show others how
2893 * to register a command as well.
2894 */
2895
2896static int
2897ftrace_mod_callback(struct ftrace_hash *hash,
2898 char *func, char *cmd, char *param, int enable)
2899{
2900 char *mod;
2901 int ret = -EINVAL;
2902
2903 /*
2904 * cmd == 'mod' because we only registered this func
2905 * for the 'mod' ftrace_func_command.
2906 * But if you register one func with multiple commands,
2907 * you can tell which command was used by the cmd
2908 * parameter.
2909 */
2910
2911 /* we must have a module name */
2912 if (!param)
2913 return ret;
2914
2915 mod = strsep(&param, ":");
2916 if (!strlen(mod))
2917 return ret;
2918
2919 ret = ftrace_match_module_records(hash, func, mod);
2920 if (!ret)
2921 ret = -EINVAL;
2922 if (ret < 0)
2923 return ret;
2924
2925 return 0;
2926}
2927
2928static struct ftrace_func_command ftrace_mod_cmd = {
2929 .name = "mod",
2930 .func = ftrace_mod_callback,
2931};
2932
2933static int __init ftrace_mod_cmd_init(void)
2934{
2935 return register_ftrace_command(&ftrace_mod_cmd);
2936}
2937device_initcall(ftrace_mod_cmd_init);
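/*
 * Sketch of how another command could be registered; the "mycmd" name
 * and callback are hypothetical. The callback runs when
 * "<func>:mycmd:<param>" is written to set_ftrace_filter:
 *
 *	static int my_cmd_callback(struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	device_initcall(my_cmd_init);
 */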
2938
2939static void
2940function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2941{
2942 struct ftrace_func_probe *entry;
2943 struct hlist_head *hhd;
2944 struct hlist_node *n;
2945 unsigned long key;
2946
2947 key = hash_long(ip, FTRACE_HASH_BITS);
2948
2949 hhd = &ftrace_func_hash[key];
2950
2951 if (hlist_empty(hhd))
2952 return;
2953
2954 /*
2955	 * Disable preemption for these calls to hold off an RCU-sched grace
2956	 * period. This synchronizes the hash iteration with the freeing of
2957	 * items on the hash; rcu_read_lock is too dangerous here.
2958 */
2959 preempt_disable_notrace();
2960 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2961 if (entry->ip == ip)
2962 entry->ops->func(ip, parent_ip, &entry->data);
2963 }
2964 preempt_enable_notrace();
2965}
2966
2967static struct ftrace_ops trace_probe_ops __read_mostly =
2968{
2969 .func = function_trace_probe_call,
2970};
2971
2972static int ftrace_probe_registered;
2973
2974static void __enable_ftrace_function_probe(void)
2975{
2976 int ret;
2977 int i;
2978
2979 if (ftrace_probe_registered)
2980 return;
2981
2982 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2983 struct hlist_head *hhd = &ftrace_func_hash[i];
2984 if (hhd->first)
2985 break;
2986 }
2987 /* Nothing registered? */
2988 if (i == FTRACE_FUNC_HASHSIZE)
2989 return;
2990
2991 ret = ftrace_startup(&trace_probe_ops, 0);
2992
2993 ftrace_probe_registered = 1;
2994}
2995
2996static void __disable_ftrace_function_probe(void)
2997{
2998 int i;
2999
3000 if (!ftrace_probe_registered)
3001 return;
3002
3003 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3004 struct hlist_head *hhd = &ftrace_func_hash[i];
3005 if (hhd->first)
3006 return;
3007 }
3008
3009 /* no more funcs left */
3010 ftrace_shutdown(&trace_probe_ops, 0);
3011
3012 ftrace_probe_registered = 0;
3013}
3014
3015
3016static void ftrace_free_entry_rcu(struct rcu_head *rhp)
3017{
3018 struct ftrace_func_probe *entry =
3019 container_of(rhp, struct ftrace_func_probe, rcu);
3020
3021 if (entry->ops->free)
3022 entry->ops->free(&entry->data);
3023 kfree(entry);
3024}
3025
3026
3027int
3028register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3029 void *data)
3030{
3031 struct ftrace_func_probe *entry;
3032 struct ftrace_page *pg;
3033 struct dyn_ftrace *rec;
3034 int type, len, not;
3035 unsigned long key;
3036 int count = 0;
3037 char *search;
3038
3039 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3040 len = strlen(search);
3041
3042 /* we do not support '!' for function probes */
3043 if (WARN_ON(not))
3044 return -EINVAL;
3045
3046 mutex_lock(&ftrace_lock);
3047
3048 if (unlikely(ftrace_disabled))
3049 goto out_unlock;
3050
3051 do_for_each_ftrace_rec(pg, rec) {
3052
3053 if (!ftrace_match_record(rec, NULL, search, len, type))
3054 continue;
3055
3056 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3057 if (!entry) {
3058 /* If we did not process any, then return error */
3059 if (!count)
3060 count = -ENOMEM;
3061 goto out_unlock;
3062 }
3063
3064 count++;
3065
3066 entry->data = data;
3067
3068 /*
3069 * The caller might want to do something special
3070 * for each function we find. We call the callback
3071 * to give the caller an opportunity to do so.
3072 */
3073 if (ops->callback) {
3074 if (ops->callback(rec->ip, &entry->data) < 0) {
3075 /* caller does not like this func */
3076 kfree(entry);
3077 continue;
3078 }
3079 }
3080
3081 entry->ops = ops;
3082 entry->ip = rec->ip;
3083
3084 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3085 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3086
3087 } while_for_each_ftrace_rec();
3088 __enable_ftrace_function_probe();
3089
3090 out_unlock:
3091 mutex_unlock(&ftrace_lock);
3092
3093 return count;
3094}
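/*
 * A caller-side sketch (names hypothetical); the traceon/traceoff
 * probes in trace_functions.c are wired up this way. The func callback
 * fires from function_trace_probe_call() above on every matched hit:
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 */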
3095
3096enum {
3097 PROBE_TEST_FUNC = 1,
3098 PROBE_TEST_DATA = 2
3099};
3100
3101static void
3102__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3103 void *data, int flags)
3104{
3105 struct ftrace_func_probe *entry;
3106 struct hlist_node *n, *tmp;
3107 char str[KSYM_SYMBOL_LEN];
3108 int type = MATCH_FULL;
3109 int i, len = 0;
3110 char *search;
3111
3112 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3113 glob = NULL;
3114 else if (glob) {
3115 int not;
3116
3117 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3118 len = strlen(search);
3119
3120 /* we do not support '!' for function probes */
3121 if (WARN_ON(not))
3122 return;
3123 }
3124
3125 mutex_lock(&ftrace_lock);
3126 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3127 struct hlist_head *hhd = &ftrace_func_hash[i];
3128
3129 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
3130
3131 /* break up if statements for readability */
3132 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3133 continue;
3134
3135 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3136 continue;
3137
3138 /* do this last, since it is the most expensive */
3139 if (glob) {
3140 kallsyms_lookup(entry->ip, NULL, NULL,
3141 NULL, str);
3142 if (!ftrace_match(str, glob, len, type))
3143 continue;
3144 }
3145
3146 hlist_del_rcu(&entry->node);
3147 call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
3148 }
3149 }
3150 __disable_ftrace_function_probe();
3151 mutex_unlock(&ftrace_lock);
3152}
3153
3154void
3155unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3156 void *data)
3157{
3158 __unregister_ftrace_function_probe(glob, ops, data,
3159 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3160}
3161
3162void
3163unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3164{
3165 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3166}
3167
3168void unregister_ftrace_function_probe_all(char *glob)
3169{
3170 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3171}
3172
3173static LIST_HEAD(ftrace_commands);
3174static DEFINE_MUTEX(ftrace_cmd_mutex);
3175
3176int register_ftrace_command(struct ftrace_func_command *cmd)
3177{
3178 struct ftrace_func_command *p;
3179 int ret = 0;
3180
3181 mutex_lock(&ftrace_cmd_mutex);
3182 list_for_each_entry(p, &ftrace_commands, list) {
3183 if (strcmp(cmd->name, p->name) == 0) {
3184 ret = -EBUSY;
3185 goto out_unlock;
3186 }
3187 }
3188 list_add(&cmd->list, &ftrace_commands);
3189 out_unlock:
3190 mutex_unlock(&ftrace_cmd_mutex);
3191
3192 return ret;
3193}
3194
3195int unregister_ftrace_command(struct ftrace_func_command *cmd)
3196{
3197 struct ftrace_func_command *p, *n;
3198 int ret = -ENODEV;
3199
3200 mutex_lock(&ftrace_cmd_mutex);
3201 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3202 if (strcmp(cmd->name, p->name) == 0) {
3203 ret = 0;
3204 list_del_init(&p->list);
3205 goto out_unlock;
3206 }
3207 }
3208 out_unlock:
3209 mutex_unlock(&ftrace_cmd_mutex);
3210
3211 return ret;
3212}
3213
3214static int ftrace_process_regex(struct ftrace_hash *hash,
3215 char *buff, int len, int enable)
3216{
3217 char *func, *command, *next = buff;
3218 struct ftrace_func_command *p;
3219 int ret = -EINVAL;
3220
3221 func = strsep(&next, ":");
3222
3223 if (!next) {
3224 ret = ftrace_match_records(hash, func, len);
3225 if (!ret)
3226 ret = -EINVAL;
3227 if (ret < 0)
3228 return ret;
3229 return 0;
3230 }
3231
3232 /* command found */
3233
3234 command = strsep(&next, ":");
3235
3236 mutex_lock(&ftrace_cmd_mutex);
3237 list_for_each_entry(p, &ftrace_commands, list) {
3238 if (strcmp(p->name, command) == 0) {
3239 ret = p->func(hash, func, command, next, enable);
3240 goto out_unlock;
3241 }
3242 }
3243 out_unlock:
3244 mutex_unlock(&ftrace_cmd_mutex);
3245
3246 return ret;
3247}
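/*
 * The grammar accepted here is "<func>[:<command>[:<param>]]". An
 * illustrative use of the "mod" command registered above, from
 * userspace:
 *
 *	echo 'btrfs_*:mod:btrfs' > set_ftrace_filter
 *
 * which filters on every function starting with "btrfs_" that lives in
 * the btrfs module.
 */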
3248
3249static ssize_t
3250ftrace_regex_write(struct file *file, const char __user *ubuf,
3251 size_t cnt, loff_t *ppos, int enable)
3252{
3253 struct ftrace_iterator *iter;
3254 struct trace_parser *parser;
3255 ssize_t ret, read;
3256
3257 if (!cnt)
3258 return 0;
3259
3260 mutex_lock(&ftrace_regex_lock);
3261
3262 ret = -ENODEV;
3263 if (unlikely(ftrace_disabled))
3264 goto out_unlock;
3265
3266 if (file->f_mode & FMODE_READ) {
3267 struct seq_file *m = file->private_data;
3268 iter = m->private;
3269 } else
3270 iter = file->private_data;
3271
3272 parser = &iter->parser;
3273 read = trace_get_user(parser, ubuf, cnt, ppos);
3274
3275 if (read >= 0 && trace_parser_loaded(parser) &&
3276 !trace_parser_cont(parser)) {
3277 ret = ftrace_process_regex(iter->hash, parser->buffer,
3278 parser->idx, enable);
3279 trace_parser_clear(parser);
3280 if (ret)
3281 goto out_unlock;
3282 }
3283
3284 ret = read;
3285out_unlock:
3286 mutex_unlock(&ftrace_regex_lock);
3287
3288 return ret;
3289}
3290
3291ssize_t
3292ftrace_filter_write(struct file *file, const char __user *ubuf,
3293 size_t cnt, loff_t *ppos)
3294{
3295 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3296}
3297
3298ssize_t
3299ftrace_notrace_write(struct file *file, const char __user *ubuf,
3300 size_t cnt, loff_t *ppos)
3301{
3302 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3303}
3304
3305static int
3306ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3307 int reset, int enable)
3308{
3309 struct ftrace_hash **orig_hash;
3310 struct ftrace_hash *hash;
3311 int ret;
3312
3313	/* All global ops use the global ops filters */
3314 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3315 ops = &global_ops;
3316
3317 if (unlikely(ftrace_disabled))
3318 return -ENODEV;
3319
3320 if (enable)
3321 orig_hash = &ops->filter_hash;
3322 else
3323 orig_hash = &ops->notrace_hash;
3324
3325 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3326 if (!hash)
3327 return -ENOMEM;
3328
3329 mutex_lock(&ftrace_regex_lock);
3330 if (reset)
3331 ftrace_filter_reset(hash);
3332 if (buf && !ftrace_match_records(hash, buf, len)) {
3333 ret = -EINVAL;
3334 goto out_regex_unlock;
3335 }
3336
3337 mutex_lock(&ftrace_lock);
3338 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3339 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3340 && ftrace_enabled)
3341 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3342
3343 mutex_unlock(&ftrace_lock);
3344
3345 out_regex_unlock:
3346 mutex_unlock(&ftrace_regex_lock);
3347
3348 free_ftrace_hash(hash);
3349 return ret;
3350}
3351
3352/**
3353 * ftrace_set_filter - set a function to filter on in ftrace
3354 * @ops: the ops to set the filter with
3355 * @buf: the string that holds the function filter text.
3356 * @len: the length of the string.
3357 * @reset: non zero to reset all filters before applying this filter.
3358 *
3359 * Filters denote which functions should be enabled when tracing is enabled.
3360 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3361 */
3362int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3363 int len, int reset)
3364{
3365 return ftrace_set_regex(ops, buf, len, reset, 1);
3366}
3367EXPORT_SYMBOL_GPL(ftrace_set_filter);
3368
3369/**
3370 * ftrace_set_notrace - set a function to not trace in ftrace
3371 * @ops: the ops to set the notrace filter with
3372 * @buf: the string that holds the function notrace text.
3373 * @len: the length of the string.
3374 * @reset: non zero to reset all filters before applying this filter.
3375 *
3376 * Notrace Filters denote which functions should not be enabled when tracing
3377 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3378 * for tracing.
3379 */
3380int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3381 int len, int reset)
3382{
3383 return ftrace_set_regex(ops, buf, len, reset, 0);
3384}
3385EXPORT_SYMBOL_GPL(ftrace_set_notrace);
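/*
 * In-kernel usage sketch ("my_ops" and the patterns are hypothetical):
 *
 *	ftrace_set_filter(&my_ops, "kmem_*", strlen("kmem_*"), 1);
 *	ftrace_set_notrace(&my_ops, "kmem_cache_free",
 *			   strlen("kmem_cache_free"), 0);
 *	register_ftrace_function(&my_ops);
 *
 * Passing reset == 1 clears any previously set filter first.
 */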
3386/**
3387 * ftrace_set_global_filter - set a function to filter on with global tracers
3388 * @buf: the string that holds the function filter text.
3389 * @len: the length of the string.
3390 * @reset: non zero to reset all filters before applying this filter.
3392 *
3393 * Filters denote which functions should be enabled when tracing is enabled.
3394 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3395 */
3396void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3397{
3398 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3399}
3400EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3401
3402/**
3403 * ftrace_set_global_notrace - set a function to not trace with global tracers
3404 * @buf: the string that holds the function notrace text.
3405 * @len: the length of the string.
3406 * @reset: non zero to reset all filters before applying this filter.
3408 *
3409 * Notrace Filters denote which functions should not be enabled when tracing
3410 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3411 * for tracing.
3412 */
3413void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3414{
3415 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3416}
3417EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3418
3419/*
3420 * command line interface to allow users to set filters on boot up.
3421 */
3422#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3423static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3424static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3425
3426static int __init set_ftrace_notrace(char *str)
3427{
3428	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3429 return 1;
3430}
3431__setup("ftrace_notrace=", set_ftrace_notrace);
3432
3433static int __init set_ftrace_filter(char *str)
3434{
3435	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3436 return 1;
3437}
3438__setup("ftrace_filter=", set_ftrace_filter);
3439
3440#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3441static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3442static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3443
3444static int __init set_graph_function(char *str)
3445{
3446 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3447 return 1;
3448}
3449__setup("ftrace_graph_filter=", set_graph_function);
3450
3451static void __init set_ftrace_early_graph(char *buf)
3452{
3453 int ret;
3454 char *func;
3455
3456 while (buf) {
3457 func = strsep(&buf, ",");
3458 /* we allow only one expression at a time */
3459 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3460 func);
3461 if (ret)
3462			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
3463			       func);
3464 }
3465}
3466#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3467
3468void __init
3469ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3470{
3471 char *func;
3472
3473 while (buf) {
3474 func = strsep(&buf, ",");
3475 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3476 }
3477}
3478
3479static void __init set_ftrace_early_filters(void)
3480{
3481 if (ftrace_filter_buf[0])
3482 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3483 if (ftrace_notrace_buf[0])
3484 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3485#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3486 if (ftrace_graph_buf[0])
3487 set_ftrace_early_graph(ftrace_graph_buf);
3488#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3489}
3490
3491int ftrace_regex_release(struct inode *inode, struct file *file)
3492{
3493	struct seq_file *m = file->private_data;
3494 struct ftrace_iterator *iter;
3495 struct ftrace_hash **orig_hash;
3496 struct trace_parser *parser;
3497 int filter_hash;
3498 int ret;
3499
3500 mutex_lock(&ftrace_regex_lock);
3501 if (file->f_mode & FMODE_READ) {
3502 iter = m->private;
3503
3504 seq_release(inode, file);
3505 } else
3506 iter = file->private_data;
3507
3508 parser = &iter->parser;
3509 if (trace_parser_loaded(parser)) {
3510 parser->buffer[parser->idx] = 0;
3511 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3512 }
3513
3514 trace_parser_put(parser);
3515
3516 if (file->f_mode & FMODE_WRITE) {
3517 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3518
3519 if (filter_hash)
3520 orig_hash = &iter->ops->filter_hash;
3521 else
3522 orig_hash = &iter->ops->notrace_hash;
3523
3524 mutex_lock(&ftrace_lock);
3525 ret = ftrace_hash_move(iter->ops, filter_hash,
3526 orig_hash, iter->hash);
3527 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3528 && ftrace_enabled)
3529 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3530
3531 mutex_unlock(&ftrace_lock);
3532 }
3533 free_ftrace_hash(iter->hash);
3534 kfree(iter);
3535
3536 mutex_unlock(&ftrace_regex_lock);
3537 return 0;
3538}
3539
3540static const struct file_operations ftrace_avail_fops = {
3541 .open = ftrace_avail_open,
3542 .read = seq_read,
3543 .llseek = seq_lseek,
3544 .release = seq_release_private,
3545};
3546
3547static const struct file_operations ftrace_enabled_fops = {
3548 .open = ftrace_enabled_open,
3549 .read = seq_read,
3550 .llseek = seq_lseek,
3551 .release = seq_release_private,
3552};
3553
3554static const struct file_operations ftrace_filter_fops = {
3555 .open = ftrace_filter_open,
3556 .read = seq_read,
3557 .write = ftrace_filter_write,
3558 .llseek = ftrace_filter_lseek,
3559 .release = ftrace_regex_release,
3560};
3561
3562static const struct file_operations ftrace_notrace_fops = {
3563 .open = ftrace_notrace_open,
3564 .read = seq_read,
3565 .write = ftrace_notrace_write,
3566 .llseek = ftrace_filter_lseek,
3567 .release = ftrace_regex_release,
3568};
3569
3570#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3571
3572static DEFINE_MUTEX(graph_lock);
3573
3574int ftrace_graph_count;
3575int ftrace_graph_filter_enabled;
3576unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3577
3578static void *
3579__g_next(struct seq_file *m, loff_t *pos)
3580{
3581 if (*pos >= ftrace_graph_count)
3582 return NULL;
3583 return &ftrace_graph_funcs[*pos];
3584}
3585
3586static void *
3587g_next(struct seq_file *m, void *v, loff_t *pos)
3588{
3589 (*pos)++;
3590 return __g_next(m, pos);
3591}
3592
3593static void *g_start(struct seq_file *m, loff_t *pos)
3594{
3595 mutex_lock(&graph_lock);
3596
3597	/* No filter set? Tell g_show to print that all functions are enabled */
3598 if (!ftrace_graph_filter_enabled && !*pos)
3599 return (void *)1;
3600
3601 return __g_next(m, pos);
3602}
3603
3604static void g_stop(struct seq_file *m, void *p)
3605{
3606 mutex_unlock(&graph_lock);
3607}
3608
3609static int g_show(struct seq_file *m, void *v)
3610{
3611 unsigned long *ptr = v;
3612
3613 if (!ptr)
3614 return 0;
3615
3616 if (ptr == (unsigned long *)1) {
3617 seq_printf(m, "#### all functions enabled ####\n");
3618 return 0;
3619 }
3620
3621 seq_printf(m, "%ps\n", (void *)*ptr);
3622
3623 return 0;
3624}
3625
3626static const struct seq_operations ftrace_graph_seq_ops = {
3627 .start = g_start,
3628 .next = g_next,
3629 .stop = g_stop,
3630 .show = g_show,
3631};
3632
3633static int
3634ftrace_graph_open(struct inode *inode, struct file *file)
3635{
3636 int ret = 0;
3637
3638 if (unlikely(ftrace_disabled))
3639 return -ENODEV;
3640
3641 mutex_lock(&graph_lock);
3642 if ((file->f_mode & FMODE_WRITE) &&
3643 (file->f_flags & O_TRUNC)) {
3644 ftrace_graph_filter_enabled = 0;
3645 ftrace_graph_count = 0;
3646 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3647 }
3648 mutex_unlock(&graph_lock);
3649
3650 if (file->f_mode & FMODE_READ)
3651 ret = seq_open(file, &ftrace_graph_seq_ops);
3652
3653 return ret;
3654}
3655
3656static int
3657ftrace_graph_release(struct inode *inode, struct file *file)
3658{
3659 if (file->f_mode & FMODE_READ)
3660 seq_release(inode, file);
3661 return 0;
3662}
3663
3664static int
3665ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3666{
3667 struct dyn_ftrace *rec;
3668 struct ftrace_page *pg;
3669 int search_len;
3670 int fail = 1;
3671 int type, not;
3672 char *search;
3673 bool exists;
3674 int i;
3675
3676 /* decode regex */
3677 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3678 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3679 return -EBUSY;
3680
3681 search_len = strlen(search);
3682
3683 mutex_lock(&ftrace_lock);
3684
3685 if (unlikely(ftrace_disabled)) {
3686 mutex_unlock(&ftrace_lock);
3687 return -ENODEV;
3688 }
3689
3690 do_for_each_ftrace_rec(pg, rec) {
3691
3692 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3693 /* if it is in the array */
3694 exists = false;
3695 for (i = 0; i < *idx; i++) {
3696 if (array[i] == rec->ip) {
3697 exists = true;
3698 break;
3699 }
3700 }
3701
3702 if (!not) {
3703 fail = 0;
3704 if (!exists) {
3705 array[(*idx)++] = rec->ip;
3706 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3707 goto out;
3708 }
3709 } else {
3710 if (exists) {
3711 array[i] = array[--(*idx)];
3712 array[*idx] = 0;
3713 fail = 0;
3714 }
3715 }
3716 }
3717 } while_for_each_ftrace_rec();
3718out:
3719 mutex_unlock(&ftrace_lock);
3720
3721 if (fail)
3722 return -EINVAL;
3723
3724 ftrace_graph_filter_enabled = !!(*idx);
3725
3726 return 0;
3727}
3728
3729static ssize_t
3730ftrace_graph_write(struct file *file, const char __user *ubuf,
3731 size_t cnt, loff_t *ppos)
3732{
3733 struct trace_parser parser;
3734 ssize_t read, ret;
3735
3736 if (!cnt)
3737 return 0;
3738
3739 mutex_lock(&graph_lock);
3740
3741 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3742 ret = -ENOMEM;
3743 goto out_unlock;
3744 }
3745
3746 read = trace_get_user(&parser, ubuf, cnt, ppos);
3747
3748	if (read >= 0 && trace_parser_loaded(&parser)) {
3749 parser.buffer[parser.idx] = 0;
3750
3751 /* we allow only one expression at a time */
3752 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3753 parser.buffer);
3754 if (ret)
3755 goto out_free;
3756 }
3757
3758 ret = read;
3759
3760out_free:
3761 trace_parser_put(&parser);
3762out_unlock:
3763 mutex_unlock(&graph_lock);
3764
3765 return ret;
3766}
3767
3768static const struct file_operations ftrace_graph_fops = {
3769 .open = ftrace_graph_open,
3770 .read = seq_read,
3771 .write = ftrace_graph_write,
3772 .llseek = ftrace_filter_lseek,
3773 .release = ftrace_graph_release,
3774};
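/*
 * Usage sketch for the set_graph_function file this implements
 * (illustrative):
 *
 *	echo sys_open > set_graph_function
 *	echo sys_nanosleep >> set_graph_function
 *
 * ftrace_graph_write() accepts a single expression per write, hence
 * one function per echo.
 */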
3775#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3776
3777static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3778{
3779
3780 trace_create_file("available_filter_functions", 0444,
3781 d_tracer, NULL, &ftrace_avail_fops);
3782
3783 trace_create_file("enabled_functions", 0444,
3784 d_tracer, NULL, &ftrace_enabled_fops);
3785
3786 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3787 NULL, &ftrace_filter_fops);
3788
3789 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3790 NULL, &ftrace_notrace_fops);
3791
3792#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3793 trace_create_file("set_graph_function", 0444, d_tracer,
3794 NULL,
3795 &ftrace_graph_fops);
3796#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3797
3798 return 0;
3799}
3800
3801static void ftrace_swap_recs(void *a, void *b, int size)
3802{
3803 struct dyn_ftrace *reca = a;
3804 struct dyn_ftrace *recb = b;
3805 struct dyn_ftrace t;
3806
3807 t = *reca;
3808 *reca = *recb;
3809 *recb = t;
3810}
3811
3812static int ftrace_process_locs(struct module *mod,
3813 unsigned long *start,
3814 unsigned long *end)
3815{
3816 struct ftrace_page *pg;
3817 unsigned long count;
3818 unsigned long *p;
3819 unsigned long addr;
3820 unsigned long flags = 0; /* Shut up gcc */
3821 int ret = -ENOMEM;
3822
3823 count = end - start;
3824
3825 if (!count)
3826 return 0;
3827
3828 pg = ftrace_allocate_pages(count);
3829 if (!pg)
3830 return -ENOMEM;
3831
3832 mutex_lock(&ftrace_lock);
3833
3834 /*
3835	 * The core kernel and each module need their own pages, as
3836 * modules will free them when they are removed.
3837 * Force a new page to be allocated for modules.
3838 */
3839 if (!mod) {
3840 WARN_ON(ftrace_pages || ftrace_pages_start);
3841 /* First initialization */
3842 ftrace_pages = ftrace_pages_start = pg;
3843 } else {
3844 if (!ftrace_pages)
3845 goto out;
3846
3847 if (WARN_ON(ftrace_pages->next)) {
3848 /* Hmm, we have free pages? */
3849 while (ftrace_pages->next)
3850 ftrace_pages = ftrace_pages->next;
3851 }
3852
3853 ftrace_pages->next = pg;
3854 ftrace_pages = pg;
3855 }
3856
3857 p = start;
3858 while (p < end) {
3859 addr = ftrace_call_adjust(*p++);
3860 /*
3861 * Some architecture linkers will pad between
3862 * the different mcount_loc sections of different
3863 * object files to satisfy alignments.
3864 * Skip any NULL pointers.
3865 */
3866 if (!addr)
3867 continue;
3868 if (!ftrace_record_ip(addr))
3869 break;
3870 }
3871
3872 /* These new locations need to be initialized */
3873 ftrace_new_pgs = pg;
3874
3875 /* Make each individual set of pages sorted by ips */
3876 for (; pg; pg = pg->next)
3877 sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
3878 ftrace_cmp_recs, ftrace_swap_recs);
3879
3880 /*
3881 * We only need to disable interrupts on start up
3882 * because we are modifying code that an interrupt
3883 * may execute, and the modification is not atomic.
3884 * But for modules, nothing runs the code we modify
3885 * until we are finished with it, and there's no
3886 * reason to cause large interrupt latencies while we do it.
3887 */
3888 if (!mod)
3889 local_irq_save(flags);
3890 ftrace_update_code(mod);
3891 if (!mod)
3892 local_irq_restore(flags);
3893 ret = 0;
3894 out:
3895 mutex_unlock(&ftrace_lock);
3896
3897 return ret;
3898}
3899
3900#ifdef CONFIG_MODULES
3901
3902#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3903
3904void ftrace_release_mod(struct module *mod)
3905{
3906 struct dyn_ftrace *rec;
3907 struct ftrace_page **last_pg;
3908 struct ftrace_page *pg;
3909 int order;
3910
3911 mutex_lock(&ftrace_lock);
3912
3913 if (ftrace_disabled)
3914 goto out_unlock;
3915
3916 /*
3917 * Each module has its own ftrace_pages, remove
3918 * them from the list.
3919 */
3920 last_pg = &ftrace_pages_start;
3921 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3922 rec = &pg->records[0];
3923 if (within_module_core(rec->ip, mod)) {
3924 /*
3925 * As core pages are first, the first
3926 * page should never be a module page.
3927 */
3928 if (WARN_ON(pg == ftrace_pages_start))
3929 goto out_unlock;
3930
3931 /* Check if we are deleting the last page */
3932 if (pg == ftrace_pages)
3933 ftrace_pages = next_to_ftrace_page(last_pg);
3934
3935 *last_pg = pg->next;
3936 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3937 free_pages((unsigned long)pg->records, order);
3938 kfree(pg);
3939 } else
3940 last_pg = &pg->next;
3941 }
3942 out_unlock:
3943 mutex_unlock(&ftrace_lock);
3944}
3945
3946static void ftrace_init_module(struct module *mod,
3947 unsigned long *start, unsigned long *end)
3948{
3949 if (ftrace_disabled || start == end)
3950 return;
3951 ftrace_process_locs(mod, start, end);
3952}
3953
3954void ftrace_module_init(struct module *mod)
3955{
3956 ftrace_init_module(mod, mod->ftrace_callsites,
3957 mod->ftrace_callsites +
3958 mod->num_ftrace_callsites);
3959}
3960
3961static int ftrace_module_notify_exit(struct notifier_block *self,
3962 unsigned long val, void *data)
3963{
3964 struct module *mod = data;
3965
3966 if (val == MODULE_STATE_GOING)
3967 ftrace_release_mod(mod);
3968
3969 return 0;
3970}
3971#else
3972static int ftrace_module_notify_exit(struct notifier_block *self,
3973 unsigned long val, void *data)
3974{
3975 return 0;
3976}
3977#endif /* CONFIG_MODULES */
3978
3979struct notifier_block ftrace_module_exit_nb = {
3980 .notifier_call = ftrace_module_notify_exit,
3981 .priority = INT_MIN, /* Run after anything that can remove kprobes */
3982};
3983
3984extern unsigned long __start_mcount_loc[];
3985extern unsigned long __stop_mcount_loc[];
3986
3987void __init ftrace_init(void)
3988{
3989 unsigned long count, addr, flags;
3990 int ret;
3991
3992 /* Keep the ftrace pointer to the stub */
3993 addr = (unsigned long)ftrace_stub;
3994
3995 local_irq_save(flags);
3996 ftrace_dyn_arch_init(&addr);
3997 local_irq_restore(flags);
3998
3999 /* ftrace_dyn_arch_init places the return code in addr */
4000 if (addr)
4001 goto failed;
4002
4003 count = __stop_mcount_loc - __start_mcount_loc;
4004
4005 ret = ftrace_dyn_table_alloc(count);
4006 if (ret)
4007 goto failed;
4008
4009 last_ftrace_enabled = ftrace_enabled = 1;
4010
4011 ret = ftrace_process_locs(NULL,
4012 __start_mcount_loc,
4013 __stop_mcount_loc);
4014
4015 ret = register_module_notifier(&ftrace_module_exit_nb);
4016 if (ret)
4017		pr_warning("Failed to register ftrace module exit notifier\n");
4018
4019 set_ftrace_early_filters();
4020
4021 return;
4022 failed:
4023 ftrace_disabled = 1;
4024}
4025
4026#else
4027
4028static struct ftrace_ops global_ops = {
4029 .func = ftrace_stub,
4030};
4031
4032static int __init ftrace_nodyn_init(void)
4033{
4034 ftrace_enabled = 1;
4035 return 0;
4036}
4037device_initcall(ftrace_nodyn_init);
4038
4039static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4040static inline void ftrace_startup_enable(int command) { }
4041/* Keep as macros so we do not need to define the commands */
4042# define ftrace_startup(ops, command) \
4043 ({ \
4044 int ___ret = __register_ftrace_function(ops); \
4045 if (!___ret) \
4046 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4047 ___ret; \
4048 })
4049# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
4050
4051# define ftrace_startup_sysctl() do { } while (0)
4052# define ftrace_shutdown_sysctl() do { } while (0)
4053
4054static inline int
4055ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4056{
4057 return 1;
4058}
4059
4060#endif /* CONFIG_DYNAMIC_FTRACE */
4061
4062static void
4063ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
4064{
4065 struct ftrace_ops *op;
4066
4067 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4068 return;
4069
4070 /*
4071 * Some of the ops may be dynamically allocated,
4072 * they must be freed after a synchronize_sched().
4073 */
4074 preempt_disable_notrace();
4075 trace_recursion_set(TRACE_CONTROL_BIT);
4076 op = rcu_dereference_raw(ftrace_control_list);
4077 while (op != &ftrace_list_end) {
4078 if (!ftrace_function_local_disabled(op) &&
4079 ftrace_ops_test(op, ip))
4080 op->func(ip, parent_ip);
4081
4082 op = rcu_dereference_raw(op->next);
4083	}
4084 trace_recursion_clear(TRACE_CONTROL_BIT);
4085 preempt_enable_notrace();
4086}
4087
4088static struct ftrace_ops control_ops = {
4089 .func = ftrace_ops_control_func,
4090};
4091
4092static void
4093ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
4094{
4095 struct ftrace_ops *op;
4096
4097 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
4098 return;
4099
4100 trace_recursion_set(TRACE_INTERNAL_BIT);
4101 /*
4102 * Some of the ops may be dynamically allocated,
4103 * they must be freed after a synchronize_sched().
4104 */
4105 preempt_disable_notrace();
4106 op = rcu_dereference_raw(ftrace_ops_list);
4107 while (op != &ftrace_list_end) {
4108 if (ftrace_ops_test(op, ip))
4109 op->func(ip, parent_ip);
4110 op = rcu_dereference_raw(op->next);
4111	}
4112 preempt_enable_notrace();
4113 trace_recursion_clear(TRACE_INTERNAL_BIT);
4114}
4115
4116static void clear_ftrace_swapper(void)
4117{
4118 struct task_struct *p;
4119 int cpu;
4120
4121 get_online_cpus();
4122 for_each_online_cpu(cpu) {
4123 p = idle_task(cpu);
4124 clear_tsk_trace_trace(p);
4125 }
4126 put_online_cpus();
4127}
4128
4129static void set_ftrace_swapper(void)
4130{
4131 struct task_struct *p;
4132 int cpu;
4133
4134 get_online_cpus();
4135 for_each_online_cpu(cpu) {
4136 p = idle_task(cpu);
4137 set_tsk_trace_trace(p);
4138 }
4139 put_online_cpus();
4140}
4141
4142static void clear_ftrace_pid(struct pid *pid)
4143{
4144 struct task_struct *p;
4145
4146 rcu_read_lock();
4147 do_each_pid_task(pid, PIDTYPE_PID, p) {
4148 clear_tsk_trace_trace(p);
4149 } while_each_pid_task(pid, PIDTYPE_PID, p);
4150 rcu_read_unlock();
4151
4152 put_pid(pid);
4153}
4154
4155static void set_ftrace_pid(struct pid *pid)
4156{
4157 struct task_struct *p;
4158
4159 rcu_read_lock();
4160 do_each_pid_task(pid, PIDTYPE_PID, p) {
4161 set_tsk_trace_trace(p);
4162 } while_each_pid_task(pid, PIDTYPE_PID, p);
4163 rcu_read_unlock();
4164}
4165
4166static void clear_ftrace_pid_task(struct pid *pid)
4167{
4168 if (pid == ftrace_swapper_pid)
4169 clear_ftrace_swapper();
4170 else
4171 clear_ftrace_pid(pid);
4172}
4173
4174static void set_ftrace_pid_task(struct pid *pid)
4175{
4176 if (pid == ftrace_swapper_pid)
4177 set_ftrace_swapper();
4178 else
4179 set_ftrace_pid(pid);
4180}
4181
4182static int ftrace_pid_add(int p)
4183{
4184 struct pid *pid;
4185 struct ftrace_pid *fpid;
4186 int ret = -EINVAL;
4187
4188 mutex_lock(&ftrace_lock);
4189
4190 if (!p)
4191 pid = ftrace_swapper_pid;
4192 else
4193 pid = find_get_pid(p);
4194
4195 if (!pid)
4196 goto out;
4197
4198 ret = 0;
4199
4200 list_for_each_entry(fpid, &ftrace_pids, list)
4201 if (fpid->pid == pid)
4202 goto out_put;
4203
4204 ret = -ENOMEM;
4205
4206 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4207 if (!fpid)
4208 goto out_put;
4209
4210 list_add(&fpid->list, &ftrace_pids);
4211 fpid->pid = pid;
4212
4213 set_ftrace_pid_task(pid);
4214
4215 ftrace_update_pid_func();
4216 ftrace_startup_enable(0);
4217
4218 mutex_unlock(&ftrace_lock);
4219 return 0;
4220
4221out_put:
4222 if (pid != ftrace_swapper_pid)
4223 put_pid(pid);
4224
4225out:
4226 mutex_unlock(&ftrace_lock);
4227 return ret;
4228}
4229
4230static void ftrace_pid_reset(void)
4231{
4232 struct ftrace_pid *fpid, *safe;
4233
4234 mutex_lock(&ftrace_lock);
4235 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4236 struct pid *pid = fpid->pid;
4237
4238 clear_ftrace_pid_task(pid);
4239
4240 list_del(&fpid->list);
4241 kfree(fpid);
4242 }
4243
4244 ftrace_update_pid_func();
4245 ftrace_startup_enable(0);
4246
4247 mutex_unlock(&ftrace_lock);
4248}
4249
4250static void *fpid_start(struct seq_file *m, loff_t *pos)
4251{
4252 mutex_lock(&ftrace_lock);
4253
4254 if (list_empty(&ftrace_pids) && (!*pos))
4255 return (void *) 1;
4256
4257 return seq_list_start(&ftrace_pids, *pos);
4258}
4259
4260static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4261{
4262 if (v == (void *)1)
4263 return NULL;
4264
4265 return seq_list_next(v, &ftrace_pids, pos);
4266}
4267
4268static void fpid_stop(struct seq_file *m, void *p)
4269{
4270 mutex_unlock(&ftrace_lock);
4271}
4272
4273static int fpid_show(struct seq_file *m, void *v)
4274{
4275 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4276
4277 if (v == (void *)1) {
4278 seq_printf(m, "no pid\n");
4279 return 0;
4280 }
4281
4282 if (fpid->pid == ftrace_swapper_pid)
4283 seq_printf(m, "swapper tasks\n");
4284 else
4285 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4286
4287 return 0;
4288}
4289
4290static const struct seq_operations ftrace_pid_sops = {
4291 .start = fpid_start,
4292 .next = fpid_next,
4293 .stop = fpid_stop,
4294 .show = fpid_show,
4295};
4296
4297static int
4298ftrace_pid_open(struct inode *inode, struct file *file)
4299{
4300 int ret = 0;
4301
4302 if ((file->f_mode & FMODE_WRITE) &&
4303 (file->f_flags & O_TRUNC))
4304 ftrace_pid_reset();
4305
4306 if (file->f_mode & FMODE_READ)
4307 ret = seq_open(file, &ftrace_pid_sops);
4308
4309 return ret;
4310}
4311
4312static ssize_t
4313ftrace_pid_write(struct file *filp, const char __user *ubuf,
4314 size_t cnt, loff_t *ppos)
4315{
4316 char buf[64], *tmp;
4317 long val;
4318 int ret;
4319
4320 if (cnt >= sizeof(buf))
4321 return -EINVAL;
4322
4323 if (copy_from_user(&buf, ubuf, cnt))
4324 return -EFAULT;
4325
4326 buf[cnt] = 0;
4327
4328 /*
4329 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4330 * to clean the filter quietly.
4331 */
4332 tmp = strstrip(buf);
4333 if (strlen(tmp) == 0)
4334 return 1;
4335
4336	ret = kstrtol(tmp, 10, &val);
4337 if (ret < 0)
4338 return ret;
4339
4340 ret = ftrace_pid_add(val);
4341
4342 return ret ? ret : cnt;
4343}
4344
4345static int
4346ftrace_pid_release(struct inode *inode, struct file *file)
4347{
4348 if (file->f_mode & FMODE_READ)
4349 seq_release(inode, file);
4350
4351 return 0;
4352}
4353
4354static const struct file_operations ftrace_pid_fops = {
4355 .open = ftrace_pid_open,
4356 .write = ftrace_pid_write,
4357 .read = seq_read,
4358 .llseek = ftrace_filter_lseek,
4359 .release = ftrace_pid_release,
4360};
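/*
 * Usage sketch for set_ftrace_pid (illustrative):
 *
 *	echo 1234 > set_ftrace_pid		trace only pid 1234
 *	echo 0 > set_ftrace_pid			trace the idle (swapper) tasks
 *	echo > set_ftrace_pid			clear the pid filter
 *
 * Writing 0 maps to ftrace_swapper_pid; clearing happens via O_TRUNC
 * in ftrace_pid_open() above.
 */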
4361
4362static __init int ftrace_init_debugfs(void)
4363{
4364 struct dentry *d_tracer;
4365
4366 d_tracer = tracing_init_dentry();
4367 if (!d_tracer)
4368 return 0;
4369
4370 ftrace_init_dyn_debugfs(d_tracer);
4371
4372 trace_create_file("set_ftrace_pid", 0644, d_tracer,
4373 NULL, &ftrace_pid_fops);
4374
4375 ftrace_profile_debugfs(d_tracer);
4376
4377 return 0;
4378}
4379fs_initcall(ftrace_init_debugfs);
4380
4381/**
4382 * ftrace_kill - kill ftrace
4383 *
4384 * This function should be used by panic code. It stops ftrace
4385 * but in a not so nice way: nothing is unregistered and no locks
4386 * are taken. To stop tracing cleanly, unregister the ops instead.
4387 */
4388void ftrace_kill(void)
4389{
4390 ftrace_disabled = 1;
4391 ftrace_enabled = 0;
4392 clear_ftrace_function();
4393}
4394
4395/**
4396 * ftrace_is_dead - Test if ftrace is dead or not.
4397 */
4398int ftrace_is_dead(void)
4399{
4400 return ftrace_disabled;
4401}
4402
4403/**
4404 * register_ftrace_function - register a function for profiling
4405 * @ops: ops structure that holds the function for profiling.
4406 *
4407 * Register a function to be called by all functions in the
4408 * kernel.
4409 *
4410 * Note: @ops->func and all the functions it calls must be labeled
4411 * with "notrace", otherwise it will go into a
4412 * recursive loop.
4413 */
4414int register_ftrace_function(struct ftrace_ops *ops)
4415{
4416 int ret = -1;
4417
4418 mutex_lock(&ftrace_lock);
4419
4420 ret = ftrace_startup(ops, 0);
4421
4422 mutex_unlock(&ftrace_lock);
4423 return ret;
4424}
4425EXPORT_SYMBOL_GPL(register_ftrace_function);
4426
4427/**
4428 * unregister_ftrace_function - unregister a function for profiling.
4429 * @ops: ops structure that holds the function to unregister
4430 *
4431 * Unregister a function that was added to be called by ftrace profiling.
4432 */
4433int unregister_ftrace_function(struct ftrace_ops *ops)
4434{
4435 int ret;
4436
4437 mutex_lock(&ftrace_lock);
4438 ret = ftrace_shutdown(ops, 0);
4439 mutex_unlock(&ftrace_lock);
4440
4441 return ret;
4442}
4443EXPORT_SYMBOL_GPL(unregister_ftrace_function);
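/*
 * End-to-end sketch of the register/unregister API (all names
 * hypothetical; the callback must be notrace to avoid recursion, as
 * the kernel-doc above warns):
 *
 *	static void notrace
 *	my_trace_call(unsigned long ip, unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_call,
 *	};
 *
 *	(in module init)	register_ftrace_function(&my_trace_ops);
 *	(in module exit)	unregister_ftrace_function(&my_trace_ops);
 */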
4444
4445int
4446ftrace_enable_sysctl(struct ctl_table *table, int write,
4447 void __user *buffer, size_t *lenp,
4448 loff_t *ppos)
4449{
4450 int ret = -ENODEV;
4451
4452 mutex_lock(&ftrace_lock);
4453
4454 if (unlikely(ftrace_disabled))
4455 goto out;
4456
4457 ret = proc_dointvec(table, write, buffer, lenp, ppos);
4458
4459 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4460 goto out;
4461
4462 last_ftrace_enabled = !!ftrace_enabled;
4463
4464 if (ftrace_enabled) {
4465
4466 /* we are starting ftrace again */
4467 if (ftrace_ops_list != &ftrace_list_end)
4468 update_ftrace_function();
4469
4470 ftrace_startup_sysctl();
4471
4472 } else {
4473 /* stopping ftrace calls (just send to ftrace_stub) */
4474 ftrace_trace_function = ftrace_stub;
4475
4476 ftrace_shutdown_sysctl();
4477 }
4478
4479 out:
4480 mutex_unlock(&ftrace_lock);
4481 return ret;
4482}
4483
4484#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4485
4486static struct notifier_block ftrace_suspend_notifier;
4487
4488int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4489{
4490 return 0;
4491}
4492
4493/* The callbacks that hook a function */
4494trace_func_graph_ret_t ftrace_graph_return =
4495 (trace_func_graph_ret_t)ftrace_stub;
4496trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4497static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
4498
4499/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4500static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4501{
4502 int i;
4503 int ret = 0;
4504 unsigned long flags;
4505 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4506 struct task_struct *g, *t;
4507
4508 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4509 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4510 * sizeof(struct ftrace_ret_stack),
4511 GFP_KERNEL);
4512 if (!ret_stack_list[i]) {
4513 start = 0;
4514 end = i;
4515 ret = -ENOMEM;
4516 goto free;
4517 }
4518 }
4519
4520 read_lock_irqsave(&tasklist_lock, flags);
4521 do_each_thread(g, t) {
4522 if (start == end) {
4523 ret = -EAGAIN;
4524 goto unlock;
4525 }
4526
4527 if (t->ret_stack == NULL) {
4528 atomic_set(&t->tracing_graph_pause, 0);
4529 atomic_set(&t->trace_overrun, 0);
4530 t->curr_ret_stack = -1;
4531 /* Make sure the tasks see the -1 first: */
4532 smp_wmb();
4533 t->ret_stack = ret_stack_list[start++];
4534 }
4535 } while_each_thread(g, t);
4536
4537unlock:
4538 read_unlock_irqrestore(&tasklist_lock, flags);
4539free:
4540 for (i = start; i < end; i++)
4541 kfree(ret_stack_list[i]);
4542 return ret;
4543}
4544
4545static void
4546ftrace_graph_probe_sched_switch(void *ignore,
4547 struct task_struct *prev, struct task_struct *next)
4548{
4549 unsigned long long timestamp;
4550 int index;
4551
4552 /*
4553	 * Does the user want to count the time a function was asleep?
4554	 * If so, do not update the timestamps.
4555 */
4556 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4557 return;
4558
4559 timestamp = trace_clock_local();
4560
4561 prev->ftrace_timestamp = timestamp;
4562
4563 /* only process tasks that we timestamped */
4564 if (!next->ftrace_timestamp)
4565 return;
4566
4567 /*
4568 * Update all the counters in next to make up for the
4569 * time next was sleeping.
4570 */
4571 timestamp -= next->ftrace_timestamp;
4572
4573 for (index = next->curr_ret_stack; index >= 0; index--)
4574 next->ret_stack[index].calltime += timestamp;
4575}
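/*
 * Editor's example: if "next" was switched out at t=100us and runs
 * again at t=130us, the 30us of sleep is added to the ->calltime of
 * every entry still on its return stack, so the reported duration
 * (rettime - calltime) of those functions excludes the sleep.
 */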
4576
4577/* Allocate a return stack for each task */
4578static int start_graph_tracing(void)
4579{
4580 struct ftrace_ret_stack **ret_stack_list;
4581 int ret, cpu;
4582
4583 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4584 sizeof(struct ftrace_ret_stack *),
4585 GFP_KERNEL);
4586
4587 if (!ret_stack_list)
4588 return -ENOMEM;
4589
4590	/* The ret_stack of each cpu's idle task will never be freed */
4591 for_each_online_cpu(cpu) {
4592 if (!idle_task(cpu)->ret_stack)
4593 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4594 }
4595
4596 do {
4597 ret = alloc_retstack_tasklist(ret_stack_list);
4598 } while (ret == -EAGAIN);
4599
4600 if (!ret) {
4601 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4602 if (ret)
4603 pr_info("ftrace_graph: Couldn't activate tracepoint"
4604 " probe to kernel_sched_switch\n");
4605 }
4606
4607 kfree(ret_stack_list);
4608 return ret;
4609}
4610
4611/*
4612 * Hibernation protection.
4613 * The state of the current task is too unstable during
4614 * suspend/restore to disk. We want to protect against that.
4615 */
4616static int
4617ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4618 void *unused)
4619{
4620 switch (state) {
4621 case PM_HIBERNATION_PREPARE:
4622 pause_graph_tracing();
4623 break;
4624
4625 case PM_POST_HIBERNATION:
4626 unpause_graph_tracing();
4627 break;
4628 }
4629 return NOTIFY_DONE;
4630}
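/*
 * Editor's note: pause_graph_tracing() just bumps the per-task
 * tracing_graph_pause count on current, so graph entries are skipped
 * for the task driving hibernation without tearing the tracer down;
 * unpause_graph_tracing() drops the count again.
 */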
4631
4632/* Just a placeholder for the function graph tracer */
4633static struct ftrace_ops fgraph_ops __read_mostly = {
4634 .func = ftrace_stub,
4635 .flags = FTRACE_OPS_FL_GLOBAL,
4636};
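/*
 * Editor's note: ->func is a stub because the graph tracer does not use
 * the normal function callback; registering this ops below with
 * ftrace_startup(..., FTRACE_START_FUNC_RET) is what makes the
 * architecture code redirect function returns through the graph hooks.
 * The GLOBAL flag ties it to the set_ftrace_filter/notrace filters.
 */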
4637
4638static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
4639{
4640 if (!ftrace_ops_test(&global_ops, trace->func))
4641 return 0;
4642 return __ftrace_graph_entry(trace);
4643}
4644
4645/*
4646 * The function graph tracer should only trace the functions defined
4647 * by set_ftrace_filter and set_ftrace_notrace. If another function
4648 * tracer ops is registered, the graph tracer must test each
4649 * function against the global ops rather than trace every function
4650 * that any ftrace_ops has registered for.
4651 */
4652static void update_function_graph_func(void)
4653{
4654 if (ftrace_ops_list == &ftrace_list_end ||
4655 (ftrace_ops_list == &global_ops &&
4656 global_ops.next == &ftrace_list_end))
4657 ftrace_graph_entry = __ftrace_graph_entry;
4658 else
4659 ftrace_graph_entry = ftrace_graph_entry_test;
4660}
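/*
 * Editor's example: with no ops registered, or with only global_ops on
 * the list, graph entries go straight to entryfunc; as soon as some
 * other ftrace_ops is registered, every entry is first filtered
 * through ftrace_graph_entry_test() above.
 */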
4661
4662int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4663 trace_func_graph_ent_t entryfunc)
4664{
4665 int ret = 0;
4666
4667 mutex_lock(&ftrace_lock);
4668
4669 /* we currently allow only one tracer registered at a time */
4670 if (ftrace_graph_active) {
4671 ret = -EBUSY;
4672 goto out;
4673 }
4674
4675 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4676 register_pm_notifier(&ftrace_suspend_notifier);
4677
4678 ftrace_graph_active++;
4679 ret = start_graph_tracing();
4680 if (ret) {
4681 ftrace_graph_active--;
4682 goto out;
4683 }
4684
4685 ftrace_graph_return = retfunc;
4686
4687 /*
4688	 * Point the indirect __ftrace_graph_entry at entryfunc, and the
4689	 * hook that actually gets called at entry_test first. Then let
4690	 * update_function_graph_func() decide whether entryfunc can be
4691	 * called directly or must go through the test.
4692 */
4693 __ftrace_graph_entry = entryfunc;
4694 ftrace_graph_entry = ftrace_graph_entry_test;
4695 update_function_graph_func();
4696
4697 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
4698
4699out:
4700 mutex_unlock(&ftrace_lock);
4701 return ret;
4702}
4703
4704void unregister_ftrace_graph(void)
4705{
4706 mutex_lock(&ftrace_lock);
4707
4708 if (unlikely(!ftrace_graph_active))
4709 goto out;
4710
4711 ftrace_graph_active--;
4712 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4713 ftrace_graph_entry = ftrace_graph_entry_stub;
4714 __ftrace_graph_entry = ftrace_graph_entry_stub;
4715 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
4716 unregister_pm_notifier(&ftrace_suspend_notifier);
4717 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4718
4719 out:
4720 mutex_unlock(&ftrace_lock);
4721}
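/*
 * Editor's sketch (not part of the original file): a minimal pair of
 * graph callbacks for register_ftrace_graph(). All "example_*" names
 * are invented for illustration; the block is under #if 0 so the file
 * builds unchanged.
 */
#if 0
static int notrace example_graph_entry(struct ftrace_graph_ent *trace)
{
	/* Return nonzero to trace this function's return as well. */
	return 1;
}

static void notrace example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the function's duration. */
}

static int __init example_graph_init(void)
{
	/* Only one graph tracer may be registered at a time (-EBUSY). */
	return register_ftrace_graph(example_graph_return,
				     example_graph_entry);
}

static void __exit example_graph_exit(void)
{
	unregister_ftrace_graph();
}
#endif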
4722
4723static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
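/*
 * Editor's note: caching the idle task's ret_stack per cpu lets a cpu
 * that goes offline and comes back online reuse the same allocation
 * instead of leaking one per hotplug cycle (see the WARN_ON below).
 */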
4724
4725static void
4726graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4727{
4728 atomic_set(&t->tracing_graph_pause, 0);
4729 atomic_set(&t->trace_overrun, 0);
4730 t->ftrace_timestamp = 0;
4731 /* make curr_ret_stack visible before we add the ret_stack */
4732 smp_wmb();
4733 t->ret_stack = ret_stack;
4734}
4735
4736/*
4737 * Allocate a return stack for the idle task. May be the first
4738 * time through, or it may be done again when a cpu comes online.
4739 */
4740void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4741{
4742 t->curr_ret_stack = -1;
4743 /*
4744	 * The idle task has no parent; it either has its own
4745	 * ret_stack or no ret_stack at all.
4746 */
4747 if (t->ret_stack)
4748 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4749
4750 if (ftrace_graph_active) {
4751 struct ftrace_ret_stack *ret_stack;
4752
4753 ret_stack = per_cpu(idle_ret_stack, cpu);
4754 if (!ret_stack) {
4755 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4756 * sizeof(struct ftrace_ret_stack),
4757 GFP_KERNEL);
4758 if (!ret_stack)
4759 return;
4760 per_cpu(idle_ret_stack, cpu) = ret_stack;
4761 }
4762 graph_init_task(t, ret_stack);
4763 }
4764}
4765
4766/* Allocate a return stack for a newly created task */
4767void ftrace_graph_init_task(struct task_struct *t)
4768{
4769 /* Make sure we do not use the parent ret_stack */
4770 t->ret_stack = NULL;
4771 t->curr_ret_stack = -1;
4772
4773 if (ftrace_graph_active) {
4774 struct ftrace_ret_stack *ret_stack;
4775
4776 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4777 * sizeof(struct ftrace_ret_stack),
4778 GFP_KERNEL);
4779 if (!ret_stack)
4780 return;
4781 graph_init_task(t, ret_stack);
4782 }
4783}
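/*
 * Editor's note: the function above runs on the fork path, so while the
 * graph tracer is active every new task pays one
 * FTRACE_RETFUNC_DEPTH-sized allocation here; if it fails, the task
 * simply runs untraced.
 */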
4784
4785void ftrace_graph_exit_task(struct task_struct *t)
4786{
4787 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4788
4789 t->ret_stack = NULL;
4790 /* NULL must become visible to IRQs before we free it: */
4791 barrier();
4792
4793 kfree(ret_stack);
4794}
4795
4796void ftrace_graph_stop(void)
4797{
4798 ftrace_stop();
4799}
4800#endif