1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally ported from the -rt patch by:
9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code in the latency_tracer, that is:
12 *
13 * Copyright (C) 2004-2006 Ingo Molnar
14 * Copyright (C) 2004 Nadia Yvette Chambers
15 */
16
17#include <linux/stop_machine.h>
18#include <linux/clocksource.h>
19#include <linux/sched/task.h>
20#include <linux/kallsyms.h>
21#include <linux/security.h>
22#include <linux/seq_file.h>
23#include <linux/tracefs.h>
24#include <linux/hardirq.h>
25#include <linux/kthread.h>
26#include <linux/uaccess.h>
27#include <linux/bsearch.h>
28#include <linux/module.h>
29#include <linux/ftrace.h>
30#include <linux/sysctl.h>
31#include <linux/slab.h>
32#include <linux/ctype.h>
33#include <linux/sort.h>
34#include <linux/list.h>
35#include <linux/hash.h>
36#include <linux/rcupdate.h>
37#include <linux/kprobes.h>
38
39#include <trace/events/sched.h>
40
41#include <asm/sections.h>
42#include <asm/setup.h>
43
44#include "ftrace_internal.h"
45#include "trace_output.h"
46#include "trace_stat.h"
47
48#define FTRACE_WARN_ON(cond) \
49 ({ \
50 int ___r = cond; \
51 if (WARN_ON(___r)) \
52 ftrace_kill(); \
53 ___r; \
54 })
55
56#define FTRACE_WARN_ON_ONCE(cond) \
57 ({ \
58 int ___r = cond; \
59 if (WARN_ON_ONCE(___r)) \
60 ftrace_kill(); \
61 ___r; \
62 })
63
64/* hash bits for specific function selection */
65#define FTRACE_HASH_BITS 7
66#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
67#define FTRACE_HASH_DEFAULT_BITS 10
68#define FTRACE_HASH_MAX_BITS 12
69
70#ifdef CONFIG_DYNAMIC_FTRACE
71#define INIT_OPS_HASH(opsname) \
72 .func_hash = &opsname.local_hash, \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74#else
75#define INIT_OPS_HASH(opsname)
76#endif
77
78enum {
79 FTRACE_MODIFY_ENABLE_FL = (1 << 0),
80 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
81};
82
83struct ftrace_ops ftrace_list_end __read_mostly = {
84 .func = ftrace_stub,
85 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
86 INIT_OPS_HASH(ftrace_list_end)
87};
88
89/* ftrace_enabled is a method to turn ftrace on or off */
90int ftrace_enabled __read_mostly;
91static int last_ftrace_enabled;
92
93/* Current function tracing op */
94struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
95/* What to set function_trace_op to */
96static struct ftrace_ops *set_function_trace_op;
97
98static bool ftrace_pids_enabled(struct ftrace_ops *ops)
99{
100 struct trace_array *tr;
101
102 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
103 return false;
104
105 tr = ops->private;
106
107 return tr->function_pids != NULL;
108}
109
110static void ftrace_update_trampoline(struct ftrace_ops *ops);
111
112/*
113 * ftrace_disabled is set when an anomaly is discovered.
114 * ftrace_disabled is much stronger than ftrace_enabled.
115 */
116static int ftrace_disabled __read_mostly;
117
118DEFINE_MUTEX(ftrace_lock);
119
120struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
121ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
122struct ftrace_ops global_ops;
123
124#if ARCH_SUPPORTS_FTRACE_OPS
125static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
126 struct ftrace_ops *op, struct pt_regs *regs);
127#else
128/* See comment below, where ftrace_ops_list_func is defined */
129static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
130#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
131#endif
132
133static inline void ftrace_ops_init(struct ftrace_ops *ops)
134{
135#ifdef CONFIG_DYNAMIC_FTRACE
136 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
137 mutex_init(&ops->local_hash.regex_lock);
138 ops->func_hash = &ops->local_hash;
139 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
140 }
141#endif
142}
143
144static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
145 struct ftrace_ops *op, struct pt_regs *regs)
146{
147 struct trace_array *tr = op->private;
148
149 if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
150 return;
151
152 op->saved_func(ip, parent_ip, op, regs);
153}
154
155static void ftrace_sync(struct work_struct *work)
156{
157 /*
158 * This function is just a stub to implement a hard force
159 * of synchronize_rcu(). This requires synchronizing
160 * tasks even in userspace and idle.
161 *
162 * Yes, function tracing is rude.
163 */
164}
165
166static void ftrace_sync_ipi(void *data)
167{
168 /* Probably not needed, but do it anyway */
169 smp_rmb();
170}
171
172static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
173{
174 /*
175 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
176 * then it needs to call the list anyway.
177 */
178 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
179 FTRACE_FORCE_LIST_FUNC)
180 return ftrace_ops_list_func;
181
182 return ftrace_ops_get_func(ops);
183}
184
185static void update_ftrace_function(void)
186{
187 ftrace_func_t func;
188
189 /*
190 * Prepare the ftrace_ops that the arch callback will use.
191 * If there's only one ftrace_ops registered, the ftrace_ops_list
192 * will point to the ops we want.
193 */
194 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
195 lockdep_is_held(&ftrace_lock));
196
197 /* If there's no ftrace_ops registered, just call the stub function */
198 if (set_function_trace_op == &ftrace_list_end) {
199 func = ftrace_stub;
200
201 /*
202 * If we are at the end of the list and this ops is
203 * recursion safe and not dynamic and the arch supports passing ops,
204 * then have the mcount trampoline call the function directly.
205 */
206 } else if (rcu_dereference_protected(ftrace_ops_list->next,
207 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
208 func = ftrace_ops_get_list_func(ftrace_ops_list);
209
210 } else {
211 /* Just use the default ftrace_ops */
212 set_function_trace_op = &ftrace_list_end;
213 func = ftrace_ops_list_func;
214 }
215
216 update_function_graph_func();
217
218 /* If there's no change, then do nothing more here */
219 if (ftrace_trace_function == func)
220 return;
221
222 /*
223 * If we are using the list function, it doesn't care
224 * about the function_trace_ops.
225 */
226 if (func == ftrace_ops_list_func) {
227 ftrace_trace_function = func;
228 /*
229 * Don't even bother setting function_trace_ops,
230 * it would be racy to do so anyway.
231 */
232 return;
233 }
234
235#ifndef CONFIG_DYNAMIC_FTRACE
236 /*
237 * For static tracing, we need to be a bit more careful.
238 * The function change takes effect immediately. Thus,
239 * we need to coordinate the setting of the function_trace_ops
240 * with the setting of the ftrace_trace_function.
241 *
242 * Set the function to the list ops, which will call the
243 * function we want, albeit indirectly, but it handles the
244 * ftrace_ops and doesn't depend on function_trace_op.
245 */
246 ftrace_trace_function = ftrace_ops_list_func;
247 /*
248 * Make sure all CPUs see this. Yes this is slow, but static
249 * tracing is slow and nasty to have enabled.
250 */
251 schedule_on_each_cpu(ftrace_sync);
252 /* Now all cpus are using the list ops. */
253 function_trace_op = set_function_trace_op;
254 /* Make sure the function_trace_op is visible on all CPUs */
255 smp_wmb();
256 /* Nasty way to force a rmb on all cpus */
257 smp_call_function(ftrace_sync_ipi, NULL, 1);
258 /* OK, we are all set to update the ftrace_trace_function now! */
259#endif /* !CONFIG_DYNAMIC_FTRACE */
260
261 ftrace_trace_function = func;
262}
263
264static void add_ftrace_ops(struct ftrace_ops __rcu **list,
265 struct ftrace_ops *ops)
266{
267 rcu_assign_pointer(ops->next, *list);
268
269 /*
270 * We are entering ops into the list but another
271 * CPU might be walking that list. We need to make sure
272 * the ops->next pointer is valid before another CPU sees
273 * the ops pointer included into the list.
274 */
275 rcu_assign_pointer(*list, ops);
276}
277
278static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
279 struct ftrace_ops *ops)
280{
281 struct ftrace_ops **p;
282
283 /*
284 * If we are removing the last function, then simply point
285 * to the ftrace_stub.
286 */
287 if (rcu_dereference_protected(*list,
288 lockdep_is_held(&ftrace_lock)) == ops &&
289 rcu_dereference_protected(ops->next,
290 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
291 *list = &ftrace_list_end;
292 return 0;
293 }
294
295 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
296 if (*p == ops)
297 break;
298
299 if (*p != ops)
300 return -1;
301
302 *p = (*p)->next;
303 return 0;
304}
305
306static void ftrace_update_trampoline(struct ftrace_ops *ops);
307
308int __register_ftrace_function(struct ftrace_ops *ops)
309{
310 if (ops->flags & FTRACE_OPS_FL_DELETED)
311 return -EINVAL;
312
313 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
314 return -EBUSY;
315
316#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
317 /*
318 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
319 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
320 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
321 */
322 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
323 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
324 return -EINVAL;
325
326 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
327 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
328#endif
329
330 if (!core_kernel_data((unsigned long)ops))
331 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
332
333 add_ftrace_ops(&ftrace_ops_list, ops);
334
335 /* Always save the function, and reset at unregistering */
336 ops->saved_func = ops->func;
337
338 if (ftrace_pids_enabled(ops))
339 ops->func = ftrace_pid_func;
340
341 ftrace_update_trampoline(ops);
342
343 if (ftrace_enabled)
344 update_ftrace_function();
345
346 return 0;
347}
348
349int __unregister_ftrace_function(struct ftrace_ops *ops)
350{
351 int ret;
352
353 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
354 return -EBUSY;
355
356 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
357
358 if (ret < 0)
359 return ret;
360
361 if (ftrace_enabled)
362 update_ftrace_function();
363
364 ops->func = ops->saved_func;
365
366 return 0;
367}
368
369static void ftrace_update_pid_func(void)
370{
371 struct ftrace_ops *op;
372
373 /* Only do something if we are tracing something */
374 if (ftrace_trace_function == ftrace_stub)
375 return;
376
377 do_for_each_ftrace_op(op, ftrace_ops_list) {
378 if (op->flags & FTRACE_OPS_FL_PID) {
379 op->func = ftrace_pids_enabled(op) ?
380 ftrace_pid_func : op->saved_func;
381 ftrace_update_trampoline(op);
382 }
383 } while_for_each_ftrace_op(op);
384
385 update_ftrace_function();
386}
387
388#ifdef CONFIG_FUNCTION_PROFILER
389struct ftrace_profile {
390 struct hlist_node node;
391 unsigned long ip;
392 unsigned long counter;
393#ifdef CONFIG_FUNCTION_GRAPH_TRACER
394 unsigned long long time;
395 unsigned long long time_squared;
396#endif
397};
398
399struct ftrace_profile_page {
400 struct ftrace_profile_page *next;
401 unsigned long index;
402 struct ftrace_profile records[];
403};
404
405struct ftrace_profile_stat {
406 atomic_t disabled;
407 struct hlist_head *hash;
408 struct ftrace_profile_page *pages;
409 struct ftrace_profile_page *start;
410 struct tracer_stat stat;
411};
412
413#define PROFILE_RECORDS_SIZE \
414 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
415
416#define PROFILES_PER_PAGE \
417 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
418
419static int ftrace_profile_enabled __read_mostly;
420
421/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
422static DEFINE_MUTEX(ftrace_profile_lock);
423
424static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
425
426#define FTRACE_PROFILE_HASH_BITS 10
427#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
428
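/*
 * Advance the profile stat iterator to the next record, moving on to the
 * next page when the current one is exhausted.
 */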
429static void *
430function_stat_next(void *v, int idx)
431{
432 struct ftrace_profile *rec = v;
433 struct ftrace_profile_page *pg;
434
435 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
436
437 again:
438 if (idx != 0)
439 rec++;
440
441 if ((void *)rec >= (void *)&pg->records[pg->index]) {
442 pg = pg->next;
443 if (!pg)
444 return NULL;
445 rec = &pg->records[0];
446 if (!rec->counter)
447 goto again;
448 }
449
450 return rec;
451}
452
453static void *function_stat_start(struct tracer_stat *trace)
454{
455 struct ftrace_profile_stat *stat =
456 container_of(trace, struct ftrace_profile_stat, stat);
457
458 if (!stat || !stat->start)
459 return NULL;
460
461 return function_stat_next(&stat->start->records[0], 0);
462}
463
464#ifdef CONFIG_FUNCTION_GRAPH_TRACER
465/* function graph compares on total time */
466static int function_stat_cmp(void *p1, void *p2)
467{
468 struct ftrace_profile *a = p1;
469 struct ftrace_profile *b = p2;
470
471 if (a->time < b->time)
472 return -1;
473 if (a->time > b->time)
474 return 1;
475 else
476 return 0;
477}
478#else
479/* not function graph compares against hits */
480static int function_stat_cmp(void *p1, void *p2)
481{
482 struct ftrace_profile *a = p1;
483 struct ftrace_profile *b = p2;
484
485 if (a->counter < b->counter)
486 return -1;
487 if (a->counter > b->counter)
488 return 1;
489 else
490 return 0;
491}
492#endif
493
494static int function_stat_headers(struct seq_file *m)
495{
496#ifdef CONFIG_FUNCTION_GRAPH_TRACER
497 seq_puts(m, " Function "
498 "Hit Time Avg s^2\n"
499 " -------- "
500 "--- ---- --- ---\n");
501#else
502 seq_puts(m, " Function Hit\n"
503 " -------- ---\n");
504#endif
505 return 0;
506}
507
508static int function_stat_show(struct seq_file *m, void *v)
509{
510 struct ftrace_profile *rec = v;
511 char str[KSYM_SYMBOL_LEN];
512 int ret = 0;
513#ifdef CONFIG_FUNCTION_GRAPH_TRACER
514 static struct trace_seq s;
515 unsigned long long avg;
516 unsigned long long stddev;
517#endif
518 mutex_lock(&ftrace_profile_lock);
519
520 /* we raced with function_profile_reset() */
521 if (unlikely(rec->counter == 0)) {
522 ret = -EBUSY;
523 goto out;
524 }
525
526#ifdef CONFIG_FUNCTION_GRAPH_TRACER
527 avg = div64_ul(rec->time, rec->counter);
528 if (tracing_thresh && (avg < tracing_thresh))
529 goto out;
530#endif
531
532 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
533 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
534
535#ifdef CONFIG_FUNCTION_GRAPH_TRACER
536 seq_puts(m, " ");
537
538 /* Sample variance (s^2) */
539 if (rec->counter <= 1)
540 stddev = 0;
541 else {
542 /*
543 * Compute the sample variance:
544 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
545 */
546 stddev = rec->counter * rec->time_squared -
547 rec->time * rec->time;
548
549 /*
550 * Divide by only 1000 for the ns^2 -> us^2 conversion.
551 * trace_print_graph_duration will divide by 1000 again.
552 */
553 stddev = div64_ul(stddev,
554 rec->counter * (rec->counter - 1) * 1000);
555 }
556
557 trace_seq_init(&s);
558 trace_print_graph_duration(rec->time, &s);
559 trace_seq_puts(&s, " ");
560 trace_print_graph_duration(avg, &s);
561 trace_seq_puts(&s, " ");
562 trace_print_graph_duration(stddev, &s);
563 trace_print_seq(m, &s);
564#endif
565 seq_putc(m, '\n');
566out:
567 mutex_unlock(&ftrace_profile_lock);
568
569 return ret;
570}
571
572static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
573{
574 struct ftrace_profile_page *pg;
575
576 pg = stat->pages = stat->start;
577
578 while (pg) {
579 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
580 pg->index = 0;
581 pg = pg->next;
582 }
583
584 memset(stat->hash, 0,
585 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
586}
587
588int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
589{
590 struct ftrace_profile_page *pg;
591 int functions;
592 int pages;
593 int i;
594
595 /* If we already allocated, do nothing */
596 if (stat->pages)
597 return 0;
598
599 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
600 if (!stat->pages)
601 return -ENOMEM;
602
603#ifdef CONFIG_DYNAMIC_FTRACE
604 functions = ftrace_update_tot_cnt;
605#else
606 /*
607 * We do not know the number of functions that exist because
608 * dynamic tracing is what counts them. With past experience
609 * we have around 20K functions. That should be more than enough.
610 * It is highly unlikely we will execute every function in
611 * the kernel.
612 */
613 functions = 20000;
614#endif
615
616 pg = stat->start = stat->pages;
617
618 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
619
620 for (i = 1; i < pages; i++) {
621 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
622 if (!pg->next)
623 goto out_free;
624 pg = pg->next;
625 }
626
627 return 0;
628
629 out_free:
630 pg = stat->start;
631 while (pg) {
632 unsigned long tmp = (unsigned long)pg;
633
634 pg = pg->next;
635 free_page(tmp);
636 }
637
638 stat->pages = NULL;
639 stat->start = NULL;
640
641 return -ENOMEM;
642}
643
644static int ftrace_profile_init_cpu(int cpu)
645{
646 struct ftrace_profile_stat *stat;
647 int size;
648
649 stat = &per_cpu(ftrace_profile_stats, cpu);
650
651 if (stat->hash) {
652 /* If the profile is already created, simply reset it */
653 ftrace_profile_reset(stat);
654 return 0;
655 }
656
657 /*
658 * We are profiling all functions, but usually only a few thousand
659 * functions are hit. We'll make a hash of 1024 items.
660 */
661 size = FTRACE_PROFILE_HASH_SIZE;
662
663 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
664
665 if (!stat->hash)
666 return -ENOMEM;
667
668 /* Preallocate the function profiling pages */
669 if (ftrace_profile_pages_init(stat) < 0) {
670 kfree(stat->hash);
671 stat->hash = NULL;
672 return -ENOMEM;
673 }
674
675 return 0;
676}
677
678static int ftrace_profile_init(void)
679{
680 int cpu;
681 int ret = 0;
682
683 for_each_possible_cpu(cpu) {
684 ret = ftrace_profile_init_cpu(cpu);
685 if (ret)
686 break;
687 }
688
689 return ret;
690}
691
692/* interrupts must be disabled */
693static struct ftrace_profile *
694ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
695{
696 struct ftrace_profile *rec;
697 struct hlist_head *hhd;
698 unsigned long key;
699
700 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
701 hhd = &stat->hash[key];
702
703 if (hlist_empty(hhd))
704 return NULL;
705
706 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
707 if (rec->ip == ip)
708 return rec;
709 }
710
711 return NULL;
712}
713
714static void ftrace_add_profile(struct ftrace_profile_stat *stat,
715 struct ftrace_profile *rec)
716{
717 unsigned long key;
718
719 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
720 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
721}
722
723/*
724 * The memory is already allocated; this simply finds a new record to use.
725 */
726static struct ftrace_profile *
727ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
728{
729 struct ftrace_profile *rec = NULL;
730
731 /* prevent recursion (from NMIs) */
732 if (atomic_inc_return(&stat->disabled) != 1)
733 goto out;
734
735 /*
736 * Try to find the function again since an NMI
737 * could have added it
738 */
739 rec = ftrace_find_profiled_func(stat, ip);
740 if (rec)
741 goto out;
742
743 if (stat->pages->index == PROFILES_PER_PAGE) {
744 if (!stat->pages->next)
745 goto out;
746 stat->pages = stat->pages->next;
747 }
748
749 rec = &stat->pages->records[stat->pages->index++];
750 rec->ip = ip;
751 ftrace_add_profile(stat, rec);
752
753 out:
754 atomic_dec(&stat->disabled);
755
756 return rec;
757}
758
759static void
760function_profile_call(unsigned long ip, unsigned long parent_ip,
761 struct ftrace_ops *ops, struct pt_regs *regs)
762{
763 struct ftrace_profile_stat *stat;
764 struct ftrace_profile *rec;
765 unsigned long flags;
766
767 if (!ftrace_profile_enabled)
768 return;
769
770 local_irq_save(flags);
771
772 stat = this_cpu_ptr(&ftrace_profile_stats);
773 if (!stat->hash || !ftrace_profile_enabled)
774 goto out;
775
776 rec = ftrace_find_profiled_func(stat, ip);
777 if (!rec) {
778 rec = ftrace_profile_alloc(stat, ip);
779 if (!rec)
780 goto out;
781 }
782
783 rec->counter++;
784 out:
785 local_irq_restore(flags);
786}
787
788#ifdef CONFIG_FUNCTION_GRAPH_TRACER
789static bool fgraph_graph_time = true;
790
791void ftrace_graph_graph_time_control(bool enable)
792{
793 fgraph_graph_time = enable;
794}
795
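/* fgraph entry handler for the profiler: count the hit and reset this frame's subtime */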
796static int profile_graph_entry(struct ftrace_graph_ent *trace)
797{
798 struct ftrace_ret_stack *ret_stack;
799
800 function_profile_call(trace->func, 0, NULL, NULL);
801
802 /* If function graph is shutting down, ret_stack can be NULL */
803 if (!current->ret_stack)
804 return 0;
805
806 ret_stack = ftrace_graph_get_ret_stack(current, 0);
807 if (ret_stack)
808 ret_stack->subtime = 0;
809
810 return 1;
811}
812
813static void profile_graph_return(struct ftrace_graph_ret *trace)
814{
815 struct ftrace_ret_stack *ret_stack;
816 struct ftrace_profile_stat *stat;
817 unsigned long long calltime;
818 struct ftrace_profile *rec;
819 unsigned long flags;
820
821 local_irq_save(flags);
822 stat = this_cpu_ptr(&ftrace_profile_stats);
823 if (!stat->hash || !ftrace_profile_enabled)
824 goto out;
825
826 /* If the calltime was zero'd ignore it */
827 if (!trace->calltime)
828 goto out;
829
830 calltime = trace->rettime - trace->calltime;
831
832 if (!fgraph_graph_time) {
833
834 /* Append this call time to the parent time to subtract */
835 ret_stack = ftrace_graph_get_ret_stack(current, 1);
836 if (ret_stack)
837 ret_stack->subtime += calltime;
838
839 ret_stack = ftrace_graph_get_ret_stack(current, 0);
840 if (ret_stack && ret_stack->subtime < calltime)
841 calltime -= ret_stack->subtime;
842 else
843 calltime = 0;
844 }
845
846 rec = ftrace_find_profiled_func(stat, trace->func);
847 if (rec) {
848 rec->time += calltime;
849 rec->time_squared += calltime * calltime;
850 }
851
852 out:
853 local_irq_restore(flags);
854}
855
856static struct fgraph_ops fprofiler_ops = {
857 .entryfunc = &profile_graph_entry,
858 .retfunc = &profile_graph_return,
859};
860
861static int register_ftrace_profiler(void)
862{
863 return register_ftrace_graph(&fprofiler_ops);
864}
865
866static void unregister_ftrace_profiler(void)
867{
868 unregister_ftrace_graph(&fprofiler_ops);
869}
870#else
871static struct ftrace_ops ftrace_profile_ops __read_mostly = {
872 .func = function_profile_call,
873 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
874 INIT_OPS_HASH(ftrace_profile_ops)
875};
876
877static int register_ftrace_profiler(void)
878{
879 return register_ftrace_function(&ftrace_profile_ops);
880}
881
882static void unregister_ftrace_profiler(void)
883{
884 unregister_ftrace_function(&ftrace_profile_ops);
885}
886#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
887
888static ssize_t
889ftrace_profile_write(struct file *filp, const char __user *ubuf,
890 size_t cnt, loff_t *ppos)
891{
892 unsigned long val;
893 int ret;
894
895 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
896 if (ret)
897 return ret;
898
899 val = !!val;
900
901 mutex_lock(&ftrace_profile_lock);
902 if (ftrace_profile_enabled ^ val) {
903 if (val) {
904 ret = ftrace_profile_init();
905 if (ret < 0) {
906 cnt = ret;
907 goto out;
908 }
909
910 ret = register_ftrace_profiler();
911 if (ret < 0) {
912 cnt = ret;
913 goto out;
914 }
915 ftrace_profile_enabled = 1;
916 } else {
917 ftrace_profile_enabled = 0;
918 /*
919 * unregister_ftrace_profiler calls stop_machine
920 * so this acts like a synchronize_rcu.
921 */
922 unregister_ftrace_profiler();
923 }
924 }
925 out:
926 mutex_unlock(&ftrace_profile_lock);
927
928 *ppos += cnt;
929
930 return cnt;
931}
932
933static ssize_t
934ftrace_profile_read(struct file *filp, char __user *ubuf,
935 size_t cnt, loff_t *ppos)
936{
937 char buf[64]; /* big enough to hold a number */
938 int r;
939
940 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
941 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
942}
943
944static const struct file_operations ftrace_profile_fops = {
945 .open = tracing_open_generic,
946 .read = ftrace_profile_read,
947 .write = ftrace_profile_write,
948 .llseek = default_llseek,
949};
950
951/* used to initialize the real stat files */
952static struct tracer_stat function_stats __initdata = {
953 .name = "functions",
954 .stat_start = function_stat_start,
955 .stat_next = function_stat_next,
956 .stat_cmp = function_stat_cmp,
957 .stat_headers = function_stat_headers,
958 .stat_show = function_stat_show
959};
960
961static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
962{
963 struct ftrace_profile_stat *stat;
964 struct dentry *entry;
965 char *name;
966 int ret;
967 int cpu;
968
969 for_each_possible_cpu(cpu) {
970 stat = &per_cpu(ftrace_profile_stats, cpu);
971
972 name = kasprintf(GFP_KERNEL, "function%d", cpu);
973 if (!name) {
974 /*
975 * The files created are permanent; even if something goes wrong
976 * here, we still do not free the memory.
977 */
978 WARN(1,
979 "Could not allocate stat file for cpu %d\n",
980 cpu);
981 return;
982 }
983 stat->stat = function_stats;
984 stat->stat.name = name;
985 ret = register_stat_tracer(&stat->stat);
986 if (ret) {
987 WARN(1,
988 "Could not register function stat for cpu %d\n",
989 cpu);
990 kfree(name);
991 return;
992 }
993 }
994
995 entry = tracefs_create_file("function_profile_enabled", 0644,
996 d_tracer, NULL, &ftrace_profile_fops);
997 if (!entry)
998 pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
999}
1000
1001#else /* CONFIG_FUNCTION_PROFILER */
1002static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1003{
1004}
1005#endif /* CONFIG_FUNCTION_PROFILER */
1006
1007#ifdef CONFIG_DYNAMIC_FTRACE
1008
1009static struct ftrace_ops *removed_ops;
1010
1011/*
1012 * Set when doing a global update, like enabling all recs or disabling them.
1013 * It is not set when just updating a single ftrace_ops.
1014 */
1015static bool update_all_ops;
1016
1017#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1018# error Dynamic ftrace depends on MCOUNT_RECORD
1019#endif
1020
1021struct ftrace_func_entry {
1022 struct hlist_node hlist;
1023 unsigned long ip;
1024};
1025
1026struct ftrace_func_probe {
1027 struct ftrace_probe_ops *probe_ops;
1028 struct ftrace_ops ops;
1029 struct trace_array *tr;
1030 struct list_head list;
1031 void *data;
1032 int ref;
1033};
1034
1035/*
1036 * We make these constant because no one should touch them,
1037 * but they are used as the default "empty hash", to avoid allocating
1038 * it all the time. These are in a read only section such that if
1039 * anyone does try to modify it, it will cause an exception.
1040 */
1041static const struct hlist_head empty_buckets[1];
1042static const struct ftrace_hash empty_hash = {
1043 .buckets = (struct hlist_head *)empty_buckets,
1044};
1045#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1046
1047struct ftrace_ops global_ops = {
1048 .func = ftrace_stub,
1049 .local_hash.notrace_hash = EMPTY_HASH,
1050 .local_hash.filter_hash = EMPTY_HASH,
1051 INIT_OPS_HASH(global_ops)
1052 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1053 FTRACE_OPS_FL_INITIALIZED |
1054 FTRACE_OPS_FL_PID,
1055};
1056
1057/*
1058 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1059 */
1060struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1061{
1062 struct ftrace_ops *op = NULL;
1063
1064 /*
1065 * Some of the ops may be dynamically allocated,
1066 * they are freed after a synchronize_rcu().
1067 */
1068 preempt_disable_notrace();
1069
1070 do_for_each_ftrace_op(op, ftrace_ops_list) {
1071 /*
1072 * This is to check for dynamically allocated trampolines.
1073 * Trampolines that are in kernel text will have
1074 * core_kernel_text() return true.
1075 */
1076 if (op->trampoline && op->trampoline_size)
1077 if (addr >= op->trampoline &&
1078 addr < op->trampoline + op->trampoline_size) {
1079 preempt_enable_notrace();
1080 return op;
1081 }
1082 } while_for_each_ftrace_op(op);
1083 preempt_enable_notrace();
1084
1085 return NULL;
1086}
1087
1088/*
1089 * This is used by __kernel_text_address() to return true if the
1090 * address is on a dynamically allocated trampoline that would
1091 * not return true for either core_kernel_text() or
1092 * is_module_text_address().
1093 */
1094bool is_ftrace_trampoline(unsigned long addr)
1095{
1096 return ftrace_ops_trampoline(addr) != NULL;
1097}
1098
1099struct ftrace_page {
1100 struct ftrace_page *next;
1101 struct dyn_ftrace *records;
1102 int index;
1103 int order;
1104};
1105
1106#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1107#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1108
1109/* estimate from running different kernels */
1110#define NR_TO_INIT 10000
1111
1112static struct ftrace_page *ftrace_pages_start;
1113static struct ftrace_page *ftrace_pages;
1114
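/* Map an instruction pointer to a bucket index in @hash (a zero-bit hash has a single bucket) */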
1115static __always_inline unsigned long
1116ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1117{
1118 if (hash->size_bits > 0)
1119 return hash_long(ip, hash->size_bits);
1120
1121 return 0;
1122}
1123
1124/* Only use this function if ftrace_hash_empty() has already been tested */
1125static __always_inline struct ftrace_func_entry *
1126__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1127{
1128 unsigned long key;
1129 struct ftrace_func_entry *entry;
1130 struct hlist_head *hhd;
1131
1132 key = ftrace_hash_key(hash, ip);
1133 hhd = &hash->buckets[key];
1134
1135 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1136 if (entry->ip == ip)
1137 return entry;
1138 }
1139 return NULL;
1140}
1141
1142/**
1143 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1144 * @hash: The hash to look at
1145 * @ip: The instruction pointer to test
1146 *
1147 * Search a given @hash to see if a given instruction pointer (@ip)
1148 * exists in it.
1149 *
1150 * Returns the entry that holds the @ip if found. NULL otherwise.
1151 */
1152struct ftrace_func_entry *
1153ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1154{
1155 if (ftrace_hash_empty(hash))
1156 return NULL;
1157
1158 return __ftrace_lookup_ip(hash, ip);
1159}
1160
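/* Link @entry into the bucket chosen by its ip and bump the hash's entry count */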
1161static void __add_hash_entry(struct ftrace_hash *hash,
1162 struct ftrace_func_entry *entry)
1163{
1164 struct hlist_head *hhd;
1165 unsigned long key;
1166
1167 key = ftrace_hash_key(hash, entry->ip);
1168 hhd = &hash->buckets[key];
1169 hlist_add_head(&entry->hlist, hhd);
1170 hash->count++;
1171}
1172
1173static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1174{
1175 struct ftrace_func_entry *entry;
1176
1177 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1178 if (!entry)
1179 return -ENOMEM;
1180
1181 entry->ip = ip;
1182 __add_hash_entry(hash, entry);
1183
1184 return 0;
1185}
1186
1187static void
1188free_hash_entry(struct ftrace_hash *hash,
1189 struct ftrace_func_entry *entry)
1190{
1191 hlist_del(&entry->hlist);
1192 kfree(entry);
1193 hash->count--;
1194}
1195
1196static void
1197remove_hash_entry(struct ftrace_hash *hash,
1198 struct ftrace_func_entry *entry)
1199{
1200 hlist_del_rcu(&entry->hlist);
1201 hash->count--;
1202}
1203
1204static void ftrace_hash_clear(struct ftrace_hash *hash)
1205{
1206 struct hlist_head *hhd;
1207 struct hlist_node *tn;
1208 struct ftrace_func_entry *entry;
1209 int size = 1 << hash->size_bits;
1210 int i;
1211
1212 if (!hash->count)
1213 return;
1214
1215 for (i = 0; i < size; i++) {
1216 hhd = &hash->buckets[i];
1217 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1218 free_hash_entry(hash, entry);
1219 }
1220 FTRACE_WARN_ON(hash->count);
1221}
1222
1223static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1224{
1225 list_del(&ftrace_mod->list);
1226 kfree(ftrace_mod->module);
1227 kfree(ftrace_mod->func);
1228 kfree(ftrace_mod);
1229}
1230
1231static void clear_ftrace_mod_list(struct list_head *head)
1232{
1233 struct ftrace_mod_load *p, *n;
1234
1235 /* stack tracer isn't supported yet */
1236 if (!head)
1237 return;
1238
1239 mutex_lock(&ftrace_lock);
1240 list_for_each_entry_safe(p, n, head, list)
1241 free_ftrace_mod(p);
1242 mutex_unlock(&ftrace_lock);
1243}
1244
1245static void free_ftrace_hash(struct ftrace_hash *hash)
1246{
1247 if (!hash || hash == EMPTY_HASH)
1248 return;
1249 ftrace_hash_clear(hash);
1250 kfree(hash->buckets);
1251 kfree(hash);
1252}
1253
1254static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1255{
1256 struct ftrace_hash *hash;
1257
1258 hash = container_of(rcu, struct ftrace_hash, rcu);
1259 free_ftrace_hash(hash);
1260}
1261
1262static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1263{
1264 if (!hash || hash == EMPTY_HASH)
1265 return;
1266 call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1267}
1268
1269void ftrace_free_filter(struct ftrace_ops *ops)
1270{
1271 ftrace_ops_init(ops);
1272 free_ftrace_hash(ops->func_hash->filter_hash);
1273 free_ftrace_hash(ops->func_hash->notrace_hash);
1274}
1275
1276static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1277{
1278 struct ftrace_hash *hash;
1279 int size;
1280
1281 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1282 if (!hash)
1283 return NULL;
1284
1285 size = 1 << size_bits;
1286 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1287
1288 if (!hash->buckets) {
1289 kfree(hash);
1290 return NULL;
1291 }
1292
1293 hash->size_bits = size_bits;
1294
1295 return hash;
1296}
1297
1298
1299static int ftrace_add_mod(struct trace_array *tr,
1300 const char *func, const char *module,
1301 int enable)
1302{
1303 struct ftrace_mod_load *ftrace_mod;
1304 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1305
1306 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1307 if (!ftrace_mod)
1308 return -ENOMEM;
1309
1310 INIT_LIST_HEAD(&ftrace_mod->list);
1311 ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1312 ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1313 ftrace_mod->enable = enable;
1314
1315 if (!ftrace_mod->func || !ftrace_mod->module)
1316 goto out_free;
1317
1318 list_add(&ftrace_mod->list, mod_head);
1319
1320 return 0;
1321
1322 out_free:
1323 free_ftrace_mod(ftrace_mod);
1324
1325 return -ENOMEM;
1326}
1327
1328static struct ftrace_hash *
1329alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1330{
1331 struct ftrace_func_entry *entry;
1332 struct ftrace_hash *new_hash;
1333 int size;
1334 int ret;
1335 int i;
1336
1337 new_hash = alloc_ftrace_hash(size_bits);
1338 if (!new_hash)
1339 return NULL;
1340
1341 if (hash)
1342 new_hash->flags = hash->flags;
1343
1344 /* Empty hash? */
1345 if (ftrace_hash_empty(hash))
1346 return new_hash;
1347
1348 size = 1 << hash->size_bits;
1349 for (i = 0; i < size; i++) {
1350 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1351 ret = add_hash_entry(new_hash, entry->ip);
1352 if (ret < 0)
1353 goto free_hash;
1354 }
1355 }
1356
1357 FTRACE_WARN_ON(new_hash->count != hash->count);
1358
1359 return new_hash;
1360
1361 free_hash:
1362 free_ftrace_hash(new_hash);
1363 return NULL;
1364}
1365
1366static void
1367ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1368static void
1369ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1370
1371static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1372 struct ftrace_hash *new_hash);
1373
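/*
 * Move all entries of @src into a new, appropriately sized hash;
 * @src is left empty but is not freed.
 */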
1374static struct ftrace_hash *
1375__ftrace_hash_move(struct ftrace_hash *src)
1376{
1377 struct ftrace_func_entry *entry;
1378 struct hlist_node *tn;
1379 struct hlist_head *hhd;
1380 struct ftrace_hash *new_hash;
1381 int size = src->count;
1382 int bits = 0;
1383 int i;
1384
1385 /*
1386 * If the new source is empty, just return the empty_hash.
1387 */
1388 if (ftrace_hash_empty(src))
1389 return EMPTY_HASH;
1390
1391 /*
1392 * Make the hash size about 1/2 the # found
1393 */
1394 for (size /= 2; size; size >>= 1)
1395 bits++;
1396
1397 /* Don't allocate too much */
1398 if (bits > FTRACE_HASH_MAX_BITS)
1399 bits = FTRACE_HASH_MAX_BITS;
1400
1401 new_hash = alloc_ftrace_hash(bits);
1402 if (!new_hash)
1403 return NULL;
1404
1405 new_hash->flags = src->flags;
1406
1407 size = 1 << src->size_bits;
1408 for (i = 0; i < size; i++) {
1409 hhd = &src->buckets[i];
1410 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1411 remove_hash_entry(src, entry);
1412 __add_hash_entry(new_hash, entry);
1413 }
1414 }
1415
1416 return new_hash;
1417}
1418
1419static int
1420ftrace_hash_move(struct ftrace_ops *ops, int enable,
1421 struct ftrace_hash **dst, struct ftrace_hash *src)
1422{
1423 struct ftrace_hash *new_hash;
1424 int ret;
1425
1426 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1427 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1428 return -EINVAL;
1429
1430 new_hash = __ftrace_hash_move(src);
1431 if (!new_hash)
1432 return -ENOMEM;
1433
1434 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1435 if (enable) {
1436 /* IPMODIFY should be updated only when filter_hash updating */
1437 ret = ftrace_hash_ipmodify_update(ops, new_hash);
1438 if (ret < 0) {
1439 free_ftrace_hash(new_hash);
1440 return ret;
1441 }
1442 }
1443
1444 /*
1445 * Remove the current set, update the hash and add
1446 * them back.
1447 */
1448 ftrace_hash_rec_disable_modify(ops, enable);
1449
1450 rcu_assign_pointer(*dst, new_hash);
1451
1452 ftrace_hash_rec_enable_modify(ops, enable);
1453
1454 return 0;
1455}
1456
1457static bool hash_contains_ip(unsigned long ip,
1458 struct ftrace_ops_hash *hash)
1459{
1460 /*
1461 * The function record is a match if it exists in the filter
1462 * hash and not in the notrace hash. Note, an empty hash is
1463 * considered a match for the filter hash, but an empty
1464 * notrace hash is considered not in the notrace hash.
1465 */
1466 return (ftrace_hash_empty(hash->filter_hash) ||
1467 __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1468 (ftrace_hash_empty(hash->notrace_hash) ||
1469 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1470}
1471
1472/*
1473 * Test the hashes for this ops to see if we want to call
1474 * the ops->func or not.
1475 *
1476 * It's a match if the ip is in the ops->filter_hash or
1477 * the filter_hash does not exist or is empty,
1478 * AND
1479 * the ip is not in the ops->notrace_hash.
1480 *
1481 * This needs to be called with preemption disabled as
1482 * the hashes are freed with call_rcu().
1483 */
1484int
1485ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1486{
1487 struct ftrace_ops_hash hash;
1488 int ret;
1489
1490#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1491 /*
1492 * There's a small race when adding ops where the ftrace handler
1493 * that wants regs may be called without them. We cannot
1494 * allow that handler to be called if regs is NULL.
1495 */
1496 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1497 return 0;
1498#endif
1499
1500 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1501 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1502
1503 if (hash_contains_ip(ip, &hash))
1504 ret = 1;
1505 else
1506 ret = 0;
1507
1508 return ret;
1509}
1510
1511/*
1512 * This is a double for loop. Do not use 'break' to break out of the loop;
1513 * you must use a goto.
1514 */
1515#define do_for_each_ftrace_rec(pg, rec) \
1516 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1517 int _____i; \
1518 for (_____i = 0; _____i < pg->index; _____i++) { \
1519 rec = &pg->records[_____i];
1520
1521#define while_for_each_ftrace_rec() \
1522 } \
1523 }
1524
1525
1526static int ftrace_cmp_recs(const void *a, const void *b)
1527{
1528 const struct dyn_ftrace *key = a;
1529 const struct dyn_ftrace *rec = b;
1530
1531 if (key->flags < rec->ip)
1532 return -1;
1533 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1534 return 1;
1535 return 0;
1536}
1537
1538/**
1539 * ftrace_location_range - return the first address of a traced location
1540 * if it touches the given ip range
1541 * @start: start of range to search.
1542 * @end: end of range to search (inclusive). @end points to the last byte
1543 * to check.
1544 *
1545 * Returns rec->ip if the related ftrace location is at least partly within
1546 * the given address range. That is, the first address of the instruction
1547 * that is either a NOP or call to the function tracer. It checks the ftrace
1548 * internal tables to determine if the address belongs or not.
1549 */
1550unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1551{
1552 struct ftrace_page *pg;
1553 struct dyn_ftrace *rec;
1554 struct dyn_ftrace key;
1555 unsigned long ip = 0;
1556
1557 preempt_disable_notrace();
1558 key.ip = start;
1559 key.flags = end; /* overload flags, as it is unsigned long */
1560
1561 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1562 if (pg->index == 0 ||
1563 end < pg->records[0].ip ||
1564 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1565 continue;
1566 rec = bsearch(&key, pg->records, pg->index,
1567 sizeof(struct dyn_ftrace),
1568 ftrace_cmp_recs);
1569 if (rec) {
1571 ip = rec->ip;
1572 break;
1573 }
1574 }
1575 preempt_enable_notrace();
1576 return ip;
1577}
1578
1579/**
1580 * ftrace_location - return true if the ip given is a traced location
1581 * @ip: the instruction pointer to check
1582 *
1583 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1584 * That is, the instruction that is either a NOP or call to
1585 * the function tracer. It checks the ftrace internal tables to
1586 * determine if the address belongs or not.
1587 */
1588unsigned long ftrace_location(unsigned long ip)
1589{
1590 return ftrace_location_range(ip, ip);
1591}
1592
1593/**
1594 * ftrace_text_reserved - return true if range contains an ftrace location
1595 * @start: start of range to search
1596 * @end: end of range to search (inclusive). @end points to the last byte to check.
1597 *
1598 * Returns 1 if @start and @end contain a ftrace location.
1599 * That is, the instruction that is either a NOP or call to
1600 * the function tracer. It checks the ftrace internal tables to
1601 * determine if the address belongs or not.
1602 */
1603int ftrace_text_reserved(const void *start, const void *end)
1604{
1605 unsigned long ret;
1606
1607 ret = ftrace_location_range((unsigned long)start,
1608 (unsigned long)end);
1609
1610 return (int)!!ret;
1611}
1612
1613/* Test if ops registered to this rec needs regs */
1614static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1615{
1616 struct ftrace_ops *ops;
1617 bool keep_regs = false;
1618
1619 for (ops = ftrace_ops_list;
1620 ops != &ftrace_list_end; ops = ops->next) {
1621 /* pass rec in as regs to have non-NULL val */
1622 if (ftrace_ops_test(ops, rec->ip, rec)) {
1623 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1624 keep_regs = true;
1625 break;
1626 }
1627 }
1628 }
1629
1630 return keep_regs;
1631}
1632
1633static struct ftrace_ops *
1634ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1635static struct ftrace_ops *
1636ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1637static struct ftrace_ops *
1638ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1639
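/*
 * Adjust the ref count (and the TRAMP/REGS flags) of every record matched by
 * this ops' hashes. Returns true if any call site needs to be modified.
 */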
1640static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1641 int filter_hash,
1642 bool inc)
1643{
1644 struct ftrace_hash *hash;
1645 struct ftrace_hash *other_hash;
1646 struct ftrace_page *pg;
1647 struct dyn_ftrace *rec;
1648 bool update = false;
1649 int count = 0;
1650 int all = false;
1651
1652 /* Only update if the ops has been registered */
1653 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1654 return false;
1655
1656 /*
1657 * In the filter_hash case:
1658 * If the count is zero, we update all records.
1659 * Otherwise we just update the items in the hash.
1660 *
1661 * In the notrace_hash case:
1662 * We enable the update in the hash.
1663 * As disabling notrace means enabling the tracing,
1664 * and enabling notrace means disabling, the inc variable
1665 * gets inverted.
1666 */
1667 if (filter_hash) {
1668 hash = ops->func_hash->filter_hash;
1669 other_hash = ops->func_hash->notrace_hash;
1670 if (ftrace_hash_empty(hash))
1671 all = true;
1672 } else {
1673 inc = !inc;
1674 hash = ops->func_hash->notrace_hash;
1675 other_hash = ops->func_hash->filter_hash;
1676 /*
1677 * If the notrace hash has no items,
1678 * then there's nothing to do.
1679 */
1680 if (ftrace_hash_empty(hash))
1681 return false;
1682 }
1683
1684 do_for_each_ftrace_rec(pg, rec) {
1685 int in_other_hash = 0;
1686 int in_hash = 0;
1687 int match = 0;
1688
1689 if (rec->flags & FTRACE_FL_DISABLED)
1690 continue;
1691
1692 if (all) {
1693 /*
1694 * Only the filter_hash affects all records.
1695 * Update if the record is not in the notrace hash.
1696 */
1697 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1698 match = 1;
1699 } else {
1700 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1701 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1702
1703 /*
1704 * If filter_hash is set, we want to match all functions
1705 * that are in the hash but not in the other hash.
1706 *
1707 * If filter_hash is not set, then we are decrementing.
1708 * That means we match anything that is in the hash
1709 * and also in the other_hash. That is, we need to turn
1710 * off functions in the other hash because they are disabled
1711 * by this hash.
1712 */
1713 if (filter_hash && in_hash && !in_other_hash)
1714 match = 1;
1715 else if (!filter_hash && in_hash &&
1716 (in_other_hash || ftrace_hash_empty(other_hash)))
1717 match = 1;
1718 }
1719 if (!match)
1720 continue;
1721
1722 if (inc) {
1723 rec->flags++;
1724 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1725 return false;
1726
1727 /*
1728 * If there's only a single callback registered to a
1729 * function, and the ops has a trampoline registered
1730 * for it, then we can call it directly.
1731 */
1732 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1733 rec->flags |= FTRACE_FL_TRAMP;
1734 else
1735 /*
1736 * If we are adding another function callback
1737 * to this function, and the previous had a
1738 * custom trampoline in use, then we need to go
1739 * back to the default trampoline.
1740 */
1741 rec->flags &= ~FTRACE_FL_TRAMP;
1742
1743 /*
1744 * If any ops wants regs saved for this function
1745 * then all ops will get saved regs.
1746 */
1747 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1748 rec->flags |= FTRACE_FL_REGS;
1749 } else {
1750 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1751 return false;
1752 rec->flags--;
1753
1754 /*
1755 * If the rec had REGS enabled and the ops that is
1756 * being removed had REGS set, then see if there is
1757 * still any ops for this record that wants regs.
1758 * If not, we can stop recording them.
1759 */
1760 if (ftrace_rec_count(rec) > 0 &&
1761 rec->flags & FTRACE_FL_REGS &&
1762 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1763 if (!test_rec_ops_needs_regs(rec))
1764 rec->flags &= ~FTRACE_FL_REGS;
1765 }
1766
1767 /*
1768 * The TRAMP needs to be set only if rec count
1769 * is decremented to one, and the ops that is
1770 * left has a trampoline, as TRAMP can only be
1771 * enabled if there is only a single ops attached
1772 * to it.
1773 */
1774 if (ftrace_rec_count(rec) == 1 &&
1775 ftrace_find_tramp_ops_any_other(rec, ops))
1776 rec->flags |= FTRACE_FL_TRAMP;
1777 else
1778 rec->flags &= ~FTRACE_FL_TRAMP;
1779
1780 /*
1781 * flags will be cleared in ftrace_check_record()
1782 * if rec count is zero.
1783 */
1784 }
1785 count++;
1786
1787 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1788 update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1789
1790 /* Shortcut, if we handled all records, we are done. */
1791 if (!all && count == hash->count)
1792 return update;
1793 } while_for_each_ftrace_rec();
1794
1795 return update;
1796}
1797
1798static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1799 int filter_hash)
1800{
1801 return __ftrace_hash_rec_update(ops, filter_hash, 0);
1802}
1803
1804static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1805 int filter_hash)
1806{
1807 return __ftrace_hash_rec_update(ops, filter_hash, 1);
1808}
1809
1810static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1811 int filter_hash, int inc)
1812{
1813 struct ftrace_ops *op;
1814
1815 __ftrace_hash_rec_update(ops, filter_hash, inc);
1816
1817 if (ops->func_hash != &global_ops.local_hash)
1818 return;
1819
1820 /*
1821 * If the ops shares the global_ops hash, then we need to update
1822 * all ops that are enabled and use this hash.
1823 */
1824 do_for_each_ftrace_op(op, ftrace_ops_list) {
1825 /* Already done */
1826 if (op == ops)
1827 continue;
1828 if (op->func_hash == &global_ops.local_hash)
1829 __ftrace_hash_rec_update(op, filter_hash, inc);
1830 } while_for_each_ftrace_op(op);
1831}
1832
1833static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1834 int filter_hash)
1835{
1836 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1837}
1838
1839static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1840 int filter_hash)
1841{
1842 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1843}
1844
1845/*
1846 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1847 * or no update is needed, -EBUSY if it detects a conflict of the flag
1848 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1849 * Note that old_hash and new_hash have the following meanings:
1850 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1851 * - If the hash is EMPTY_HASH, it hits nothing
1852 * - Anything else hits the recs which match the hash entries.
1853 */
1854static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1855 struct ftrace_hash *old_hash,
1856 struct ftrace_hash *new_hash)
1857{
1858 struct ftrace_page *pg;
1859 struct dyn_ftrace *rec, *end = NULL;
1860 int in_old, in_new;
1861
1862 /* Only update if the ops has been registered */
1863 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1864 return 0;
1865
1866 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1867 return 0;
1868
1869 /*
1870 * Since IPMODIFY is a very address-sensitive action, we do not
1871 * allow ftrace_ops to set all functions to a new hash.
1872 */
1873 if (!new_hash || !old_hash)
1874 return -EINVAL;
1875
1876 /* Update rec->flags */
1877 do_for_each_ftrace_rec(pg, rec) {
1878
1879 if (rec->flags & FTRACE_FL_DISABLED)
1880 continue;
1881
1882 /* We need to update only differences of filter_hash */
1883 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1884 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1885 if (in_old == in_new)
1886 continue;
1887
1888 if (in_new) {
1889 /* New entries must ensure no others are using it */
1890 if (rec->flags & FTRACE_FL_IPMODIFY)
1891 goto rollback;
1892 rec->flags |= FTRACE_FL_IPMODIFY;
1893 } else /* Removed entry */
1894 rec->flags &= ~FTRACE_FL_IPMODIFY;
1895 } while_for_each_ftrace_rec();
1896
1897 return 0;
1898
1899rollback:
1900 end = rec;
1901
1902 /* Roll back what we did above */
1903 do_for_each_ftrace_rec(pg, rec) {
1904
1905 if (rec->flags & FTRACE_FL_DISABLED)
1906 continue;
1907
1908 if (rec == end)
1909 goto err_out;
1910
1911 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1912 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1913 if (in_old == in_new)
1914 continue;
1915
1916 if (in_new)
1917 rec->flags &= ~FTRACE_FL_IPMODIFY;
1918 else
1919 rec->flags |= FTRACE_FL_IPMODIFY;
1920 } while_for_each_ftrace_rec();
1921
1922err_out:
1923 return -EBUSY;
1924}
1925
1926static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1927{
1928 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1929
1930 if (ftrace_hash_empty(hash))
1931 hash = NULL;
1932
1933 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1934}
1935
1936/* Disabling always succeeds */
1937static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1938{
1939 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1940
1941 if (ftrace_hash_empty(hash))
1942 hash = NULL;
1943
1944 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1945}
1946
1947static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1948 struct ftrace_hash *new_hash)
1949{
1950 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1951
1952 if (ftrace_hash_empty(old_hash))
1953 old_hash = NULL;
1954
1955 if (ftrace_hash_empty(new_hash))
1956 new_hash = NULL;
1957
1958 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1959}
1960
1961static void print_ip_ins(const char *fmt, const unsigned char *p)
1962{
1963 char ins[MCOUNT_INSN_SIZE];
1964 int i;
1965
1966 if (probe_kernel_read(ins, p, MCOUNT_INSN_SIZE)) {
1967 printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
1968 return;
1969 }
1970
1971 printk(KERN_CONT "%s", fmt);
1972
1973 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1974 printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
1975}
1976
1977enum ftrace_bug_type ftrace_bug_type;
1978const void *ftrace_expected;
1979
1980static void print_bug_type(void)
1981{
1982 switch (ftrace_bug_type) {
1983 case FTRACE_BUG_UNKNOWN:
1984 break;
1985 case FTRACE_BUG_INIT:
1986 pr_info("Initializing ftrace call sites\n");
1987 break;
1988 case FTRACE_BUG_NOP:
1989 pr_info("Setting ftrace call site to NOP\n");
1990 break;
1991 case FTRACE_BUG_CALL:
1992 pr_info("Setting ftrace call site to call ftrace function\n");
1993 break;
1994 case FTRACE_BUG_UPDATE:
1995 pr_info("Updating ftrace call site to call a different ftrace function\n");
1996 break;
1997 }
1998}
1999
2000/**
2001 * ftrace_bug - report and shutdown function tracer
2002 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2003 * @rec: The record that failed
2004 *
2005 * The arch code that enables or disables the function tracing
2006 * can call ftrace_bug() when it has detected a problem in
2007 * modifying the code. @failed should be one of either:
2008 * EFAULT - if the problem happens on reading the @ip address
2009 * EINVAL - if what is read at @ip is not what was expected
2010 * EPERM - if the problem happens on writing to the @ip address
2011 */
2012void ftrace_bug(int failed, struct dyn_ftrace *rec)
2013{
2014 unsigned long ip = rec ? rec->ip : 0;
2015
2016 switch (failed) {
2017 case -EFAULT:
2018 FTRACE_WARN_ON_ONCE(1);
2019 pr_info("ftrace faulted on modifying ");
2020 print_ip_sym(ip);
2021 break;
2022 case -EINVAL:
2023 FTRACE_WARN_ON_ONCE(1);
2024 pr_info("ftrace failed to modify ");
2025 print_ip_sym(ip);
2026 print_ip_ins(" actual: ", (unsigned char *)ip);
2027 pr_cont("\n");
2028 if (ftrace_expected) {
2029 print_ip_ins(" expected: ", ftrace_expected);
2030 pr_cont("\n");
2031 }
2032 break;
2033 case -EPERM:
2034 FTRACE_WARN_ON_ONCE(1);
2035 pr_info("ftrace faulted on writing ");
2036 print_ip_sym(ip);
2037 break;
2038 default:
2039 FTRACE_WARN_ON_ONCE(1);
2040 pr_info("ftrace faulted on unknown error ");
2041 print_ip_sym(ip);
2042 }
2043 print_bug_type();
2044 if (rec) {
2045 struct ftrace_ops *ops = NULL;
2046
2047 pr_info("ftrace record flags: %lx\n", rec->flags);
2048 pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2049 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2050 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2051 ops = ftrace_find_tramp_ops_any(rec);
2052 if (ops) {
2053 do {
2054 pr_cont("\ttramp: %pS (%pS)",
2055 (void *)ops->trampoline,
2056 (void *)ops->func);
2057 ops = ftrace_find_tramp_ops_next(rec, ops);
2058 } while (ops);
2059 } else
2060 pr_cont("\ttramp: ERROR!");
2061
2062 }
2063 ip = ftrace_get_addr_curr(rec);
2064 pr_cont("\n expected tramp: %lx\n", ip);
2065 }
2066}
2067
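/*
 * Work out what change (if any) a record needs and return an FTRACE_UPDATE_*
 * value. When @update is set, the new state is also written into rec->flags.
 */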
2068static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2069{
2070 unsigned long flag = 0UL;
2071
2072 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2073
2074 if (rec->flags & FTRACE_FL_DISABLED)
2075 return FTRACE_UPDATE_IGNORE;
2076
2077 /*
2078 * If we are updating calls:
2079 *
2080 * If the record has a ref count, then we need to enable it
2081 * because someone is using it.
2082 *
2083 * Otherwise we make sure it's disabled.
2084 *
2085 * If we are disabling calls, then disable all records that
2086 * are enabled.
2087 */
2088 if (enable && ftrace_rec_count(rec))
2089 flag = FTRACE_FL_ENABLED;
2090
2091 /*
2092 * If enabling and the REGS flag does not match the REGS_EN, or
2093 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2094 * this record. Set flags to fail the compare against ENABLED.
2095 */
2096 if (flag) {
2097 if (!(rec->flags & FTRACE_FL_REGS) !=
2098 !(rec->flags & FTRACE_FL_REGS_EN))
2099 flag |= FTRACE_FL_REGS;
2100
2101 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2102 !(rec->flags & FTRACE_FL_TRAMP_EN))
2103 flag |= FTRACE_FL_TRAMP;
2104 }
2105
2106 /* If the state of this record hasn't changed, then do nothing */
2107 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2108 return FTRACE_UPDATE_IGNORE;
2109
2110 if (flag) {
2111 /* Save off if rec is being enabled (for return value) */
2112 flag ^= rec->flags & FTRACE_FL_ENABLED;
2113
2114 if (update) {
2115 rec->flags |= FTRACE_FL_ENABLED;
2116 if (flag & FTRACE_FL_REGS) {
2117 if (rec->flags & FTRACE_FL_REGS)
2118 rec->flags |= FTRACE_FL_REGS_EN;
2119 else
2120 rec->flags &= ~FTRACE_FL_REGS_EN;
2121 }
2122 if (flag & FTRACE_FL_TRAMP) {
2123 if (rec->flags & FTRACE_FL_TRAMP)
2124 rec->flags |= FTRACE_FL_TRAMP_EN;
2125 else
2126 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2127 }
2128 }
2129
2130 /*
2131 * If this record is being updated from a nop, then
2132 * return UPDATE_MAKE_CALL.
2133 * Otherwise,
2134 * return UPDATE_MODIFY_CALL to tell the caller to convert
2135 * from the save-regs to a non-save-regs function, or
2136 * vice versa, or from a trampoline call.
2137 */
2138 if (flag & FTRACE_FL_ENABLED) {
2139 ftrace_bug_type = FTRACE_BUG_CALL;
2140 return FTRACE_UPDATE_MAKE_CALL;
2141 }
2142
2143 ftrace_bug_type = FTRACE_BUG_UPDATE;
2144 return FTRACE_UPDATE_MODIFY_CALL;
2145 }
2146
2147 if (update) {
2148 /* If there's no more users, clear all flags */
2149 if (!ftrace_rec_count(rec))
2150 rec->flags = 0;
2151 else
2152 /*
2153 * Just disable the record, but keep the ops TRAMP
2154 * and REGS states. The _EN flags must be disabled though.
2155 */
2156 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2157 FTRACE_FL_REGS_EN);
2158 }
2159
2160 ftrace_bug_type = FTRACE_BUG_NOP;
2161 return FTRACE_UPDATE_MAKE_NOP;
2162}
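
/*
 * Illustrative walk-through of ftrace_check_record() (an example only,
 * reasoning from the code above): suppose a function is already being
 * traced without regs saving and a newly added ops wants pt_regs. The
 * record then has FTRACE_FL_ENABLED and FTRACE_FL_REGS set but
 * FTRACE_FL_REGS_EN clear, so flag becomes ENABLED|REGS, the compare
 * against the ENABLED bit fails, the ENABLED bit is xor'd away, REGS_EN
 * is set on update, and FTRACE_UPDATE_MODIFY_CALL is returned so the
 * call site gets converted to the regs-saving caller.
 */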
2163
2164/**
2165 * ftrace_update_record - set a record that now is tracing or not
2166 * @rec: the record to update
2167 * @enable: set to true if the record is tracing, false to force disable
2168 *
2169 * The records that represent all functions that can be traced need
2170 * to be updated when tracing has been enabled.
2171 */
2172int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2173{
2174 return ftrace_check_record(rec, enable, true);
2175}
2176
2177/**
2178 * ftrace_test_record - check if the record has been enabled or not
2179 * @rec: the record to test
2180 * @enable: set to true to check if enabled, false if it is disabled
2181 *
2182 * The arch code may need to test if a record is already set to
2183 * tracing to determine how to modify the function code that it
2184 * represents.
2185 */
2186int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2187{
2188 return ftrace_check_record(rec, enable, false);
2189}
2190
2191static struct ftrace_ops *
2192ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2193{
2194 struct ftrace_ops *op;
2195 unsigned long ip = rec->ip;
2196
2197 do_for_each_ftrace_op(op, ftrace_ops_list) {
2198
2199 if (!op->trampoline)
2200 continue;
2201
2202 if (hash_contains_ip(ip, op->func_hash))
2203 return op;
2204 } while_for_each_ftrace_op(op);
2205
2206 return NULL;
2207}
2208
2209static struct ftrace_ops *
2210ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2211{
2212 struct ftrace_ops *op;
2213 unsigned long ip = rec->ip;
2214
2215 do_for_each_ftrace_op(op, ftrace_ops_list) {
2216
2217 if (op == op_exclude || !op->trampoline)
2218 continue;
2219
2220 if (hash_contains_ip(ip, op->func_hash))
2221 return op;
2222 } while_for_each_ftrace_op(op);
2223
2224 return NULL;
2225}
2226
2227static struct ftrace_ops *
2228ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2229 struct ftrace_ops *op)
2230{
2231 unsigned long ip = rec->ip;
2232
2233 while_for_each_ftrace_op(op) {
2234
2235 if (!op->trampoline)
2236 continue;
2237
2238 if (hash_contains_ip(ip, op->func_hash))
2239 return op;
2240 }
2241
2242 return NULL;
2243}
2244
2245static struct ftrace_ops *
2246ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2247{
2248 struct ftrace_ops *op;
2249 unsigned long ip = rec->ip;
2250
2251 /*
2252 * Need to check removed ops first.
2253 * If they are being removed, and this rec has a tramp,
2254 * and this rec is in the ops list, then it would be the
2255 * one with the tramp.
2256 */
2257 if (removed_ops) {
2258 if (hash_contains_ip(ip, &removed_ops->old_hash))
2259 return removed_ops;
2260 }
2261
2262 /*
2263 * Need to find the current trampoline for a rec.
2264 * Now, a trampoline is only attached to a rec if there
2265 * was a single 'ops' attached to it. But this can be called
2266 * when we are adding another op to the rec or removing the
2267 * current one. Thus, if the op is being added, we can
2268 * ignore it because it hasn't attached itself to the rec
2269 * yet.
2270 *
2271 * If an ops is being modified (hooking to different functions)
2272 * then we don't care about the new functions that are being
2273 * added, just the old ones (that are probably being removed).
2274 *
2275 * If we are adding an ops to a function that is already using
2276 * a trampoline, that trampoline needs to be removed (trampolines
2277 * are only used when a single ops is attached), so an ops that is
2278 * not being modified also needs to be checked.
2279 */
2280 do_for_each_ftrace_op(op, ftrace_ops_list) {
2281
2282 if (!op->trampoline)
2283 continue;
2284
2285 /*
2286 * If the ops is being added, it hasn't gotten to
2287 * the point to be removed from this tree yet.
2288 */
2289 if (op->flags & FTRACE_OPS_FL_ADDING)
2290 continue;
2291
2292
2293 /*
2294 * If the ops is being modified and is in the old
2295 * hash, then it is probably being removed from this
2296 * function.
2297 */
2298 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2299 hash_contains_ip(ip, &op->old_hash))
2300 return op;
2301 /*
2302 * If the ops is not being added or modified, and it's
2303 * in its normal filter hash, then this must be the one
2304 * we want!
2305 */
2306 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2307 hash_contains_ip(ip, op->func_hash))
2308 return op;
2309
2310 } while_for_each_ftrace_op(op);
2311
2312 return NULL;
2313}
2314
2315static struct ftrace_ops *
2316ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2317{
2318 struct ftrace_ops *op;
2319 unsigned long ip = rec->ip;
2320
2321 do_for_each_ftrace_op(op, ftrace_ops_list) {
2322 /* pass rec in as regs to have non-NULL val */
2323 if (hash_contains_ip(ip, op->func_hash))
2324 return op;
2325 } while_for_each_ftrace_op(op);
2326
2327 return NULL;
2328}
2329
2330/**
2331 * ftrace_get_addr_new - Get the call address to set to
2332 * @rec: The ftrace record descriptor
2333 *
2334 * If the record has the FTRACE_FL_REGS set, that means that it
2335 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2336 * is not set, then it wants to convert to the normal callback.
2337 *
2338 * Returns the address of the trampoline to set to
2339 */
2340unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2341{
2342 struct ftrace_ops *ops;
2343
2344 /* Trampolines take precedence over regs */
2345 if (rec->flags & FTRACE_FL_TRAMP) {
2346 ops = ftrace_find_tramp_ops_new(rec);
2347 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2348 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2349 (void *)rec->ip, (void *)rec->ip, rec->flags);
2350 /* Ftrace is shutting down, return anything */
2351 return (unsigned long)FTRACE_ADDR;
2352 }
2353 return ops->trampoline;
2354 }
2355
2356 if (rec->flags & FTRACE_FL_REGS)
2357 return (unsigned long)FTRACE_REGS_ADDR;
2358 else
2359 return (unsigned long)FTRACE_ADDR;
2360}
2361
2362/**
2363 * ftrace_get_addr_curr - Get the call address that is already there
2364 * @rec: The ftrace record descriptor
2365 *
2366 * The FTRACE_FL_REGS_EN is set when the record already points to
2367 * a function that saves all the regs. Basically the '_EN' version
2368 * represents the current state of the function.
2369 *
2370 * Returns the address of the trampoline that is currently being called
2371 */
2372unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2373{
2374 struct ftrace_ops *ops;
2375
2376 /* Trampolines take precedence over regs */
2377 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2378 ops = ftrace_find_tramp_ops_curr(rec);
2379 if (FTRACE_WARN_ON(!ops)) {
2380 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2381 (void *)rec->ip, (void *)rec->ip);
2382 /* Ftrace is shutting down, return anything */
2383 return (unsigned long)FTRACE_ADDR;
2384 }
2385 return ops->trampoline;
2386 }
2387
2388 if (rec->flags & FTRACE_FL_REGS_EN)
2389 return (unsigned long)FTRACE_REGS_ADDR;
2390 else
2391 return (unsigned long)FTRACE_ADDR;
2392}
2393
2394static int
2395__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2396{
2397 unsigned long ftrace_old_addr;
2398 unsigned long ftrace_addr;
2399 int ret;
2400
2401 ftrace_addr = ftrace_get_addr_new(rec);
2402
2403 /* This needs to be done before we call ftrace_update_record */
2404 ftrace_old_addr = ftrace_get_addr_curr(rec);
2405
2406 ret = ftrace_update_record(rec, enable);
2407
2408 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2409
2410 switch (ret) {
2411 case FTRACE_UPDATE_IGNORE:
2412 return 0;
2413
2414 case FTRACE_UPDATE_MAKE_CALL:
2415 ftrace_bug_type = FTRACE_BUG_CALL;
2416 return ftrace_make_call(rec, ftrace_addr);
2417
2418 case FTRACE_UPDATE_MAKE_NOP:
2419 ftrace_bug_type = FTRACE_BUG_NOP;
2420 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2421
2422 case FTRACE_UPDATE_MODIFY_CALL:
2423 ftrace_bug_type = FTRACE_BUG_UPDATE;
2424 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2425 }
2426
2427 return -1; /* unknown ftrace bug */
2428}
2429
2430void __weak ftrace_replace_code(int mod_flags)
2431{
2432 struct dyn_ftrace *rec;
2433 struct ftrace_page *pg;
2434 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2435 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2436 int failed;
2437
2438 if (unlikely(ftrace_disabled))
2439 return;
2440
2441 do_for_each_ftrace_rec(pg, rec) {
2442
2443 if (rec->flags & FTRACE_FL_DISABLED)
2444 continue;
2445
2446 failed = __ftrace_replace_code(rec, enable);
2447 if (failed) {
2448 ftrace_bug(failed, rec);
2449 /* Stop processing */
2450 return;
2451 }
2452 if (schedulable)
2453 cond_resched();
2454 } while_for_each_ftrace_rec();
2455}
2456
2457struct ftrace_rec_iter {
2458 struct ftrace_page *pg;
2459 int index;
2460};
2461
2462/**
2463 * ftrace_rec_iter_start - start up iterating over traced functions
2464 *
2465 * Returns an iterator handle that is used to iterate over all
2466 * the records that represent address locations where functions
2467 * are traced.
2468 *
2469 * May return NULL if no records are available.
2470 */
2471struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2472{
2473 /*
2474 * We only use a single iterator.
2475 * Protected by the ftrace_lock mutex.
2476 */
2477 static struct ftrace_rec_iter ftrace_rec_iter;
2478 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2479
2480 iter->pg = ftrace_pages_start;
2481 iter->index = 0;
2482
2483 /* Could have empty pages */
2484 while (iter->pg && !iter->pg->index)
2485 iter->pg = iter->pg->next;
2486
2487 if (!iter->pg)
2488 return NULL;
2489
2490 return iter;
2491}
2492
2493/**
2494 * ftrace_rec_iter_next - get the next record to process.
2495 * @iter: The handle to the iterator.
2496 *
2497 * Returns the next iterator after the given iterator @iter.
2498 */
2499struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2500{
2501 iter->index++;
2502
2503 if (iter->index >= iter->pg->index) {
2504 iter->pg = iter->pg->next;
2505 iter->index = 0;
2506
2507 /* Could have empty pages */
2508 while (iter->pg && !iter->pg->index)
2509 iter->pg = iter->pg->next;
2510 }
2511
2512 if (!iter->pg)
2513 return NULL;
2514
2515 return iter;
2516}
2517
2518/**
2519 * ftrace_rec_iter_record - get the record at the iterator location
2520 * @iter: The current iterator location
2521 *
2522 * Returns the record that the current @iter is at.
2523 */
2524struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2525{
2526 return &iter->pg->records[iter->index];
2527}
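
/*
 * Illustrative sketch (not part of the original file): arch code typically
 * combines the three iterator helpers above to walk every traced call
 * site, for example:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		(patch the call site at rec->ip here)
 *	}
 */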
2528
2529static int
2530ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2531{
2532 int ret;
2533
2534 if (unlikely(ftrace_disabled))
2535 return 0;
2536
2537 ret = ftrace_init_nop(mod, rec);
2538 if (ret) {
2539 ftrace_bug_type = FTRACE_BUG_INIT;
2540 ftrace_bug(ret, rec);
2541 return 0;
2542 }
2543 return 1;
2544}
2545
2546/*
2547 * archs can override this function if they must do something
2548 * before the code modification is performed.
2549 */
2550int __weak ftrace_arch_code_modify_prepare(void)
2551{
2552 return 0;
2553}
2554
2555/*
2556 * archs can override this function if they must do something
2557 * after the code modification is performed.
2558 */
2559int __weak ftrace_arch_code_modify_post_process(void)
2560{
2561 return 0;
2562}
2563
2564void ftrace_modify_all_code(int command)
2565{
2566 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2567 int mod_flags = 0;
2568 int err = 0;
2569
2570 if (command & FTRACE_MAY_SLEEP)
2571 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2572
2573 /*
2574 * If the ftrace_caller calls a ftrace_ops func directly,
2575 * we need to make sure that it only traces functions it
2576 * expects to trace. When doing the switch of functions,
2577 * we need to update to the ftrace_ops_list_func first
2578 * before the transition between the old and new calls is made,
2579 * as the ftrace_ops_list_func will check the ops hashes
2580 * to make sure the ops have the right functions
2581 * traced.
2582 */
2583 if (update) {
2584 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2585 if (FTRACE_WARN_ON(err))
2586 return;
2587 }
2588
2589 if (command & FTRACE_UPDATE_CALLS)
2590 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2591 else if (command & FTRACE_DISABLE_CALLS)
2592 ftrace_replace_code(mod_flags);
2593
2594 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2595 function_trace_op = set_function_trace_op;
2596 smp_wmb();
2597 /* If irqs are disabled, we are in stop machine */
2598 if (!irqs_disabled())
2599 smp_call_function(ftrace_sync_ipi, NULL, 1);
2600 err = ftrace_update_ftrace_func(ftrace_trace_function);
2601 if (FTRACE_WARN_ON(err))
2602 return;
2603 }
2604
2605 if (command & FTRACE_START_FUNC_RET)
2606 err = ftrace_enable_ftrace_graph_caller();
2607 else if (command & FTRACE_STOP_FUNC_RET)
2608 err = ftrace_disable_ftrace_graph_caller();
2609 FTRACE_WARN_ON(err);
2610}
2611
2612static int __ftrace_modify_code(void *data)
2613{
2614 int *command = data;
2615
2616 ftrace_modify_all_code(*command);
2617
2618 return 0;
2619}
2620
2621/**
2622 * ftrace_run_stop_machine - go back to the stop machine method
2623 * @command: The command to tell ftrace what to do
2624 *
2625 * If an arch needs to fall back to the stop machine method, the
2626 * it can call this function.
2627 */
2628void ftrace_run_stop_machine(int command)
2629{
2630 stop_machine(__ftrace_modify_code, &command, NULL);
2631}
2632
2633/**
2634 * arch_ftrace_update_code - modify the code to trace or not trace
2635 * @command: The command that needs to be done
2636 *
2637 * Archs can override this function if they do not need to
2638 * run stop_machine() to modify code.
2639 */
2640void __weak arch_ftrace_update_code(int command)
2641{
2642 ftrace_run_stop_machine(command);
2643}
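
/*
 * Illustrative sketch (an assumption, not from the original file): an arch
 * whose text patching is already safe against concurrent execution could
 * override the weak function above and avoid stop_machine() entirely:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 */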
2644
2645static void ftrace_run_update_code(int command)
2646{
2647 int ret;
2648
2649 ret = ftrace_arch_code_modify_prepare();
2650 FTRACE_WARN_ON(ret);
2651 if (ret)
2652 return;
2653
2654 /*
2655 * By default we use stop_machine() to modify the code.
2656 * But archs can do whatever they want as long as it
2657 * is safe. The stop_machine() is the safest, but also
2658 * produces the most overhead.
2659 */
2660 arch_ftrace_update_code(command);
2661
2662 ret = ftrace_arch_code_modify_post_process();
2663 FTRACE_WARN_ON(ret);
2664}
2665
2666static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2667 struct ftrace_ops_hash *old_hash)
2668{
2669 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2670 ops->old_hash.filter_hash = old_hash->filter_hash;
2671 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2672 ftrace_run_update_code(command);
2673 ops->old_hash.filter_hash = NULL;
2674 ops->old_hash.notrace_hash = NULL;
2675 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2676}
2677
2678static ftrace_func_t saved_ftrace_func;
2679static int ftrace_start_up;
2680
2681void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2682{
2683}
2684
2685static void ftrace_startup_enable(int command)
2686{
2687 if (saved_ftrace_func != ftrace_trace_function) {
2688 saved_ftrace_func = ftrace_trace_function;
2689 command |= FTRACE_UPDATE_TRACE_FUNC;
2690 }
2691
2692 if (!command || !ftrace_enabled)
2693 return;
2694
2695 ftrace_run_update_code(command);
2696}
2697
2698static void ftrace_startup_all(int command)
2699{
2700 update_all_ops = true;
2701 ftrace_startup_enable(command);
2702 update_all_ops = false;
2703}
2704
2705int ftrace_startup(struct ftrace_ops *ops, int command)
2706{
2707 int ret;
2708
2709 if (unlikely(ftrace_disabled))
2710 return -ENODEV;
2711
2712 ret = __register_ftrace_function(ops);
2713 if (ret)
2714 return ret;
2715
2716 ftrace_start_up++;
2717
2718 /*
2719 * Note that ftrace probes use this to start up
2720 * and modify the functions they will probe. But we still
2721 * set the ADDING flag for modification, as probes
2722 * do not have trampolines. If they add them in the
2723 * future, then the probes will need to distinguish
2724 * between adding and updating probes.
2725 */
2726 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2727
2728 ret = ftrace_hash_ipmodify_enable(ops);
2729 if (ret < 0) {
2730 /* Rollback registration process */
2731 __unregister_ftrace_function(ops);
2732 ftrace_start_up--;
2733 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2734 return ret;
2735 }
2736
2737 if (ftrace_hash_rec_enable(ops, 1))
2738 command |= FTRACE_UPDATE_CALLS;
2739
2740 ftrace_startup_enable(command);
2741
2742 /*
2743 * If ftrace is in an undefined state, we just remove the ops from the list
2744 * to prevent a NULL pointer dereference, instead of totally rolling it back
2745 * and freeing the trampoline, because those actions could cause further damage.
2746 */
2747 if (unlikely(ftrace_disabled)) {
2748 __unregister_ftrace_function(ops);
2749 return -ENODEV;
2750 }
2751
2752 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2753
2754 return 0;
2755}
2756
2757int ftrace_shutdown(struct ftrace_ops *ops, int command)
2758{
2759 int ret;
2760
2761 if (unlikely(ftrace_disabled))
2762 return -ENODEV;
2763
2764 ret = __unregister_ftrace_function(ops);
2765 if (ret)
2766 return ret;
2767
2768 ftrace_start_up--;
2769 /*
2770 * Just warn in case of an imbalance; no need to kill ftrace, it's not
2771 * critical, but the ftrace_call callers may never be nopped again after
2772 * further ftrace uses.
2773 */
2774 WARN_ON_ONCE(ftrace_start_up < 0);
2775
2776 /* Disabling ipmodify never fails */
2777 ftrace_hash_ipmodify_disable(ops);
2778
2779 if (ftrace_hash_rec_disable(ops, 1))
2780 command |= FTRACE_UPDATE_CALLS;
2781
2782 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2783
2784 if (saved_ftrace_func != ftrace_trace_function) {
2785 saved_ftrace_func = ftrace_trace_function;
2786 command |= FTRACE_UPDATE_TRACE_FUNC;
2787 }
2788
2789 if (!command || !ftrace_enabled) {
2790 /*
2791 * If these are dynamic or per_cpu ops, they still
2792 * need their data freed. Since function tracing is
2793 * not currently active, we can just free them
2794 * without synchronizing all CPUs.
2795 */
2796 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2797 goto free_ops;
2798
2799 return 0;
2800 }
2801
2802 /*
2803 * If the ops uses a trampoline, then it needs to be
2804 * tested first on update.
2805 */
2806 ops->flags |= FTRACE_OPS_FL_REMOVING;
2807 removed_ops = ops;
2808
2809 /* The trampoline logic checks the old hashes */
2810 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2811 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2812
2813 ftrace_run_update_code(command);
2814
2815 /*
2816 * If there are no more ops registered with ftrace, run a
2817 * sanity check to make sure all rec flags are cleared.
2818 */
2819 if (rcu_dereference_protected(ftrace_ops_list,
2820 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2821 struct ftrace_page *pg;
2822 struct dyn_ftrace *rec;
2823
2824 do_for_each_ftrace_rec(pg, rec) {
2825 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2826 pr_warn(" %pS flags:%lx\n",
2827 (void *)rec->ip, rec->flags);
2828 } while_for_each_ftrace_rec();
2829 }
2830
2831 ops->old_hash.filter_hash = NULL;
2832 ops->old_hash.notrace_hash = NULL;
2833
2834 removed_ops = NULL;
2835 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2836
2837 /*
2838 * Dynamic ops may be freed; we must make sure that all
2839 * callers are done before leaving this function.
2840 * The same goes for freeing the per_cpu data of the per_cpu
2841 * ops.
2842 */
2843 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2844 /*
2845 * We need to do a hard force of sched synchronization.
2846 * This is because we use preempt_disable() to do RCU, but
2847 * the function tracers can be called where RCU is not watching
2848 * (like before user_exit()). We cannot rely on the RCU
2849 * infrastructure to do the synchronization, thus we must do it
2850 * ourselves.
2851 */
2852 schedule_on_each_cpu(ftrace_sync);
2853
2854 /*
2855 * When the kernel is preemptive, tasks can be preempted
2856 * while on an ftrace trampoline. Just scheduling a task on
2857 * a CPU is not good enough to flush them. Calling
2858 * synchronize_rcu_tasks() will wait for those tasks to
2859 * execute and either schedule voluntarily or enter user space.
2860 */
2861 if (IS_ENABLED(CONFIG_PREEMPTION))
2862 synchronize_rcu_tasks();
2863
2864 free_ops:
2865 arch_ftrace_trampoline_free(ops);
2866 }
2867
2868 return 0;
2869}
2870
2871static void ftrace_startup_sysctl(void)
2872{
2873 int command;
2874
2875 if (unlikely(ftrace_disabled))
2876 return;
2877
2878 /* Force update next time */
2879 saved_ftrace_func = NULL;
2880 /* ftrace_start_up is true if we want ftrace running */
2881 if (ftrace_start_up) {
2882 command = FTRACE_UPDATE_CALLS;
2883 if (ftrace_graph_active)
2884 command |= FTRACE_START_FUNC_RET;
2885 ftrace_startup_enable(command);
2886 }
2887}
2888
2889static void ftrace_shutdown_sysctl(void)
2890{
2891 int command;
2892
2893 if (unlikely(ftrace_disabled))
2894 return;
2895
2896 /* ftrace_start_up is true if ftrace is running */
2897 if (ftrace_start_up) {
2898 command = FTRACE_DISABLE_CALLS;
2899 if (ftrace_graph_active)
2900 command |= FTRACE_STOP_FUNC_RET;
2901 ftrace_run_update_code(command);
2902 }
2903}
2904
2905static u64 ftrace_update_time;
2906unsigned long ftrace_update_tot_cnt;
2907unsigned long ftrace_number_of_pages;
2908unsigned long ftrace_number_of_groups;
2909
2910static inline int ops_traces_mod(struct ftrace_ops *ops)
2911{
2912 /*
2913 * An empty filter_hash will default to tracing the module.
2914 * But the notrace hash requires a test of individual module functions.
2915 */
2916 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2917 ftrace_hash_empty(ops->func_hash->notrace_hash);
2918}
2919
2920/*
2921 * Check if the current ops references the record.
2922 *
2923 * If the ops traces all functions, then it was already accounted for.
2924 * If the ops does not trace the current record function, skip it.
2925 * If the ops ignores the function via notrace filter, skip it.
2926 */
2927static inline bool
2928ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2929{
2930 /* If ops isn't enabled, ignore it */
2931 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2932 return false;
2933
2934 /* If ops traces all then it includes this function */
2935 if (ops_traces_mod(ops))
2936 return true;
2937
2938 /* The function must be in the filter */
2939 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2940 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2941 return false;
2942
2943 /* If in notrace hash, we ignore it too */
2944 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2945 return false;
2946
2947 return true;
2948}
2949
2950static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2951{
2952 struct ftrace_page *pg;
2953 struct dyn_ftrace *p;
2954 u64 start, stop;
2955 unsigned long update_cnt = 0;
2956 unsigned long rec_flags = 0;
2957 int i;
2958
2959 start = ftrace_now(raw_smp_processor_id());
2960
2961 /*
2962 * When a module is loaded, this function is called to convert
2963 * the calls to mcount in its text to nops, and also to create
2964 * an entry in the ftrace data. Now, if ftrace is activated
2965 * after this call, but before the module sets its text to
2966 * read-only, the code modification to enable ftrace can fail if
2967 * the text is made read-only while ftrace is converting the calls.
2968 * To prevent this, the module's records are set as disabled
2969 * and will be enabled after the call to set the module's text
2970 * to read-only.
2971 */
2972 if (mod)
2973 rec_flags |= FTRACE_FL_DISABLED;
2974
2975 for (pg = new_pgs; pg; pg = pg->next) {
2976
2977 for (i = 0; i < pg->index; i++) {
2978
2979 /* If something went wrong, bail without enabling anything */
2980 if (unlikely(ftrace_disabled))
2981 return -1;
2982
2983 p = &pg->records[i];
2984 p->flags = rec_flags;
2985
2986 /*
2987 * Do the initial record conversion from mcount jump
2988 * to the NOP instructions.
2989 */
2990 if (!__is_defined(CC_USING_NOP_MCOUNT) &&
2991 !ftrace_nop_initialize(mod, p))
2992 break;
2993
2994 update_cnt++;
2995 }
2996 }
2997
2998 stop = ftrace_now(raw_smp_processor_id());
2999 ftrace_update_time = stop - start;
3000 ftrace_update_tot_cnt += update_cnt;
3001
3002 return 0;
3003}
3004
3005static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3006{
3007 int order;
3008 int cnt;
3009
3010 if (WARN_ON(!count))
3011 return -EINVAL;
3012
3013 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3014
3015 /*
3016 * We want to fill as much as possible. No more than a page
3017 * may be empty.
3018 */
3019 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
3020 order--;
3021
3022 again:
3023 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3024
3025 if (!pg->records) {
3026 /* if we can't allocate this size, try something smaller */
3027 if (!order)
3028 return -ENOMEM;
3029 order--;
3030 goto again;
3031 }
3032
3033 ftrace_number_of_pages += 1 << order;
3034 ftrace_number_of_groups++;
3035
3036 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3037 pg->order = order;
3038
3039 if (cnt > count)
3040 cnt = count;
3041
3042 return cnt;
3043}
3044
3045static void ftrace_free_pages(struct ftrace_page *pages)
3046{
3047 struct ftrace_page *pg = pages;
3048
3049 while (pg) {
3050 if (pg->records) {
3051 free_pages((unsigned long)pg->records, pg->order);
3052 ftrace_number_of_pages -= 1 << pg->order;
3053 }
3054 pages = pg->next;
3055 kfree(pg);
3056 pg = pages;
3057 ftrace_number_of_groups--;
3058 }
3059}
3060
3061static struct ftrace_page *
3062ftrace_allocate_pages(unsigned long num_to_init)
3063{
3064 struct ftrace_page *start_pg;
3065 struct ftrace_page *pg;
3066 int cnt;
3067
3068 if (!num_to_init)
3069 return NULL;
3070
3071 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3072 if (!pg)
3073 return NULL;
3074
3075 /*
3076 * Try to allocate as much as possible in one contiguous
3077 * location that fills in all of the space. We want to
3078 * waste as little space as possible.
3079 */
3080 for (;;) {
3081 cnt = ftrace_allocate_records(pg, num_to_init);
3082 if (cnt < 0)
3083 goto free_pages;
3084
3085 num_to_init -= cnt;
3086 if (!num_to_init)
3087 break;
3088
3089 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3090 if (!pg->next)
3091 goto free_pages;
3092
3093 pg = pg->next;
3094 }
3095
3096 return start_pg;
3097
3098 free_pages:
3099 ftrace_free_pages(start_pg);
3100 pr_info("ftrace: FAILED to allocate memory for functions\n");
3101 return NULL;
3102}
3103
3104#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3105
3106struct ftrace_iterator {
3107 loff_t pos;
3108 loff_t func_pos;
3109 loff_t mod_pos;
3110 struct ftrace_page *pg;
3111 struct dyn_ftrace *func;
3112 struct ftrace_func_probe *probe;
3113 struct ftrace_func_entry *probe_entry;
3114 struct trace_parser parser;
3115 struct ftrace_hash *hash;
3116 struct ftrace_ops *ops;
3117 struct trace_array *tr;
3118 struct list_head *mod_list;
3119 int pidx;
3120 int idx;
3121 unsigned flags;
3122};
3123
3124static void *
3125t_probe_next(struct seq_file *m, loff_t *pos)
3126{
3127 struct ftrace_iterator *iter = m->private;
3128 struct trace_array *tr = iter->ops->private;
3129 struct list_head *func_probes;
3130 struct ftrace_hash *hash;
3131 struct list_head *next;
3132 struct hlist_node *hnd = NULL;
3133 struct hlist_head *hhd;
3134 int size;
3135
3136 (*pos)++;
3137 iter->pos = *pos;
3138
3139 if (!tr)
3140 return NULL;
3141
3142 func_probes = &tr->func_probes;
3143 if (list_empty(func_probes))
3144 return NULL;
3145
3146 if (!iter->probe) {
3147 next = func_probes->next;
3148 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3149 }
3150
3151 if (iter->probe_entry)
3152 hnd = &iter->probe_entry->hlist;
3153
3154 hash = iter->probe->ops.func_hash->filter_hash;
3155
3156 /*
3157 * A probe being registered may temporarily have an empty hash
3158 * and it's at the end of the func_probes list.
3159 */
3160 if (!hash || hash == EMPTY_HASH)
3161 return NULL;
3162
3163 size = 1 << hash->size_bits;
3164
3165 retry:
3166 if (iter->pidx >= size) {
3167 if (iter->probe->list.next == func_probes)
3168 return NULL;
3169 next = iter->probe->list.next;
3170 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3171 hash = iter->probe->ops.func_hash->filter_hash;
3172 size = 1 << hash->size_bits;
3173 iter->pidx = 0;
3174 }
3175
3176 hhd = &hash->buckets[iter->pidx];
3177
3178 if (hlist_empty(hhd)) {
3179 iter->pidx++;
3180 hnd = NULL;
3181 goto retry;
3182 }
3183
3184 if (!hnd)
3185 hnd = hhd->first;
3186 else {
3187 hnd = hnd->next;
3188 if (!hnd) {
3189 iter->pidx++;
3190 goto retry;
3191 }
3192 }
3193
3194 if (WARN_ON_ONCE(!hnd))
3195 return NULL;
3196
3197 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3198
3199 return iter;
3200}
3201
3202static void *t_probe_start(struct seq_file *m, loff_t *pos)
3203{
3204 struct ftrace_iterator *iter = m->private;
3205 void *p = NULL;
3206 loff_t l;
3207
3208 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3209 return NULL;
3210
3211 if (iter->mod_pos > *pos)
3212 return NULL;
3213
3214 iter->probe = NULL;
3215 iter->probe_entry = NULL;
3216 iter->pidx = 0;
3217 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3218 p = t_probe_next(m, &l);
3219 if (!p)
3220 break;
3221 }
3222 if (!p)
3223 return NULL;
3224
3225 /* Only set this if we have an item */
3226 iter->flags |= FTRACE_ITER_PROBE;
3227
3228 return iter;
3229}
3230
3231static int
3232t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3233{
3234 struct ftrace_func_entry *probe_entry;
3235 struct ftrace_probe_ops *probe_ops;
3236 struct ftrace_func_probe *probe;
3237
3238 probe = iter->probe;
3239 probe_entry = iter->probe_entry;
3240
3241 if (WARN_ON_ONCE(!probe || !probe_entry))
3242 return -EIO;
3243
3244 probe_ops = probe->probe_ops;
3245
3246 if (probe_ops->print)
3247 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3248
3249 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3250 (void *)probe_ops->func);
3251
3252 return 0;
3253}
3254
3255static void *
3256t_mod_next(struct seq_file *m, loff_t *pos)
3257{
3258 struct ftrace_iterator *iter = m->private;
3259 struct trace_array *tr = iter->tr;
3260
3261 (*pos)++;
3262 iter->pos = *pos;
3263
3264 iter->mod_list = iter->mod_list->next;
3265
3266 if (iter->mod_list == &tr->mod_trace ||
3267 iter->mod_list == &tr->mod_notrace) {
3268 iter->flags &= ~FTRACE_ITER_MOD;
3269 return NULL;
3270 }
3271
3272 iter->mod_pos = *pos;
3273
3274 return iter;
3275}
3276
3277static void *t_mod_start(struct seq_file *m, loff_t *pos)
3278{
3279 struct ftrace_iterator *iter = m->private;
3280 void *p = NULL;
3281 loff_t l;
3282
3283 if (iter->func_pos > *pos)
3284 return NULL;
3285
3286 iter->mod_pos = iter->func_pos;
3287
3288 /* probes are only available if tr is set */
3289 if (!iter->tr)
3290 return NULL;
3291
3292 for (l = 0; l <= (*pos - iter->func_pos); ) {
3293 p = t_mod_next(m, &l);
3294 if (!p)
3295 break;
3296 }
3297 if (!p) {
3298 iter->flags &= ~FTRACE_ITER_MOD;
3299 return t_probe_start(m, pos);
3300 }
3301
3302 /* Only set this if we have an item */
3303 iter->flags |= FTRACE_ITER_MOD;
3304
3305 return iter;
3306}
3307
3308static int
3309t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3310{
3311 struct ftrace_mod_load *ftrace_mod;
3312 struct trace_array *tr = iter->tr;
3313
3314 if (WARN_ON_ONCE(!iter->mod_list) ||
3315 iter->mod_list == &tr->mod_trace ||
3316 iter->mod_list == &tr->mod_notrace)
3317 return -EIO;
3318
3319 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3320
3321 if (ftrace_mod->func)
3322 seq_printf(m, "%s", ftrace_mod->func);
3323 else
3324 seq_putc(m, '*');
3325
3326 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3327
3328 return 0;
3329}
3330
3331static void *
3332t_func_next(struct seq_file *m, loff_t *pos)
3333{
3334 struct ftrace_iterator *iter = m->private;
3335 struct dyn_ftrace *rec = NULL;
3336
3337 (*pos)++;
3338
3339 retry:
3340 if (iter->idx >= iter->pg->index) {
3341 if (iter->pg->next) {
3342 iter->pg = iter->pg->next;
3343 iter->idx = 0;
3344 goto retry;
3345 }
3346 } else {
3347 rec = &iter->pg->records[iter->idx++];
3348 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3349 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3350
3351 ((iter->flags & FTRACE_ITER_ENABLED) &&
3352 !(rec->flags & FTRACE_FL_ENABLED))) {
3353
3354 rec = NULL;
3355 goto retry;
3356 }
3357 }
3358
3359 if (!rec)
3360 return NULL;
3361
3362 iter->pos = iter->func_pos = *pos;
3363 iter->func = rec;
3364
3365 return iter;
3366}
3367
3368static void *
3369t_next(struct seq_file *m, void *v, loff_t *pos)
3370{
3371 struct ftrace_iterator *iter = m->private;
3372 loff_t l = *pos; /* t_probe_start() must use original pos */
3373 void *ret;
3374
3375 if (unlikely(ftrace_disabled))
3376 return NULL;
3377
3378 if (iter->flags & FTRACE_ITER_PROBE)
3379 return t_probe_next(m, pos);
3380
3381 if (iter->flags & FTRACE_ITER_MOD)
3382 return t_mod_next(m, pos);
3383
3384 if (iter->flags & FTRACE_ITER_PRINTALL) {
3385 /* next must increment pos, and t_probe_start does not */
3386 (*pos)++;
3387 return t_mod_start(m, &l);
3388 }
3389
3390 ret = t_func_next(m, pos);
3391
3392 if (!ret)
3393 return t_mod_start(m, &l);
3394
3395 return ret;
3396}
3397
3398static void reset_iter_read(struct ftrace_iterator *iter)
3399{
3400 iter->pos = 0;
3401 iter->func_pos = 0;
3402 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3403}
3404
3405static void *t_start(struct seq_file *m, loff_t *pos)
3406{
3407 struct ftrace_iterator *iter = m->private;
3408 void *p = NULL;
3409 loff_t l;
3410
3411 mutex_lock(&ftrace_lock);
3412
3413 if (unlikely(ftrace_disabled))
3414 return NULL;
3415
3416 /*
3417 * If an lseek was done, then reset and start from the beginning.
3418 */
3419 if (*pos < iter->pos)
3420 reset_iter_read(iter);
3421
3422 /*
3423 * For set_ftrace_filter reading, if we have the filter
3424 * off, we can take a shortcut and just print out that all
3425 * functions are enabled.
3426 */
3427 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3428 ftrace_hash_empty(iter->hash)) {
3429 iter->func_pos = 1; /* Account for the message */
3430 if (*pos > 0)
3431 return t_mod_start(m, pos);
3432 iter->flags |= FTRACE_ITER_PRINTALL;
3433 /* reset in case of seek/pread */
3434 iter->flags &= ~FTRACE_ITER_PROBE;
3435 return iter;
3436 }
3437
3438 if (iter->flags & FTRACE_ITER_MOD)
3439 return t_mod_start(m, pos);
3440
3441 /*
3442 * Unfortunately, we need to restart at ftrace_pages_start
3443 * every time we let go of the ftrace_lock mutex. This is because
3444 * those pointers can change without the lock.
3445 */
3446 iter->pg = ftrace_pages_start;
3447 iter->idx = 0;
3448 for (l = 0; l <= *pos; ) {
3449 p = t_func_next(m, &l);
3450 if (!p)
3451 break;
3452 }
3453
3454 if (!p)
3455 return t_mod_start(m, pos);
3456
3457 return iter;
3458}
3459
3460static void t_stop(struct seq_file *m, void *p)
3461{
3462 mutex_unlock(&ftrace_lock);
3463}
3464
3465void * __weak
3466arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3467{
3468 return NULL;
3469}
3470
3471static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3472 struct dyn_ftrace *rec)
3473{
3474 void *ptr;
3475
3476 ptr = arch_ftrace_trampoline_func(ops, rec);
3477 if (ptr)
3478 seq_printf(m, " ->%pS", ptr);
3479}
3480
3481static int t_show(struct seq_file *m, void *v)
3482{
3483 struct ftrace_iterator *iter = m->private;
3484 struct dyn_ftrace *rec;
3485
3486 if (iter->flags & FTRACE_ITER_PROBE)
3487 return t_probe_show(m, iter);
3488
3489 if (iter->flags & FTRACE_ITER_MOD)
3490 return t_mod_show(m, iter);
3491
3492 if (iter->flags & FTRACE_ITER_PRINTALL) {
3493 if (iter->flags & FTRACE_ITER_NOTRACE)
3494 seq_puts(m, "#### no functions disabled ####\n");
3495 else
3496 seq_puts(m, "#### all functions enabled ####\n");
3497 return 0;
3498 }
3499
3500 rec = iter->func;
3501
3502 if (!rec)
3503 return 0;
3504
3505 seq_printf(m, "%ps", (void *)rec->ip);
3506 if (iter->flags & FTRACE_ITER_ENABLED) {
3507 struct ftrace_ops *ops;
3508
3509 seq_printf(m, " (%ld)%s%s",
3510 ftrace_rec_count(rec),
3511 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3512 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
3513 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3514 ops = ftrace_find_tramp_ops_any(rec);
3515 if (ops) {
3516 do {
3517 seq_printf(m, "\ttramp: %pS (%pS)",
3518 (void *)ops->trampoline,
3519 (void *)ops->func);
3520 add_trampoline_func(m, ops, rec);
3521 ops = ftrace_find_tramp_ops_next(rec, ops);
3522 } while (ops);
3523 } else
3524 seq_puts(m, "\ttramp: ERROR!");
3525 } else {
3526 add_trampoline_func(m, NULL, rec);
3527 }
3528 }
3529
3530 seq_putc(m, '\n');
3531
3532 return 0;
3533}
3534
3535static const struct seq_operations show_ftrace_seq_ops = {
3536 .start = t_start,
3537 .next = t_next,
3538 .stop = t_stop,
3539 .show = t_show,
3540};
3541
3542static int
3543ftrace_avail_open(struct inode *inode, struct file *file)
3544{
3545 struct ftrace_iterator *iter;
3546 int ret;
3547
3548 ret = security_locked_down(LOCKDOWN_TRACEFS);
3549 if (ret)
3550 return ret;
3551
3552 if (unlikely(ftrace_disabled))
3553 return -ENODEV;
3554
3555 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3556 if (!iter)
3557 return -ENOMEM;
3558
3559 iter->pg = ftrace_pages_start;
3560 iter->ops = &global_ops;
3561
3562 return 0;
3563}
3564
3565static int
3566ftrace_enabled_open(struct inode *inode, struct file *file)
3567{
3568 struct ftrace_iterator *iter;
3569
3570 /*
3571 * This shows us what functions are currently being
3572 * traced and by what. Not sure if we want lockdown
3573 * to hide such critical information from an admin.
3574 * Although, perhaps it can show information we don't
3575 * want people to see, but if something is tracing
3576 * something, we probably want to know about it.
3577 */
3578
3579 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3580 if (!iter)
3581 return -ENOMEM;
3582
3583 iter->pg = ftrace_pages_start;
3584 iter->flags = FTRACE_ITER_ENABLED;
3585 iter->ops = &global_ops;
3586
3587 return 0;
3588}
3589
3590/**
3591 * ftrace_regex_open - initialize function tracer filter files
3592 * @ops: The ftrace_ops that hold the hash filters
3593 * @flag: The type of filter to process
3594 * @inode: The inode, usually passed in to your open routine
3595 * @file: The file, usually passed in to your open routine
3596 *
3597 * ftrace_regex_open() initializes the filter files for the
3598 * @ops. Depending on @flag it may process the filter hash or
3599 * the notrace hash of @ops. With this called from the open
3600 * routine, you can use ftrace_filter_write() for the write
3601 * routine if @flag has FTRACE_ITER_FILTER set, or
3602 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3603 * tracing_lseek() should be used as the lseek routine, and
3604 * release must call ftrace_regex_release().
3605 */
3606int
3607ftrace_regex_open(struct ftrace_ops *ops, int flag,
3608 struct inode *inode, struct file *file)
3609{
3610 struct ftrace_iterator *iter;
3611 struct ftrace_hash *hash;
3612 struct list_head *mod_head;
3613 struct trace_array *tr = ops->private;
3614 int ret = -ENOMEM;
3615
3616 ftrace_ops_init(ops);
3617
3618 if (unlikely(ftrace_disabled))
3619 return -ENODEV;
3620
3621 if (tracing_check_open_get_tr(tr))
3622 return -ENODEV;
3623
3624 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3625 if (!iter)
3626 goto out;
3627
3628 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3629 goto out;
3630
3631 iter->ops = ops;
3632 iter->flags = flag;
3633 iter->tr = tr;
3634
3635 mutex_lock(&ops->func_hash->regex_lock);
3636
3637 if (flag & FTRACE_ITER_NOTRACE) {
3638 hash = ops->func_hash->notrace_hash;
3639 mod_head = tr ? &tr->mod_notrace : NULL;
3640 } else {
3641 hash = ops->func_hash->filter_hash;
3642 mod_head = tr ? &tr->mod_trace : NULL;
3643 }
3644
3645 iter->mod_list = mod_head;
3646
3647 if (file->f_mode & FMODE_WRITE) {
3648 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3649
3650 if (file->f_flags & O_TRUNC) {
3651 iter->hash = alloc_ftrace_hash(size_bits);
3652 clear_ftrace_mod_list(mod_head);
3653 } else {
3654 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3655 }
3656
3657 if (!iter->hash) {
3658 trace_parser_put(&iter->parser);
3659 goto out_unlock;
3660 }
3661 } else
3662 iter->hash = hash;
3663
3664 ret = 0;
3665
3666 if (file->f_mode & FMODE_READ) {
3667 iter->pg = ftrace_pages_start;
3668
3669 ret = seq_open(file, &show_ftrace_seq_ops);
3670 if (!ret) {
3671 struct seq_file *m = file->private_data;
3672 m->private = iter;
3673 } else {
3674 /* Failed */
3675 free_ftrace_hash(iter->hash);
3676 trace_parser_put(&iter->parser);
3677 }
3678 } else
3679 file->private_data = iter;
3680
3681 out_unlock:
3682 mutex_unlock(&ops->func_hash->regex_lock);
3683
3684 out:
3685 if (ret) {
3686 kfree(iter);
3687 if (tr)
3688 trace_array_put(tr);
3689 }
3690
3691 return ret;
3692}
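
/*
 * Illustrative sketch of the wiring described in the comment above (the
 * name example_filter_fops is made up; the real filter fops are defined
 * later in this file):
 *
 *	static const struct file_operations example_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */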
3693
3694static int
3695ftrace_filter_open(struct inode *inode, struct file *file)
3696{
3697 struct ftrace_ops *ops = inode->i_private;
3698
3699 /* Checks for tracefs lockdown */
3700 return ftrace_regex_open(ops,
3701 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3702 inode, file);
3703}
3704
3705static int
3706ftrace_notrace_open(struct inode *inode, struct file *file)
3707{
3708 struct ftrace_ops *ops = inode->i_private;
3709
3710 /* Checks for tracefs lockdown */
3711 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3712 inode, file);
3713}
3714
3715/* Type for quick searching of ftrace basic regexes (globs) from filter_parse_regex */
3716struct ftrace_glob {
3717 char *search;
3718 unsigned len;
3719 int type;
3720};
3721
3722/*
3723 * If symbols in an architecture don't correspond exactly to the user-visible
3724 * name of what they represent, it is possible to define this function to
3725 * perform the necessary adjustments.
3726 */
3727char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3728{
3729 return str;
3730}
3731
3732static int ftrace_match(char *str, struct ftrace_glob *g)
3733{
3734 int matched = 0;
3735 int slen;
3736
3737 str = arch_ftrace_match_adjust(str, g->search);
3738
3739 switch (g->type) {
3740 case MATCH_FULL:
3741 if (strcmp(str, g->search) == 0)
3742 matched = 1;
3743 break;
3744 case MATCH_FRONT_ONLY:
3745 if (strncmp(str, g->search, g->len) == 0)
3746 matched = 1;
3747 break;
3748 case MATCH_MIDDLE_ONLY:
3749 if (strstr(str, g->search))
3750 matched = 1;
3751 break;
3752 case MATCH_END_ONLY:
3753 slen = strlen(str);
3754 if (slen >= g->len &&
3755 memcmp(str + slen - g->len, g->search, g->len) == 0)
3756 matched = 1;
3757 break;
3758 case MATCH_GLOB:
3759 if (glob_match(g->search, str))
3760 matched = 1;
3761 break;
3762 }
3763
3764 return matched;
3765}
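
/*
 * For example (illustrative): after filter_parse_regex(), "sched_*" becomes
 * MATCH_FRONT_ONLY with search "sched_", "*lock*" becomes MATCH_MIDDLE_ONLY
 * with search "lock", "*_read" becomes MATCH_END_ONLY with search "_read",
 * and a plain name like "schedule" stays MATCH_FULL.
 */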
3766
3767static int
3768enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3769{
3770 struct ftrace_func_entry *entry;
3771 int ret = 0;
3772
3773 entry = ftrace_lookup_ip(hash, rec->ip);
3774 if (clear_filter) {
3775 /* Do nothing if it doesn't exist */
3776 if (!entry)
3777 return 0;
3778
3779 free_hash_entry(hash, entry);
3780 } else {
3781 /* Do nothing if it exists */
3782 if (entry)
3783 return 0;
3784
3785 ret = add_hash_entry(hash, rec->ip);
3786 }
3787 return ret;
3788}
3789
3790static int
3791add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3792 int clear_filter)
3793{
3794 long index = simple_strtoul(func_g->search, NULL, 0);
3795 struct ftrace_page *pg;
3796 struct dyn_ftrace *rec;
3797
3798 /* The index starts at 1 */
3799 if (--index < 0)
3800 return 0;
3801
3802 do_for_each_ftrace_rec(pg, rec) {
3803 if (pg->index <= index) {
3804 index -= pg->index;
3805 /* this is a double loop, break goes to the next page */
3806 break;
3807 }
3808 rec = &pg->records[index];
3809 enter_record(hash, rec, clear_filter);
3810 return 1;
3811 } while_for_each_ftrace_rec();
3812 return 0;
3813}
3814
3815static int
3816ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3817 struct ftrace_glob *mod_g, int exclude_mod)
3818{
3819 char str[KSYM_SYMBOL_LEN];
3820 char *modname;
3821
3822 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3823
3824 if (mod_g) {
3825 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3826
3827 /* blank module name to match all modules */
3828 if (!mod_g->len) {
3829 /* blank module globbing: modname xor exclude_mod */
3830 if (!exclude_mod != !modname)
3831 goto func_match;
3832 return 0;
3833 }
3834
3835 /*
3836 * exclude_mod is set to trace everything but the given
3837 * module. If it is set and the module matches, then
3838 * return 0. If it is not set and the module doesn't match,
3839 * also return 0. Otherwise, check the function to see if
3840 * that matches.
3841 */
3842 if (!mod_matches == !exclude_mod)
3843 return 0;
3844func_match:
3845 /* blank search means to match all funcs in the mod */
3846 if (!func_g->len)
3847 return 1;
3848 }
3849
3850 return ftrace_match(str, func_g);
3851}
3852
3853static int
3854match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3855{
3856 struct ftrace_page *pg;
3857 struct dyn_ftrace *rec;
3858 struct ftrace_glob func_g = { .type = MATCH_FULL };
3859 struct ftrace_glob mod_g = { .type = MATCH_FULL };
3860 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3861 int exclude_mod = 0;
3862 int found = 0;
3863 int ret;
3864 int clear_filter = 0;
3865
3866 if (func) {
3867 func_g.type = filter_parse_regex(func, len, &func_g.search,
3868 &clear_filter);
3869 func_g.len = strlen(func_g.search);
3870 }
3871
3872 if (mod) {
3873 mod_g.type = filter_parse_regex(mod, strlen(mod),
3874 &mod_g.search, &exclude_mod);
3875 mod_g.len = strlen(mod_g.search);
3876 }
3877
3878 mutex_lock(&ftrace_lock);
3879
3880 if (unlikely(ftrace_disabled))
3881 goto out_unlock;
3882
3883 if (func_g.type == MATCH_INDEX) {
3884 found = add_rec_by_index(hash, &func_g, clear_filter);
3885 goto out_unlock;
3886 }
3887
3888 do_for_each_ftrace_rec(pg, rec) {
3889
3890 if (rec->flags & FTRACE_FL_DISABLED)
3891 continue;
3892
3893 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3894 ret = enter_record(hash, rec, clear_filter);
3895 if (ret < 0) {
3896 found = ret;
3897 goto out_unlock;
3898 }
3899 found = 1;
3900 }
3901 } while_for_each_ftrace_rec();
3902 out_unlock:
3903 mutex_unlock(&ftrace_lock);
3904
3905 return found;
3906}
3907
3908static int
3909ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3910{
3911 return match_records(hash, buff, len, NULL);
3912}
3913
3914static void ftrace_ops_update_code(struct ftrace_ops *ops,
3915 struct ftrace_ops_hash *old_hash)
3916{
3917 struct ftrace_ops *op;
3918
3919 if (!ftrace_enabled)
3920 return;
3921
3922 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3923 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3924 return;
3925 }
3926
3927 /*
3928 * If this is the shared global_ops filter, then we need to
3929 * check if there is another ops that shares it and is enabled.
3930 * If so, we still need to run the modify code.
3931 */
3932 if (ops->func_hash != &global_ops.local_hash)
3933 return;
3934
3935 do_for_each_ftrace_op(op, ftrace_ops_list) {
3936 if (op->func_hash == &global_ops.local_hash &&
3937 op->flags & FTRACE_OPS_FL_ENABLED) {
3938 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3939 /* Only need to do this once */
3940 return;
3941 }
3942 } while_for_each_ftrace_op(op);
3943}
3944
3945static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3946 struct ftrace_hash **orig_hash,
3947 struct ftrace_hash *hash,
3948 int enable)
3949{
3950 struct ftrace_ops_hash old_hash_ops;
3951 struct ftrace_hash *old_hash;
3952 int ret;
3953
3954 old_hash = *orig_hash;
3955 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3956 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3957 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3958 if (!ret) {
3959 ftrace_ops_update_code(ops, &old_hash_ops);
3960 free_ftrace_hash_rcu(old_hash);
3961 }
3962 return ret;
3963}
3964
3965static bool module_exists(const char *module)
3966{
3967 /* All modules have the symbol __this_module */
3968 static const char this_mod[] = "__this_module";
3969 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
3970 unsigned long val;
3971 int n;
3972
3973 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
3974
3975 if (n > sizeof(modname) - 1)
3976 return false;
3977
3978 val = module_kallsyms_lookup_name(modname);
3979 return val != 0;
3980}
3981
3982static int cache_mod(struct trace_array *tr,
3983 const char *func, char *module, int enable)
3984{
3985 struct ftrace_mod_load *ftrace_mod, *n;
3986 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
3987 int ret;
3988
3989 mutex_lock(&ftrace_lock);
3990
3991 /* We do not cache inverse filters */
3992 if (func[0] == '!') {
3993 func++;
3994 ret = -EINVAL;
3995
3996 /* Look to remove this hash */
3997 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3998 if (strcmp(ftrace_mod->module, module) != 0)
3999 continue;
4000
4001 /* no func matches all */
4002 if (strcmp(func, "*") == 0 ||
4003 (ftrace_mod->func &&
4004 strcmp(ftrace_mod->func, func) == 0)) {
4005 ret = 0;
4006 free_ftrace_mod(ftrace_mod);
4007 continue;
4008 }
4009 }
4010 goto out;
4011 }
4012
4013 ret = -EINVAL;
4014 /* We only care about modules that have not been loaded yet */
4015 if (module_exists(module))
4016 goto out;
4017
4018 /* Save this string off, and execute it when the module is loaded */
4019 ret = ftrace_add_mod(tr, func, module, enable);
4020 out:
4021 mutex_unlock(&ftrace_lock);
4022
4023 return ret;
4024}
4025
4026static int
4027ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4028 int reset, int enable);
4029
4030#ifdef CONFIG_MODULES
4031static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4032 char *mod, bool enable)
4033{
4034 struct ftrace_mod_load *ftrace_mod, *n;
4035 struct ftrace_hash **orig_hash, *new_hash;
4036 LIST_HEAD(process_mods);
4037 char *func;
4038 int ret;
4039
4040 mutex_lock(&ops->func_hash->regex_lock);
4041
4042 if (enable)
4043 orig_hash = &ops->func_hash->filter_hash;
4044 else
4045 orig_hash = &ops->func_hash->notrace_hash;
4046
4047 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4048 *orig_hash);
4049 if (!new_hash)
4050 goto out; /* warn? */
4051
4052 mutex_lock(&ftrace_lock);
4053
4054 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4055
4056 if (strcmp(ftrace_mod->module, mod) != 0)
4057 continue;
4058
4059 if (ftrace_mod->func)
4060 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4061 else
4062 func = kstrdup("*", GFP_KERNEL);
4063
4064 if (!func) /* warn? */
4065 continue;
4066
4067 list_del(&ftrace_mod->list);
4068 list_add(&ftrace_mod->list, &process_mods);
4069
4070 /* Use the newly allocated func, as it may be "*" */
4071 kfree(ftrace_mod->func);
4072 ftrace_mod->func = func;
4073 }
4074
4075 mutex_unlock(&ftrace_lock);
4076
4077 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4078
4079 func = ftrace_mod->func;
4080
4081 /* Grabs ftrace_lock, which is why we have this extra step */
4082 match_records(new_hash, func, strlen(func), mod);
4083 free_ftrace_mod(ftrace_mod);
4084 }
4085
4086 if (enable && list_empty(head))
4087 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4088
4089 mutex_lock(&ftrace_lock);
4090
4091 ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4092 new_hash, enable);
4093 mutex_unlock(&ftrace_lock);
4094
4095 out:
4096 mutex_unlock(&ops->func_hash->regex_lock);
4097
4098 free_ftrace_hash(new_hash);
4099}
4100
4101static void process_cached_mods(const char *mod_name)
4102{
4103 struct trace_array *tr;
4104 char *mod;
4105
4106 mod = kstrdup(mod_name, GFP_KERNEL);
4107 if (!mod)
4108 return;
4109
4110 mutex_lock(&trace_types_lock);
4111 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4112 if (!list_empty(&tr->mod_trace))
4113 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4114 if (!list_empty(&tr->mod_notrace))
4115 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4116 }
4117 mutex_unlock(&trace_types_lock);
4118
4119 kfree(mod);
4120}
4121#endif
4122
4123/*
4124 * We register the module command as a template to show others how
4125 * to register a command as well.
4126 */
4127
4128static int
4129ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4130 char *func_orig, char *cmd, char *module, int enable)
4131{
4132 char *func;
4133 int ret;
4134
4135 if (!tr)
4136 return -ENODEV;
4137
4138 /* match_records() modifies func, and we need the original */
4139 func = kstrdup(func_orig, GFP_KERNEL);
4140 if (!func)
4141 return -ENOMEM;
4142
4143 /*
4144 * cmd == 'mod' because we only registered this func
4145 * for the 'mod' ftrace_func_command.
4146 * But if you register one func with multiple commands,
4147 * you can tell which command was used by the cmd
4148 * parameter.
4149 */
4150 ret = match_records(hash, func, strlen(func), module);
4151 kfree(func);
4152
4153 if (!ret)
4154 return cache_mod(tr, func_orig, module, enable);
4155 if (ret < 0)
4156 return ret;
4157 return 0;
4158}
4159
4160static struct ftrace_func_command ftrace_mod_cmd = {
4161 .name = "mod",
4162 .func = ftrace_mod_callback,
4163};
4164
4165static int __init ftrace_mod_cmd_init(void)
4166{
4167 return register_ftrace_command(&ftrace_mod_cmd);
4168}
4169core_initcall(ftrace_mod_cmd_init);
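
/*
 * Example usage from user space (illustrative; the tracefs mount point may
 * differ): limit the filter to a single module's functions with the command
 * registered above:
 *
 *	echo '*:mod:ext4' > /sys/kernel/tracing/set_ftrace_filter
 */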
4170
4171static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4172 struct ftrace_ops *op, struct pt_regs *pt_regs)
4173{
4174 struct ftrace_probe_ops *probe_ops;
4175 struct ftrace_func_probe *probe;
4176
4177 probe = container_of(op, struct ftrace_func_probe, ops);
4178 probe_ops = probe->probe_ops;
4179
4180 /*
4181 * Disable preemption for these calls to prevent an RCU grace
4182 * period. This syncs the hash iteration and freeing of items
4183 * on the hash. rcu_read_lock is too dangerous here.
4184 */
4185 preempt_disable_notrace();
4186 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4187 preempt_enable_notrace();
4188}
4189
4190struct ftrace_func_map {
4191 struct ftrace_func_entry entry;
4192 void *data;
4193};
4194
4195struct ftrace_func_mapper {
4196 struct ftrace_hash hash;
4197};
4198
4199/**
4200 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4201 *
4202 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4203 */
4204struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4205{
4206 struct ftrace_hash *hash;
4207
4208 /*
4209 * The mapper is simply a ftrace_hash, but since the entries
4210 * in the hash are not ftrace_func_entry type, we define it
4211 * as a separate structure.
4212 */
4213 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4214 return (struct ftrace_func_mapper *)hash;
4215}
4216
4217/**
4218 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4219 * @mapper: The mapper that has the ip maps
4220 * @ip: the instruction pointer to find the data for
4221 *
4222 * Returns the data mapped to @ip if found, otherwise NULL. The return
4223 * is actually the address of the mapper data pointer. The address is
4224 * returned for use cases where the data is no bigger than a long, and
4225 * the user can use the data pointer as its data instead of having to
4226 * allocate more memory for the reference.
4227 */
4228void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4229 unsigned long ip)
4230{
4231 struct ftrace_func_entry *entry;
4232 struct ftrace_func_map *map;
4233
4234 entry = ftrace_lookup_ip(&mapper->hash, ip);
4235 if (!entry)
4236 return NULL;
4237
4238 map = (struct ftrace_func_map *)entry;
4239 return &map->data;
4240}
4241
4242/**
4243 * ftrace_func_mapper_add_ip - Map some data to an ip
4244 * @mapper: The mapper that has the ip maps
4245 * @ip: The instruction pointer address to map @data to
4246 * @data: The data to map to @ip
4247 *
4248 * Returns 0 on success, otherwise an error.
4249 */
4250int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4251 unsigned long ip, void *data)
4252{
4253 struct ftrace_func_entry *entry;
4254 struct ftrace_func_map *map;
4255
4256 entry = ftrace_lookup_ip(&mapper->hash, ip);
4257 if (entry)
4258 return -EBUSY;
4259
4260 map = kmalloc(sizeof(*map), GFP_KERNEL);
4261 if (!map)
4262 return -ENOMEM;
4263
4264 map->entry.ip = ip;
4265 map->data = data;
4266
4267 __add_hash_entry(&mapper->hash, &map->entry);
4268
4269 return 0;
4270}
4271
4272/**
4273 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4274 * @mapper: The mapper that has the ip maps
4275 * @ip: The instruction pointer address to remove the data from
4276 *
4277 * Returns the data if it is found, otherwise NULL.
4278 * Note, if the data pointer is used as the data itself, (see
4279 * Note, if the data pointer is used as the data itself (see
4280 * ftrace_func_mapper_find_ip()), then the return value may be
4281 * meaningless if the data pointer was set to zero.
4282void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4283 unsigned long ip)
4284{
4285 struct ftrace_func_entry *entry;
4286 struct ftrace_func_map *map;
4287 void *data;
4288
4289 entry = ftrace_lookup_ip(&mapper->hash, ip);
4290 if (!entry)
4291 return NULL;
4292
4293 map = (struct ftrace_func_map *)entry;
4294 data = map->data;
4295
4296 remove_hash_entry(&mapper->hash, entry);
4297 kfree(entry);
4298
4299 return data;
4300}
4301
4302/**
4303 * free_ftrace_func_mapper - free a mapping of ips and data
4304 * @mapper: The mapper that has the ip maps
4305 * @free_func: A function to be called on each data item.
4306 *
4307 * This is used to free the function mapper. The @free_func is optional
4308 * and can be used if the data needs to be freed as well.
4309 */
4310void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4311 ftrace_mapper_func free_func)
4312{
4313 struct ftrace_func_entry *entry;
4314 struct ftrace_func_map *map;
4315 struct hlist_head *hhd;
4316 int size, i;
4317
4318 if (!mapper)
4319 return;
4320
4321 if (free_func && mapper->hash.count) {
4322 size = 1 << mapper->hash.size_bits;
4323 for (i = 0; i < size; i++) {
4324 hhd = &mapper->hash.buckets[i];
4325 hlist_for_each_entry(entry, hhd, hlist) {
4326 map = (struct ftrace_func_map *)entry;
4327 free_func(map);
4328 }
4329 }
4330 }
4331 free_ftrace_hash(&mapper->hash);
4332}
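/*
 * Example sketch of the func_mapper API above, keeping a per-ip counter
 * directly in the data slot (the surrounding probe code and the "ip"
 * variable are hypothetical):
 *
 *	struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
 *	long *count;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)1);
 *
 *	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 *	if (count)
 *		(*count)++;
 *
 *	free_ftrace_func_mapper(mapper, NULL);
 */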
4333
4334static void release_probe(struct ftrace_func_probe *probe)
4335{
4336 struct ftrace_probe_ops *probe_ops;
4337
4338 mutex_lock(&ftrace_lock);
4339
4340 WARN_ON(probe->ref <= 0);
4341
4342 /* Subtract the ref that was used to protect this instance */
4343 probe->ref--;
4344
4345 if (!probe->ref) {
4346 probe_ops = probe->probe_ops;
4347 /*
4348 * Sending zero as ip tells probe_ops to free
4349 * the probe->data itself
4350 */
4351 if (probe_ops->free)
4352 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4353 list_del(&probe->list);
4354 kfree(probe);
4355 }
4356 mutex_unlock(&ftrace_lock);
4357}
4358
4359static void acquire_probe_locked(struct ftrace_func_probe *probe)
4360{
4361 /*
4362 * Add one ref to keep it from being freed when releasing the
4363 * ftrace_lock mutex.
4364 */
4365 probe->ref++;
4366}
4367
4368int
4369register_ftrace_function_probe(char *glob, struct trace_array *tr,
4370 struct ftrace_probe_ops *probe_ops,
4371 void *data)
4372{
4373 struct ftrace_func_entry *entry;
4374 struct ftrace_func_probe *probe;
4375 struct ftrace_hash **orig_hash;
4376 struct ftrace_hash *old_hash;
4377 struct ftrace_hash *hash;
4378 int count = 0;
4379 int size;
4380 int ret;
4381 int i;
4382
4383 if (WARN_ON(!tr))
4384 return -EINVAL;
4385
4386 /* We do not support '!' for function probes */
4387 if (WARN_ON(glob[0] == '!'))
4388 return -EINVAL;
4389
4390
4391 mutex_lock(&ftrace_lock);
4392 /* Check if the probe_ops is already registered */
4393 list_for_each_entry(probe, &tr->func_probes, list) {
4394 if (probe->probe_ops == probe_ops)
4395 break;
4396 }
4397 if (&probe->list == &tr->func_probes) {
4398 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4399 if (!probe) {
4400 mutex_unlock(&ftrace_lock);
4401 return -ENOMEM;
4402 }
4403 probe->probe_ops = probe_ops;
4404 probe->ops.func = function_trace_probe_call;
4405 probe->tr = tr;
4406 ftrace_ops_init(&probe->ops);
4407 list_add(&probe->list, &tr->func_probes);
4408 }
4409
4410 acquire_probe_locked(probe);
4411
4412 mutex_unlock(&ftrace_lock);
4413
4414 /*
4415 * Note, there's a small window here where the func_hash->filter_hash
4416 * may be NULL or empty. Need to be careful when reading the loop.
4417 */
4418 mutex_lock(&probe->ops.func_hash->regex_lock);
4419
4420 orig_hash = &probe->ops.func_hash->filter_hash;
4421 old_hash = *orig_hash;
4422 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4423
4424 if (!hash) {
4425 ret = -ENOMEM;
4426 goto out;
4427 }
4428
4429 ret = ftrace_match_records(hash, glob, strlen(glob));
4430
4431 /* Nothing found? */
4432 if (!ret)
4433 ret = -EINVAL;
4434
4435 if (ret < 0)
4436 goto out;
4437
4438 size = 1 << hash->size_bits;
4439 for (i = 0; i < size; i++) {
4440 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4441 if (ftrace_lookup_ip(old_hash, entry->ip))
4442 continue;
4443 /*
4444 * The caller might want to do something special
4445 * for each function we find. We call the callback
4446 * to give the caller an opportunity to do so.
4447 */
4448 if (probe_ops->init) {
4449 ret = probe_ops->init(probe_ops, tr,
4450 entry->ip, data,
4451 &probe->data);
4452 if (ret < 0) {
4453 if (probe_ops->free && count)
4454 probe_ops->free(probe_ops, tr,
4455 0, probe->data);
4456 probe->data = NULL;
4457 goto out;
4458 }
4459 }
4460 count++;
4461 }
4462 }
4463
4464 mutex_lock(&ftrace_lock);
4465
4466 if (!count) {
4467 /* Nothing was added? */
4468 ret = -EINVAL;
4469 goto out_unlock;
4470 }
4471
4472 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4473 hash, 1);
4474 if (ret < 0)
4475 goto err_unlock;
4476
4477 /* One ref for each new function traced */
4478 probe->ref += count;
4479
4480 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4481 ret = ftrace_startup(&probe->ops, 0);
4482
4483 out_unlock:
4484 mutex_unlock(&ftrace_lock);
4485
4486 if (!ret)
4487 ret = count;
4488 out:
4489 mutex_unlock(&probe->ops.func_hash->regex_lock);
4490 free_ftrace_hash(hash);
4491
4492 release_probe(probe);
4493
4494 return ret;
4495
4496 err_unlock:
4497 if (!probe_ops->free || !count)
4498 goto out_unlock;
4499
4500 /* Failed to do the move, need to call the free functions */
4501 for (i = 0; i < size; i++) {
4502 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4503 if (ftrace_lookup_ip(old_hash, entry->ip))
4504 continue;
4505 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4506 }
4507 }
4508 goto out_unlock;
4509}
4510
4511int
4512unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4513 struct ftrace_probe_ops *probe_ops)
4514{
4515 struct ftrace_ops_hash old_hash_ops;
4516 struct ftrace_func_entry *entry;
4517 struct ftrace_func_probe *probe;
4518 struct ftrace_glob func_g;
4519 struct ftrace_hash **orig_hash;
4520 struct ftrace_hash *old_hash;
4521 struct ftrace_hash *hash = NULL;
4522 struct hlist_node *tmp;
4523 struct hlist_head hhd;
4524 char str[KSYM_SYMBOL_LEN];
4525 int count = 0;
4526 int i, ret = -ENODEV;
4527 int size;
4528
4529 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4530 func_g.search = NULL;
4531 else {
4532 int not;
4533
4534 func_g.type = filter_parse_regex(glob, strlen(glob),
4535 &func_g.search, &not);
4536 func_g.len = strlen(func_g.search);
4537
4538 /* we do not support '!' for function probes */
4539 if (WARN_ON(not))
4540 return -EINVAL;
4541 }
4542
4543 mutex_lock(&ftrace_lock);
4544 /* Check if the probe_ops is already registered */
4545 list_for_each_entry(probe, &tr->func_probes, list) {
4546 if (probe->probe_ops == probe_ops)
4547 break;
4548 }
4549 if (&probe->list == &tr->func_probes)
4550 goto err_unlock_ftrace;
4551
4552 ret = -EINVAL;
4553 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4554 goto err_unlock_ftrace;
4555
4556 acquire_probe_locked(probe);
4557
4558 mutex_unlock(&ftrace_lock);
4559
4560 mutex_lock(&probe->ops.func_hash->regex_lock);
4561
4562 orig_hash = &probe->ops.func_hash->filter_hash;
4563 old_hash = *orig_hash;
4564
4565 if (ftrace_hash_empty(old_hash))
4566 goto out_unlock;
4567
4568 old_hash_ops.filter_hash = old_hash;
4569 /* Probes only have filters */
4570 old_hash_ops.notrace_hash = NULL;
4571
4572 ret = -ENOMEM;
4573 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4574 if (!hash)
4575 goto out_unlock;
4576
4577 INIT_HLIST_HEAD(&hhd);
4578
4579 size = 1 << hash->size_bits;
4580 for (i = 0; i < size; i++) {
4581 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4582
4583 if (func_g.search) {
4584 kallsyms_lookup(entry->ip, NULL, NULL,
4585 NULL, str);
4586 if (!ftrace_match(str, &func_g))
4587 continue;
4588 }
4589 count++;
4590 remove_hash_entry(hash, entry);
4591 hlist_add_head(&entry->hlist, &hhd);
4592 }
4593 }
4594
4595 /* Nothing found? */
4596 if (!count) {
4597 ret = -EINVAL;
4598 goto out_unlock;
4599 }
4600
4601 mutex_lock(&ftrace_lock);
4602
4603 WARN_ON(probe->ref < count);
4604
4605 probe->ref -= count;
4606
4607 if (ftrace_hash_empty(hash))
4608 ftrace_shutdown(&probe->ops, 0);
4609
4610 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4611 hash, 1);
4612
4613 /* still need to update the function call sites */
4614 if (ftrace_enabled && !ftrace_hash_empty(hash))
4615 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4616 &old_hash_ops);
4617 synchronize_rcu();
4618
4619 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4620 hlist_del(&entry->hlist);
4621 if (probe_ops->free)
4622 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4623 kfree(entry);
4624 }
4625 mutex_unlock(&ftrace_lock);
4626
4627 out_unlock:
4628 mutex_unlock(&probe->ops.func_hash->regex_lock);
4629 free_ftrace_hash(hash);
4630
4631 release_probe(probe);
4632
4633 return ret;
4634
4635 err_unlock_ftrace:
4636 mutex_unlock(&ftrace_lock);
4637 return ret;
4638}
4639
4640void clear_ftrace_function_probes(struct trace_array *tr)
4641{
4642 struct ftrace_func_probe *probe, *n;
4643
4644 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4645 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4646}
4647
4648static LIST_HEAD(ftrace_commands);
4649static DEFINE_MUTEX(ftrace_cmd_mutex);
4650
4651/*
4652 * Currently we only register ftrace commands from __init, so mark this
4653 * __init too.
4654 */
4655__init int register_ftrace_command(struct ftrace_func_command *cmd)
4656{
4657 struct ftrace_func_command *p;
4658 int ret = 0;
4659
4660 mutex_lock(&ftrace_cmd_mutex);
4661 list_for_each_entry(p, &ftrace_commands, list) {
4662 if (strcmp(cmd->name, p->name) == 0) {
4663 ret = -EBUSY;
4664 goto out_unlock;
4665 }
4666 }
4667 list_add(&cmd->list, &ftrace_commands);
4668 out_unlock:
4669 mutex_unlock(&ftrace_cmd_mutex);
4670
4671 return ret;
4672}
4673
4674/*
4675 * Currently we only unregister ftrace commands from __init, so mark
4676 * this __init too.
4677 */
4678__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4679{
4680 struct ftrace_func_command *p, *n;
4681 int ret = -ENODEV;
4682
4683 mutex_lock(&ftrace_cmd_mutex);
4684 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4685 if (strcmp(cmd->name, p->name) == 0) {
4686 ret = 0;
4687 list_del_init(&p->list);
4688 goto out_unlock;
4689 }
4690 }
4691 out_unlock:
4692 mutex_unlock(&ftrace_cmd_mutex);
4693
4694 return ret;
4695}
4696
4697static int ftrace_process_regex(struct ftrace_iterator *iter,
4698 char *buff, int len, int enable)
4699{
4700 struct ftrace_hash *hash = iter->hash;
4701 struct trace_array *tr = iter->ops->private;
4702 char *func, *command, *next = buff;
4703 struct ftrace_func_command *p;
4704 int ret = -EINVAL;
4705
4706 func = strsep(&next, ":");
4707
4708 if (!next) {
4709 ret = ftrace_match_records(hash, func, len);
4710 if (!ret)
4711 ret = -EINVAL;
4712 if (ret < 0)
4713 return ret;
4714 return 0;
4715 }
4716
4717 /* command found */
4718
4719 command = strsep(&next, ":");
4720
4721 mutex_lock(&ftrace_cmd_mutex);
4722 list_for_each_entry(p, &ftrace_commands, list) {
4723 if (strcmp(p->name, command) == 0) {
4724 ret = p->func(tr, hash, func, command, next, enable);
4725 goto out_unlock;
4726 }
4727 }
4728 out_unlock:
4729 mutex_unlock(&ftrace_cmd_mutex);
4730
4731 return ret;
4732}
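/*
 * Example: ftrace_process_regex() accepts "<glob>[:<command>[:<param>]]",
 * so a write to set_ftrace_filter can either be a plain match or invoke a
 * registered command (function names below are only illustrative, run from
 * the tracefs directory):
 *
 *	echo 'vfs_read'            > set_ftrace_filter
 *	echo '*:mod:ext4'          > set_ftrace_filter
 *	echo 'schedule:stacktrace' > set_ftrace_filter
 *
 * The "stacktrace" command itself is registered by the function tracer
 * code, not in this file.
 */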
4733
4734static ssize_t
4735ftrace_regex_write(struct file *file, const char __user *ubuf,
4736 size_t cnt, loff_t *ppos, int enable)
4737{
4738 struct ftrace_iterator *iter;
4739 struct trace_parser *parser;
4740 ssize_t ret, read;
4741
4742 if (!cnt)
4743 return 0;
4744
4745 if (file->f_mode & FMODE_READ) {
4746 struct seq_file *m = file->private_data;
4747 iter = m->private;
4748 } else
4749 iter = file->private_data;
4750
4751 if (unlikely(ftrace_disabled))
4752 return -ENODEV;
4753
4754 /* iter->hash is a local copy, so we don't need regex_lock */
4755
4756 parser = &iter->parser;
4757 read = trace_get_user(parser, ubuf, cnt, ppos);
4758
4759 if (read >= 0 && trace_parser_loaded(parser) &&
4760 !trace_parser_cont(parser)) {
4761 ret = ftrace_process_regex(iter, parser->buffer,
4762 parser->idx, enable);
4763 trace_parser_clear(parser);
4764 if (ret < 0)
4765 goto out;
4766 }
4767
4768 ret = read;
4769 out:
4770 return ret;
4771}
4772
4773ssize_t
4774ftrace_filter_write(struct file *file, const char __user *ubuf,
4775 size_t cnt, loff_t *ppos)
4776{
4777 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4778}
4779
4780ssize_t
4781ftrace_notrace_write(struct file *file, const char __user *ubuf,
4782 size_t cnt, loff_t *ppos)
4783{
4784 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4785}
4786
4787static int
4788ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4789{
4790 struct ftrace_func_entry *entry;
4791
4792 if (!ftrace_location(ip))
4793 return -EINVAL;
4794
4795 if (remove) {
4796 entry = ftrace_lookup_ip(hash, ip);
4797 if (!entry)
4798 return -ENOENT;
4799 free_hash_entry(hash, entry);
4800 return 0;
4801 }
4802
4803 return add_hash_entry(hash, ip);
4804}
4805
4806static int
4807ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4808 unsigned long ip, int remove, int reset, int enable)
4809{
4810 struct ftrace_hash **orig_hash;
4811 struct ftrace_hash *hash;
4812 int ret;
4813
4814 if (unlikely(ftrace_disabled))
4815 return -ENODEV;
4816
4817 mutex_lock(&ops->func_hash->regex_lock);
4818
4819 if (enable)
4820 orig_hash = &ops->func_hash->filter_hash;
4821 else
4822 orig_hash = &ops->func_hash->notrace_hash;
4823
4824 if (reset)
4825 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4826 else
4827 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4828
4829 if (!hash) {
4830 ret = -ENOMEM;
4831 goto out_regex_unlock;
4832 }
4833
4834 if (buf && !ftrace_match_records(hash, buf, len)) {
4835 ret = -EINVAL;
4836 goto out_regex_unlock;
4837 }
4838 if (ip) {
4839 ret = ftrace_match_addr(hash, ip, remove);
4840 if (ret < 0)
4841 goto out_regex_unlock;
4842 }
4843
4844 mutex_lock(&ftrace_lock);
4845 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4846 mutex_unlock(&ftrace_lock);
4847
4848 out_regex_unlock:
4849 mutex_unlock(&ops->func_hash->regex_lock);
4850
4851 free_ftrace_hash(hash);
4852 return ret;
4853}
4854
4855static int
4856ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4857 int reset, int enable)
4858{
4859 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
4860}
4861
4862/**
4863 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4864 * @ops - the ops to set the filter with
4865 * @ip - the address to add to or remove from the filter.
4866 * @remove - non zero to remove the ip from the filter
4867 * @reset - non zero to reset all filters before applying this filter.
4868 *
4869 * Filters denote which functions should be enabled when tracing is enabled.
4870 * If @ip is NULL, it fails to update the filter.
4871 */
4872int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4873 int remove, int reset)
4874{
4875 ftrace_ops_init(ops);
4876 return ftrace_set_addr(ops, ip, remove, reset, 1);
4877}
4878EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
4879
4880/**
4881 * ftrace_ops_set_global_filter - setup ops to use global filters
4882 * @ops - the ops which will use the global filters
4883 *
4884 * ftrace users who need global function trace filtering should call this.
4885 * It can set the global filter only if ops were not initialized before.
4886 */
4887void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
4888{
4889 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
4890 return;
4891
4892 ftrace_ops_init(ops);
4893 ops->func_hash = &global_ops.local_hash;
4894}
4895EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
4896
4897static int
4898ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4899 int reset, int enable)
4900{
4901 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4902}
4903
4904/**
4905 * ftrace_set_filter - set a function to filter on in ftrace
4906 * @ops - the ops to set the filter with
4907 * @buf - the string that holds the function filter text.
4908 * @len - the length of the string.
4909 * @reset - non zero to reset all filters before applying this filter.
4910 *
4911 * Filters denote which functions should be enabled when tracing is enabled.
4912 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4913 */
4914int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4915 int len, int reset)
4916{
4917 ftrace_ops_init(ops);
4918 return ftrace_set_regex(ops, buf, len, reset, 1);
4919}
4920EXPORT_SYMBOL_GPL(ftrace_set_filter);
4921
4922/**
4923 * ftrace_set_notrace - set a function to not trace in ftrace
4924 * @ops - the ops to set the notrace filter with
4925 * @buf - the string that holds the function notrace text.
4926 * @len - the length of the string.
4927 * @reset - non zero to reset all filters before applying this filter.
4928 *
4929 * Notrace Filters denote which functions should not be enabled when tracing
4930 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4931 * for tracing.
4932 */
4933int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4934 int len, int reset)
4935{
4936 ftrace_ops_init(ops);
4937 return ftrace_set_regex(ops, buf, len, reset, 0);
4938}
4939EXPORT_SYMBOL_GPL(ftrace_set_notrace);
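/*
 * Example sketch: the filter and notrace hashes are independent, so both
 * can be set on the same ops, e.g. trace every "vfs_*" function except
 * vfs_fstat (my_ops and the patterns are hypothetical):
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 *	ftrace_set_notrace(&my_ops, "vfs_fstat", strlen("vfs_fstat"), 1);
 */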
4940/**
4941 * ftrace_set_global_filter - set a function to filter on with global tracers
4942 * @buf - the string that holds the function filter text.
4943 * @len - the length of the string.
4944 * @reset - non zero to reset all filters before applying this filter.
4945 *
4946 * Filters denote which functions should be enabled when tracing is enabled.
4947 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4948 */
4949void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4950{
4951 ftrace_set_regex(&global_ops, buf, len, reset, 1);
4952}
4953EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4954
4955/**
4956 * ftrace_set_global_notrace - set a function to not trace with global tracers
4957 * @buf - the string that holds the function notrace text.
4958 * @len - the length of the string.
4959 * @reset - non zero to reset all filters before applying this filter.
4960 *
4961 * Notrace Filters denote which functions should not be enabled when tracing
4962 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4963 * for tracing.
4964 */
4965void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4966{
4967 ftrace_set_regex(&global_ops, buf, len, reset, 0);
4968}
4969EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4970
4971/*
4972 * command line interface to allow users to set filters on boot up.
4973 */
4974#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
4975static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4976static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4977
4978/* Used by function selftest to not test if filter is set */
4979bool ftrace_filter_param __initdata;
4980
4981static int __init set_ftrace_notrace(char *str)
4982{
4983 ftrace_filter_param = true;
4984 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4985 return 1;
4986}
4987__setup("ftrace_notrace=", set_ftrace_notrace);
4988
4989static int __init set_ftrace_filter(char *str)
4990{
4991 ftrace_filter_param = true;
4992 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4993 return 1;
4994}
4995__setup("ftrace_filter=", set_ftrace_filter);
4996
4997#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4998static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4999static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5000static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5001
5002static int __init set_graph_function(char *str)
5003{
5004 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5005 return 1;
5006}
5007__setup("ftrace_graph_filter=", set_graph_function);
5008
5009static int __init set_graph_notrace_function(char *str)
5010{
5011 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5012 return 1;
5013}
5014__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5015
5016static int __init set_graph_max_depth_function(char *str)
5017{
5018 if (!str)
5019 return 0;
5020 fgraph_max_depth = simple_strtoul(str, NULL, 0);
5021 return 1;
5022}
5023__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
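/*
 * Example: the graph filter/notrace values above are handled one
 * comma-separated expression at a time by set_ftrace_early_graph() below,
 * while max_depth is parsed directly (the function name and depth are only
 * illustrative):
 *
 *	ftrace_graph_filter=do_sys_open ftrace_graph_max_depth=5
 */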
5024
5025static void __init set_ftrace_early_graph(char *buf, int enable)
5026{
5027 int ret;
5028 char *func;
5029 struct ftrace_hash *hash;
5030
5031 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5032 if (WARN_ON(!hash))
5033 return;
5034
5035 while (buf) {
5036 func = strsep(&buf, ",");
5037 /* we allow only one expression at a time */
5038 ret = ftrace_graph_set_hash(hash, func);
5039 if (ret)
5040 printk(KERN_DEBUG "ftrace: function %s not "
5041 "traceable\n", func);
5042 }
5043
5044 if (enable)
5045 ftrace_graph_hash = hash;
5046 else
5047 ftrace_graph_notrace_hash = hash;
5048}
5049#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5050
5051void __init
5052ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5053{
5054 char *func;
5055
5056 ftrace_ops_init(ops);
5057
5058 while (buf) {
5059 func = strsep(&buf, ",");
5060 ftrace_set_regex(ops, func, strlen(func), 0, enable);
5061 }
5062}
5063
5064static void __init set_ftrace_early_filters(void)
5065{
5066 if (ftrace_filter_buf[0])
5067 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5068 if (ftrace_notrace_buf[0])
5069 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5070#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5071 if (ftrace_graph_buf[0])
5072 set_ftrace_early_graph(ftrace_graph_buf, 1);
5073 if (ftrace_graph_notrace_buf[0])
5074 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5075#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5076}
5077
5078int ftrace_regex_release(struct inode *inode, struct file *file)
5079{
5080 struct seq_file *m = (struct seq_file *)file->private_data;
5081 struct ftrace_iterator *iter;
5082 struct ftrace_hash **orig_hash;
5083 struct trace_parser *parser;
5084 int filter_hash;
5085 int ret;
5086
5087 if (file->f_mode & FMODE_READ) {
5088 iter = m->private;
5089 seq_release(inode, file);
5090 } else
5091 iter = file->private_data;
5092
5093 parser = &iter->parser;
5094 if (trace_parser_loaded(parser)) {
5095 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
5096
5097 ftrace_process_regex(iter, parser->buffer,
5098 parser->idx, enable);
5099 }
5100
5101 trace_parser_put(parser);
5102
5103 mutex_lock(&iter->ops->func_hash->regex_lock);
5104
5105 if (file->f_mode & FMODE_WRITE) {
5106 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5107
5108 if (filter_hash) {
5109 orig_hash = &iter->ops->func_hash->filter_hash;
5110 if (iter->tr) {
5111 if (list_empty(&iter->tr->mod_trace))
5112 iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
5113 else
5114 iter->hash->flags |= FTRACE_HASH_FL_MOD;
5115 }
5116 } else
5117 orig_hash = &iter->ops->func_hash->notrace_hash;
5118
5119 mutex_lock(&ftrace_lock);
5120 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5121 iter->hash, filter_hash);
5122 mutex_unlock(&ftrace_lock);
5123 } else {
5124 /* For read only, the hash is the ops hash */
5125 iter->hash = NULL;
5126 }
5127
5128 mutex_unlock(&iter->ops->func_hash->regex_lock);
5129 free_ftrace_hash(iter->hash);
5130 if (iter->tr)
5131 trace_array_put(iter->tr);
5132 kfree(iter);
5133
5134 return 0;
5135}
5136
5137static const struct file_operations ftrace_avail_fops = {
5138 .open = ftrace_avail_open,
5139 .read = seq_read,
5140 .llseek = seq_lseek,
5141 .release = seq_release_private,
5142};
5143
5144static const struct file_operations ftrace_enabled_fops = {
5145 .open = ftrace_enabled_open,
5146 .read = seq_read,
5147 .llseek = seq_lseek,
5148 .release = seq_release_private,
5149};
5150
5151static const struct file_operations ftrace_filter_fops = {
5152 .open = ftrace_filter_open,
5153 .read = seq_read,
5154 .write = ftrace_filter_write,
5155 .llseek = tracing_lseek,
5156 .release = ftrace_regex_release,
5157};
5158
5159static const struct file_operations ftrace_notrace_fops = {
5160 .open = ftrace_notrace_open,
5161 .read = seq_read,
5162 .write = ftrace_notrace_write,
5163 .llseek = tracing_lseek,
5164 .release = ftrace_regex_release,
5165};
5166
5167#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5168
5169static DEFINE_MUTEX(graph_lock);
5170
5171struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5172struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5173
5174enum graph_filter_type {
5175 GRAPH_FILTER_NOTRACE = 0,
5176 GRAPH_FILTER_FUNCTION,
5177};
5178
5179#define FTRACE_GRAPH_EMPTY ((void *)1)
5180
5181struct ftrace_graph_data {
5182 struct ftrace_hash *hash;
5183 struct ftrace_func_entry *entry;
5184 int idx; /* for hash table iteration */
5185 enum graph_filter_type type;
5186 struct ftrace_hash *new_hash;
5187 const struct seq_operations *seq_ops;
5188 struct trace_parser parser;
5189};
5190
5191static void *
5192__g_next(struct seq_file *m, loff_t *pos)
5193{
5194 struct ftrace_graph_data *fgd = m->private;
5195 struct ftrace_func_entry *entry = fgd->entry;
5196 struct hlist_head *head;
5197 int i, idx = fgd->idx;
5198
5199 if (*pos >= fgd->hash->count)
5200 return NULL;
5201
5202 if (entry) {
5203 hlist_for_each_entry_continue(entry, hlist) {
5204 fgd->entry = entry;
5205 return entry;
5206 }
5207
5208 idx++;
5209 }
5210
5211 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5212 head = &fgd->hash->buckets[i];
5213 hlist_for_each_entry(entry, head, hlist) {
5214 fgd->entry = entry;
5215 fgd->idx = i;
5216 return entry;
5217 }
5218 }
5219 return NULL;
5220}
5221
5222static void *
5223g_next(struct seq_file *m, void *v, loff_t *pos)
5224{
5225 (*pos)++;
5226 return __g_next(m, pos);
5227}
5228
5229static void *g_start(struct seq_file *m, loff_t *pos)
5230{
5231 struct ftrace_graph_data *fgd = m->private;
5232
5233 mutex_lock(&graph_lock);
5234
5235 if (fgd->type == GRAPH_FILTER_FUNCTION)
5236 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5237 lockdep_is_held(&graph_lock));
5238 else
5239 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5240 lockdep_is_held(&graph_lock));
5241
5242 /* Nothing, tell g_show to print that all functions are enabled */
5243 if (ftrace_hash_empty(fgd->hash) && !*pos)
5244 return FTRACE_GRAPH_EMPTY;
5245
5246 fgd->idx = 0;
5247 fgd->entry = NULL;
5248 return __g_next(m, pos);
5249}
5250
5251static void g_stop(struct seq_file *m, void *p)
5252{
5253 mutex_unlock(&graph_lock);
5254}
5255
5256static int g_show(struct seq_file *m, void *v)
5257{
5258 struct ftrace_func_entry *entry = v;
5259
5260 if (!entry)
5261 return 0;
5262
5263 if (entry == FTRACE_GRAPH_EMPTY) {
5264 struct ftrace_graph_data *fgd = m->private;
5265
5266 if (fgd->type == GRAPH_FILTER_FUNCTION)
5267 seq_puts(m, "#### all functions enabled ####\n");
5268 else
5269 seq_puts(m, "#### no functions disabled ####\n");
5270 return 0;
5271 }
5272
5273 seq_printf(m, "%ps\n", (void *)entry->ip);
5274
5275 return 0;
5276}
5277
5278static const struct seq_operations ftrace_graph_seq_ops = {
5279 .start = g_start,
5280 .next = g_next,
5281 .stop = g_stop,
5282 .show = g_show,
5283};
5284
5285static int
5286__ftrace_graph_open(struct inode *inode, struct file *file,
5287 struct ftrace_graph_data *fgd)
5288{
5289 int ret;
5290 struct ftrace_hash *new_hash = NULL;
5291
5292 ret = security_locked_down(LOCKDOWN_TRACEFS);
5293 if (ret)
5294 return ret;
5295
5296 if (file->f_mode & FMODE_WRITE) {
5297 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5298
5299 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5300 return -ENOMEM;
5301
5302 if (file->f_flags & O_TRUNC)
5303 new_hash = alloc_ftrace_hash(size_bits);
5304 else
5305 new_hash = alloc_and_copy_ftrace_hash(size_bits,
5306 fgd->hash);
5307 if (!new_hash) {
5308 ret = -ENOMEM;
5309 goto out;
5310 }
5311 }
5312
5313 if (file->f_mode & FMODE_READ) {
5314 ret = seq_open(file, &ftrace_graph_seq_ops);
5315 if (!ret) {
5316 struct seq_file *m = file->private_data;
5317 m->private = fgd;
5318 } else {
5319 /* Failed */
5320 free_ftrace_hash(new_hash);
5321 new_hash = NULL;
5322 }
5323 } else
5324 file->private_data = fgd;
5325
5326out:
5327 if (ret < 0 && file->f_mode & FMODE_WRITE)
5328 trace_parser_put(&fgd->parser);
5329
5330 fgd->new_hash = new_hash;
5331
5332 /*
5333 * All uses of fgd->hash must be taken with the graph_lock
5334 * held. The graph_lock is going to be released, so force
5335 * fgd->hash to be reinitialized when it is taken again.
5336 */
5337 fgd->hash = NULL;
5338
5339 return ret;
5340}
5341
5342static int
5343ftrace_graph_open(struct inode *inode, struct file *file)
5344{
5345 struct ftrace_graph_data *fgd;
5346 int ret;
5347
5348 if (unlikely(ftrace_disabled))
5349 return -ENODEV;
5350
5351 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5352 if (fgd == NULL)
5353 return -ENOMEM;
5354
5355 mutex_lock(&graph_lock);
5356
5357 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5358 lockdep_is_held(&graph_lock));
5359 fgd->type = GRAPH_FILTER_FUNCTION;
5360 fgd->seq_ops = &ftrace_graph_seq_ops;
5361
5362 ret = __ftrace_graph_open(inode, file, fgd);
5363 if (ret < 0)
5364 kfree(fgd);
5365
5366 mutex_unlock(&graph_lock);
5367 return ret;
5368}
5369
5370static int
5371ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5372{
5373 struct ftrace_graph_data *fgd;
5374 int ret;
5375
5376 if (unlikely(ftrace_disabled))
5377 return -ENODEV;
5378
5379 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5380 if (fgd == NULL)
5381 return -ENOMEM;
5382
5383 mutex_lock(&graph_lock);
5384
5385 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5386 lockdep_is_held(&graph_lock));
5387 fgd->type = GRAPH_FILTER_NOTRACE;
5388 fgd->seq_ops = &ftrace_graph_seq_ops;
5389
5390 ret = __ftrace_graph_open(inode, file, fgd);
5391 if (ret < 0)
5392 kfree(fgd);
5393
5394 mutex_unlock(&graph_lock);
5395 return ret;
5396}
5397
5398static int
5399ftrace_graph_release(struct inode *inode, struct file *file)
5400{
5401 struct ftrace_graph_data *fgd;
5402 struct ftrace_hash *old_hash, *new_hash;
5403 struct trace_parser *parser;
5404 int ret = 0;
5405
5406 if (file->f_mode & FMODE_READ) {
5407 struct seq_file *m = file->private_data;
5408
5409 fgd = m->private;
5410 seq_release(inode, file);
5411 } else {
5412 fgd = file->private_data;
5413 }
5414
5415
5416 if (file->f_mode & FMODE_WRITE) {
5417
5418 parser = &fgd->parser;
5419
5420 if (trace_parser_loaded((parser))) {
5421 ret = ftrace_graph_set_hash(fgd->new_hash,
5422 parser->buffer);
5423 }
5424
5425 trace_parser_put(parser);
5426
5427 new_hash = __ftrace_hash_move(fgd->new_hash);
5428 if (!new_hash) {
5429 ret = -ENOMEM;
5430 goto out;
5431 }
5432
5433 mutex_lock(&graph_lock);
5434
5435 if (fgd->type == GRAPH_FILTER_FUNCTION) {
5436 old_hash = rcu_dereference_protected(ftrace_graph_hash,
5437 lockdep_is_held(&graph_lock));
5438 rcu_assign_pointer(ftrace_graph_hash, new_hash);
5439 } else {
5440 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5441 lockdep_is_held(&graph_lock));
5442 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5443 }
5444
5445 mutex_unlock(&graph_lock);
5446
5447 /*
5448 * We need to do a hard force of sched synchronization.
5449 * This is because we use preempt_disable() to do RCU, but
5450 * the function tracers can be called where RCU is not watching
5451 * (like before user_exit()). We can not rely on the RCU
5452 * infrastructure to do the synchronization, thus we must do it
5453 * ourselves.
5454 */
5455 schedule_on_each_cpu(ftrace_sync);
5456
5457 free_ftrace_hash(old_hash);
5458 }
5459
5460 out:
5461 free_ftrace_hash(fgd->new_hash);
5462 kfree(fgd);
5463
5464 return ret;
5465}
5466
5467static int
5468ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5469{
5470 struct ftrace_glob func_g;
5471 struct dyn_ftrace *rec;
5472 struct ftrace_page *pg;
5473 struct ftrace_func_entry *entry;
5474 int fail = 1;
5475 int not;
5476
5477 /* decode regex */
5478 func_g.type = filter_parse_regex(buffer, strlen(buffer),
5479 &func_g.search, &not);
5480
5481 func_g.len = strlen(func_g.search);
5482
5483 mutex_lock(&ftrace_lock);
5484
5485 if (unlikely(ftrace_disabled)) {
5486 mutex_unlock(&ftrace_lock);
5487 return -ENODEV;
5488 }
5489
5490 do_for_each_ftrace_rec(pg, rec) {
5491
5492 if (rec->flags & FTRACE_FL_DISABLED)
5493 continue;
5494
5495 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5496 entry = ftrace_lookup_ip(hash, rec->ip);
5497
5498 if (!not) {
5499 fail = 0;
5500
5501 if (entry)
5502 continue;
5503 if (add_hash_entry(hash, rec->ip) < 0)
5504 goto out;
5505 } else {
5506 if (entry) {
5507 free_hash_entry(hash, entry);
5508 fail = 0;
5509 }
5510 }
5511 }
5512 } while_for_each_ftrace_rec();
5513out:
5514 mutex_unlock(&ftrace_lock);
5515
5516 if (fail)
5517 return -EINVAL;
5518
5519 return 0;
5520}
5521
5522static ssize_t
5523ftrace_graph_write(struct file *file, const char __user *ubuf,
5524 size_t cnt, loff_t *ppos)
5525{
5526 ssize_t read, ret = 0;
5527 struct ftrace_graph_data *fgd = file->private_data;
5528 struct trace_parser *parser;
5529
5530 if (!cnt)
5531 return 0;
5532
5533 /* Read mode uses seq functions */
5534 if (file->f_mode & FMODE_READ) {
5535 struct seq_file *m = file->private_data;
5536 fgd = m->private;
5537 }
5538
5539 parser = &fgd->parser;
5540
5541 read = trace_get_user(parser, ubuf, cnt, ppos);
5542
5543 if (read >= 0 && trace_parser_loaded(parser) &&
5544 !trace_parser_cont(parser)) {
5545
5546 ret = ftrace_graph_set_hash(fgd->new_hash,
5547 parser->buffer);
5548 trace_parser_clear(parser);
5549 }
5550
5551 if (!ret)
5552 ret = read;
5553
5554 return ret;
5555}
5556
5557static const struct file_operations ftrace_graph_fops = {
5558 .open = ftrace_graph_open,
5559 .read = seq_read,
5560 .write = ftrace_graph_write,
5561 .llseek = tracing_lseek,
5562 .release = ftrace_graph_release,
5563};
5564
5565static const struct file_operations ftrace_graph_notrace_fops = {
5566 .open = ftrace_graph_notrace_open,
5567 .read = seq_read,
5568 .write = ftrace_graph_write,
5569 .llseek = tracing_lseek,
5570 .release = ftrace_graph_release,
5571};
5572#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5573
5574void ftrace_create_filter_files(struct ftrace_ops *ops,
5575 struct dentry *parent)
5576{
5577
5578 trace_create_file("set_ftrace_filter", 0644, parent,
5579 ops, &ftrace_filter_fops);
5580
5581 trace_create_file("set_ftrace_notrace", 0644, parent,
5582 ops, &ftrace_notrace_fops);
5583}
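/*
 * Example: for global_ops these files show up in tracefs as
 * set_ftrace_filter and set_ftrace_notrace. A truncating write replaces the
 * current filter, an append extends it (the mount point below is just the
 * usual tracefs location):
 *
 *	echo 'kmem_cache_*'     > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'kmem_cache_free' >> /sys/kernel/tracing/set_ftrace_filter
 */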
5584
5585/*
5586 * The name "destroy_filter_files" is really a misnomer. Although
5587 * it may actually delete the files in the future, for now this is
5588 * really intended to make sure the ops passed in are disabled
5589 * and that when this function returns, the caller is free to
5590 * free the ops.
5591 *
5592 * The "destroy" name is only to match the "create" name that this
5593 * should be paired with.
5594 */
5595void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5596{
5597 mutex_lock(&ftrace_lock);
5598 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5599 ftrace_shutdown(ops, 0);
5600 ops->flags |= FTRACE_OPS_FL_DELETED;
5601 ftrace_free_filter(ops);
5602 mutex_unlock(&ftrace_lock);
5603}
5604
5605static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5606{
5607
5608 trace_create_file("available_filter_functions", 0444,
5609 d_tracer, NULL, &ftrace_avail_fops);
5610
5611 trace_create_file("enabled_functions", 0444,
5612 d_tracer, NULL, &ftrace_enabled_fops);
5613
5614 ftrace_create_filter_files(&global_ops, d_tracer);
5615
5616#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5617 trace_create_file("set_graph_function", 0644, d_tracer,
5618 NULL,
5619 &ftrace_graph_fops);
5620 trace_create_file("set_graph_notrace", 0644, d_tracer,
5621 NULL,
5622 &ftrace_graph_notrace_fops);
5623#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5624
5625 return 0;
5626}
5627
5628static int ftrace_cmp_ips(const void *a, const void *b)
5629{
5630 const unsigned long *ipa = a;
5631 const unsigned long *ipb = b;
5632
5633 if (*ipa > *ipb)
5634 return 1;
5635 if (*ipa < *ipb)
5636 return -1;
5637 return 0;
5638}
5639
5640static int __norecordmcount ftrace_process_locs(struct module *mod,
5641 unsigned long *start,
5642 unsigned long *end)
5643{
5644 struct ftrace_page *pg_unuse = NULL;
5645 struct ftrace_page *start_pg;
5646 struct ftrace_page *pg;
5647 struct dyn_ftrace *rec;
5648 unsigned long skipped = 0;
5649 unsigned long count;
5650 unsigned long *p;
5651 unsigned long addr;
5652 unsigned long flags = 0; /* Shut up gcc */
5653 int ret = -ENOMEM;
5654
5655 count = end - start;
5656
5657 if (!count)
5658 return 0;
5659
5660 sort(start, count, sizeof(*start),
5661 ftrace_cmp_ips, NULL);
5662
5663 start_pg = ftrace_allocate_pages(count);
5664 if (!start_pg)
5665 return -ENOMEM;
5666
5667 mutex_lock(&ftrace_lock);
5668
5669 /*
5670 * The core kernel and each module need their own pages, as
5671 * modules will free them when they are removed.
5672 * Force a new page to be allocated for modules.
5673 */
5674 if (!mod) {
5675 WARN_ON(ftrace_pages || ftrace_pages_start);
5676 /* First initialization */
5677 ftrace_pages = ftrace_pages_start = start_pg;
5678 } else {
5679 if (!ftrace_pages)
5680 goto out;
5681
5682 if (WARN_ON(ftrace_pages->next)) {
5683 /* Hmm, we have free pages? */
5684 while (ftrace_pages->next)
5685 ftrace_pages = ftrace_pages->next;
5686 }
5687
5688 ftrace_pages->next = start_pg;
5689 }
5690
5691 p = start;
5692 pg = start_pg;
5693 while (p < end) {
5694 unsigned long end_offset;
5695 addr = ftrace_call_adjust(*p++);
5696 /*
5697 * Some architecture linkers will pad between
5698 * the different mcount_loc sections of different
5699 * object files to satisfy alignments.
5700 * Skip any NULL pointers.
5701 */
5702 if (!addr) {
5703 skipped++;
5704 continue;
5705 }
5706
5707 end_offset = (pg->index+1) * sizeof(pg->records[0]);
5708 if (end_offset > PAGE_SIZE << pg->order) {
5709 /* We should have allocated enough */
5710 if (WARN_ON(!pg->next))
5711 break;
5712 pg = pg->next;
5713 }
5714
5715 rec = &pg->records[pg->index++];
5716 rec->ip = addr;
5717 }
5718
5719 if (pg->next) {
5720 pg_unuse = pg->next;
5721 pg->next = NULL;
5722 }
5723
5724 /* Assign the last page to ftrace_pages */
5725 ftrace_pages = pg;
5726
5727 /*
5728 * We only need to disable interrupts on start up
5729 * because we are modifying code that an interrupt
5730 * may execute, and the modification is not atomic.
5731 * But for modules, nothing runs the code we modify
5732 * until we are finished with it, and there's no
5733 * reason to cause large interrupt latencies while we do it.
5734 */
5735 if (!mod)
5736 local_irq_save(flags);
5737 ftrace_update_code(mod, start_pg);
5738 if (!mod)
5739 local_irq_restore(flags);
5740 ret = 0;
5741 out:
5742 mutex_unlock(&ftrace_lock);
5743
5744 /* We should have used all pages unless we skipped some */
5745 if (pg_unuse) {
5746 WARN_ON(!skipped);
5747 /* Need to synchronize with ftrace_location_range() */
5748 synchronize_rcu();
5749 ftrace_free_pages(pg_unuse);
5750 }
5751 return ret;
5752}
5753
5754struct ftrace_mod_func {
5755 struct list_head list;
5756 char *name;
5757 unsigned long ip;
5758 unsigned int size;
5759};
5760
5761struct ftrace_mod_map {
5762 struct rcu_head rcu;
5763 struct list_head list;
5764 struct module *mod;
5765 unsigned long start_addr;
5766 unsigned long end_addr;
5767 struct list_head funcs;
5768 unsigned int num_funcs;
5769};
5770
5771#ifdef CONFIG_MODULES
5772
5773#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5774
5775static LIST_HEAD(ftrace_mod_maps);
5776
5777static int referenced_filters(struct dyn_ftrace *rec)
5778{
5779 struct ftrace_ops *ops;
5780 int cnt = 0;
5781
5782 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
5783 if (ops_references_rec(ops, rec)) {
5784 cnt++;
5785 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
5786 rec->flags |= FTRACE_FL_REGS;
5787 }
5788 }
5789
5790 return cnt;
5791}
5792
5793static void
5794clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
5795{
5796 struct ftrace_func_entry *entry;
5797 struct dyn_ftrace *rec;
5798 int i;
5799
5800 if (ftrace_hash_empty(hash))
5801 return;
5802
5803 for (i = 0; i < pg->index; i++) {
5804 rec = &pg->records[i];
5805 entry = __ftrace_lookup_ip(hash, rec->ip);
5806 /*
5807 * Do not allow this rec to match again.
5808 * Yeah, it may waste some memory, but will be removed
5809 * if/when the hash is modified again.
5810 */
5811 if (entry)
5812 entry->ip = 0;
5813 }
5814}
5815
5816/* Clear any records from hashes */
5817static void clear_mod_from_hashes(struct ftrace_page *pg)
5818{
5819 struct trace_array *tr;
5820
5821 mutex_lock(&trace_types_lock);
5822 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5823 if (!tr->ops || !tr->ops->func_hash)
5824 continue;
5825 mutex_lock(&tr->ops->func_hash->regex_lock);
5826 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
5827 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
5828 mutex_unlock(&tr->ops->func_hash->regex_lock);
5829 }
5830 mutex_unlock(&trace_types_lock);
5831}
5832
5833static void ftrace_free_mod_map(struct rcu_head *rcu)
5834{
5835 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
5836 struct ftrace_mod_func *mod_func;
5837 struct ftrace_mod_func *n;
5838
5839 /* All the contents of mod_map are now not visible to readers */
5840 /* All the contents of mod_map are no longer visible to readers */
5841 kfree(mod_func->name);
5842 list_del(&mod_func->list);
5843 kfree(mod_func);
5844 }
5845
5846 kfree(mod_map);
5847}
5848
5849void ftrace_release_mod(struct module *mod)
5850{
5851 struct ftrace_mod_map *mod_map;
5852 struct ftrace_mod_map *n;
5853 struct dyn_ftrace *rec;
5854 struct ftrace_page **last_pg;
5855 struct ftrace_page *tmp_page = NULL;
5856 struct ftrace_page *pg;
5857
5858 mutex_lock(&ftrace_lock);
5859
5860 if (ftrace_disabled)
5861 goto out_unlock;
5862
5863 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
5864 if (mod_map->mod == mod) {
5865 list_del_rcu(&mod_map->list);
5866 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
5867 break;
5868 }
5869 }
5870
5871 /*
5872 * Each module has its own ftrace_pages; remove
5873 * them from the list.
5874 */
5875 last_pg = &ftrace_pages_start;
5876 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5877 rec = &pg->records[0];
5878 if (within_module_core(rec->ip, mod) ||
5879 within_module_init(rec->ip, mod)) {
5880 /*
5881 * As core pages are first, the first
5882 * page should never be a module page.
5883 */
5884 if (WARN_ON(pg == ftrace_pages_start))
5885 goto out_unlock;
5886
5887 /* Check if we are deleting the last page */
5888 if (pg == ftrace_pages)
5889 ftrace_pages = next_to_ftrace_page(last_pg);
5890
5891 ftrace_update_tot_cnt -= pg->index;
5892 *last_pg = pg->next;
5893
5894 pg->next = tmp_page;
5895 tmp_page = pg;
5896 } else
5897 last_pg = &pg->next;
5898 }
5899 out_unlock:
5900 mutex_unlock(&ftrace_lock);
5901
5902 /* Need to synchronize with ftrace_location_range() */
5903 if (tmp_page)
5904 synchronize_rcu();
5905 for (pg = tmp_page; pg; pg = tmp_page) {
5906
5907 /* Needs to be called outside of ftrace_lock */
5908 clear_mod_from_hashes(pg);
5909
5910 if (pg->records) {
5911 free_pages((unsigned long)pg->records, pg->order);
5912 ftrace_number_of_pages -= 1 << pg->order;
5913 }
5914 tmp_page = pg->next;
5915 kfree(pg);
5916 ftrace_number_of_groups--;
5917 }
5918}
5919
5920void ftrace_module_enable(struct module *mod)
5921{
5922 struct dyn_ftrace *rec;
5923 struct ftrace_page *pg;
5924
5925 mutex_lock(&ftrace_lock);
5926
5927 if (ftrace_disabled)
5928 goto out_unlock;
5929
5930 /*
5931 * If the tracing is enabled, go ahead and enable the record.
5932 *
5933 * The reason not to enable the record immediately is the
5934 * inherent check of ftrace_make_nop/ftrace_make_call for
5935 * correct previous instructions. Doing the NOP conversion
5936 * first puts the module into the correct state, thus
5937 * passing the ftrace_make_call check.
5938 *
5939 * We also delay this to after the module code already set the
5940 * text to read-only, as we now need to set it back to read-write
5941 * so that we can modify the text.
5942 */
5943 if (ftrace_start_up)
5944 ftrace_arch_code_modify_prepare();
5945
5946 do_for_each_ftrace_rec(pg, rec) {
5947 int cnt;
5948 /*
5949 * do_for_each_ftrace_rec() is a double loop.
5950 * Module text shares the pg. If a record is
5951 * not part of this module, then skip this pg,
5952 * which the "break" will do.
5953 */
5954 if (!within_module_core(rec->ip, mod) &&
5955 !within_module_init(rec->ip, mod))
5956 break;
5957
5958 cnt = 0;
5959
5960 /*
5961 * When adding a module, we need to check if tracers are
5962 * currently enabled and, if they are and can trace this record,
5963 * we need to enable the module functions as well as update the
5964 * reference counts for those function records.
5965 */
5966 if (ftrace_start_up)
5967 cnt += referenced_filters(rec);
5968
5969 rec->flags &= ~FTRACE_FL_DISABLED;
5970 rec->flags += cnt;
5971
5972 if (ftrace_start_up && cnt) {
5973 int failed = __ftrace_replace_code(rec, 1);
5974 if (failed) {
5975 ftrace_bug(failed, rec);
5976 goto out_loop;
5977 }
5978 }
5979
5980 } while_for_each_ftrace_rec();
5981
5982 out_loop:
5983 if (ftrace_start_up)
5984 ftrace_arch_code_modify_post_process();
5985
5986 out_unlock:
5987 mutex_unlock(&ftrace_lock);
5988
5989 process_cached_mods(mod->name);
5990}
5991
5992void ftrace_module_init(struct module *mod)
5993{
5994 if (ftrace_disabled || !mod->num_ftrace_callsites)
5995 return;
5996
5997 ftrace_process_locs(mod, mod->ftrace_callsites,
5998 mod->ftrace_callsites + mod->num_ftrace_callsites);
5999}
6000
6001static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6002 struct dyn_ftrace *rec)
6003{
6004 struct ftrace_mod_func *mod_func;
6005 unsigned long symsize;
6006 unsigned long offset;
6007 char str[KSYM_SYMBOL_LEN];
6008 char *modname;
6009 const char *ret;
6010
6011 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6012 if (!ret)
6013 return;
6014
6015 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6016 if (!mod_func)
6017 return;
6018
6019 mod_func->name = kstrdup(str, GFP_KERNEL);
6020 if (!mod_func->name) {
6021 kfree(mod_func);
6022 return;
6023 }
6024
6025 mod_func->ip = rec->ip - offset;
6026 mod_func->size = symsize;
6027
6028 mod_map->num_funcs++;
6029
6030 list_add_rcu(&mod_func->list, &mod_map->funcs);
6031}
6032
6033static struct ftrace_mod_map *
6034allocate_ftrace_mod_map(struct module *mod,
6035 unsigned long start, unsigned long end)
6036{
6037 struct ftrace_mod_map *mod_map;
6038
6039 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6040 if (!mod_map)
6041 return NULL;
6042
6043 mod_map->mod = mod;
6044 mod_map->start_addr = start;
6045 mod_map->end_addr = end;
6046 mod_map->num_funcs = 0;
6047
6048 INIT_LIST_HEAD_RCU(&mod_map->funcs);
6049
6050 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6051
6052 return mod_map;
6053}
6054
6055static const char *
6056ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6057 unsigned long addr, unsigned long *size,
6058 unsigned long *off, char *sym)
6059{
6060 struct ftrace_mod_func *found_func = NULL;
6061 struct ftrace_mod_func *mod_func;
6062
6063 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6064 if (addr >= mod_func->ip &&
6065 addr < mod_func->ip + mod_func->size) {
6066 found_func = mod_func;
6067 break;
6068 }
6069 }
6070
6071 if (found_func) {
6072 if (size)
6073 *size = found_func->size;
6074 if (off)
6075 *off = addr - found_func->ip;
6076 if (sym)
6077 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
6078
6079 return found_func->name;
6080 }
6081
6082 return NULL;
6083}
6084
6085const char *
6086ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
6087 unsigned long *off, char **modname, char *sym)
6088{
6089 struct ftrace_mod_map *mod_map;
6090 const char *ret = NULL;
6091
6092 /* mod_map is freed via call_rcu() */
6093 preempt_disable();
6094 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6095 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6096 if (ret) {
6097 if (modname)
6098 *modname = mod_map->mod->name;
6099 break;
6100 }
6101 }
6102 preempt_enable();
6103
6104 return ret;
6105}
6106
6107int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6108 char *type, char *name,
6109 char *module_name, int *exported)
6110{
6111 struct ftrace_mod_map *mod_map;
6112 struct ftrace_mod_func *mod_func;
6113
6114 preempt_disable();
6115 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6116
6117 if (symnum >= mod_map->num_funcs) {
6118 symnum -= mod_map->num_funcs;
6119 continue;
6120 }
6121
6122 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6123 if (symnum > 1) {
6124 symnum--;
6125 continue;
6126 }
6127
6128 *value = mod_func->ip;
6129 *type = 'T';
6130 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6131 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6132 *exported = 1;
6133 preempt_enable();
6134 return 0;
6135 }
6136 WARN_ON(1);
6137 break;
6138 }
6139 preempt_enable();
6140 return -ERANGE;
6141}
6142
6143#else
6144static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6145 struct dyn_ftrace *rec) { }
6146static inline struct ftrace_mod_map *
6147allocate_ftrace_mod_map(struct module *mod,
6148 unsigned long start, unsigned long end)
6149{
6150 return NULL;
6151}
6152#endif /* CONFIG_MODULES */
6153
6154struct ftrace_init_func {
6155 struct list_head list;
6156 unsigned long ip;
6157};
6158
6159/* Clear any init ips from hashes */
6160static void
6161clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6162{
6163 struct ftrace_func_entry *entry;
6164
6165 entry = ftrace_lookup_ip(hash, func->ip);
6166 /*
6167 * Do not allow this rec to match again.
6168 * Yeah, it may waste some memory, but will be removed
6169 * if/when the hash is modified again.
6170 */
6171 if (entry)
6172 entry->ip = 0;
6173}
6174
6175static void
6176clear_func_from_hashes(struct ftrace_init_func *func)
6177{
6178 struct trace_array *tr;
6179
6180 mutex_lock(&trace_types_lock);
6181 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6182 if (!tr->ops || !tr->ops->func_hash)
6183 continue;
6184 mutex_lock(&tr->ops->func_hash->regex_lock);
6185 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6186 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6187 mutex_unlock(&tr->ops->func_hash->regex_lock);
6188 }
6189 mutex_unlock(&trace_types_lock);
6190}
6191
6192static void add_to_clear_hash_list(struct list_head *clear_list,
6193 struct dyn_ftrace *rec)
6194{
6195 struct ftrace_init_func *func;
6196
6197 func = kmalloc(sizeof(*func), GFP_KERNEL);
6198 if (!func) {
6199 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6200 return;
6201 }
6202
6203 func->ip = rec->ip;
6204 list_add(&func->list, clear_list);
6205}
6206
6207void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6208{
6209 unsigned long start = (unsigned long)(start_ptr);
6210 unsigned long end = (unsigned long)(end_ptr);
6211 struct ftrace_page **last_pg = &ftrace_pages_start;
6212 struct ftrace_page *tmp_page = NULL;
6213 struct ftrace_page *pg;
6214 struct dyn_ftrace *rec;
6215 struct dyn_ftrace key;
6216 struct ftrace_mod_map *mod_map = NULL;
6217 struct ftrace_init_func *func, *func_next;
6218 struct list_head clear_hash;
6219
6220 INIT_LIST_HEAD(&clear_hash);
6221
6222 key.ip = start;
6223 key.flags = end; /* overload flags, as it is unsigned long */
6224
6225 mutex_lock(&ftrace_lock);
6226
6227 /*
6228 * If we are freeing module init memory, then check if
6229 * any tracer is active. If so, we need to save a mapping of
6230 * the module functions being freed along with their addresses.
6231 */
6232 if (mod && ftrace_ops_list != &ftrace_list_end)
6233 mod_map = allocate_ftrace_mod_map(mod, start, end);
6234
6235 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6236 if (end < pg->records[0].ip ||
6237 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6238 continue;
6239 again:
6240 rec = bsearch(&key, pg->records, pg->index,
6241 sizeof(struct dyn_ftrace),
6242 ftrace_cmp_recs);
6243 if (!rec)
6244 continue;
6245
6246 /* rec will be cleared from hashes after ftrace_lock unlock */
6247 add_to_clear_hash_list(&clear_hash, rec);
6248
6249 if (mod_map)
6250 save_ftrace_mod_rec(mod_map, rec);
6251
6252 pg->index--;
6253 ftrace_update_tot_cnt--;
6254 if (!pg->index) {
6255 *last_pg = pg->next;
6256 pg->next = tmp_page;
6257 tmp_page = pg;
6258 pg = container_of(last_pg, struct ftrace_page, next);
6259 if (!(*last_pg))
6260 ftrace_pages = pg;
6261 continue;
6262 }
6263 memmove(rec, rec + 1,
6264 (pg->index - (rec - pg->records)) * sizeof(*rec));
6265 /* More than one function may be in this block */
6266 goto again;
6267 }
6268 mutex_unlock(&ftrace_lock);
6269
6270 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6271 clear_func_from_hashes(func);
6272 kfree(func);
6273 }
6274 /* Need to synchronize with ftrace_location_range() */
6275 if (tmp_page) {
6276 synchronize_rcu();
6277 ftrace_free_pages(tmp_page);
6278 }
6279}
6280
6281void __init ftrace_free_init_mem(void)
6282{
6283 void *start = (void *)(&__init_begin);
6284 void *end = (void *)(&__init_end);
6285
6286 ftrace_free_mem(NULL, start, end);
6287}
6288
6289void __init ftrace_init(void)
6290{
6291 extern unsigned long __start_mcount_loc[];
6292 extern unsigned long __stop_mcount_loc[];
6293 unsigned long count, flags;
6294 int ret;
6295
6296 local_irq_save(flags);
6297 ret = ftrace_dyn_arch_init();
6298 local_irq_restore(flags);
6299 if (ret)
6300 goto failed;
6301
6302 count = __stop_mcount_loc - __start_mcount_loc;
6303 if (!count) {
6304 pr_info("ftrace: No functions to be traced?\n");
6305 goto failed;
6306 }
6307
6308 pr_info("ftrace: allocating %ld entries in %ld pages\n",
6309 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
6310
6311 last_ftrace_enabled = ftrace_enabled = 1;
6312
6313 ret = ftrace_process_locs(NULL,
6314 __start_mcount_loc,
6315 __stop_mcount_loc);
6316
6317 pr_info("ftrace: allocated %ld pages with %ld groups\n",
6318 ftrace_number_of_pages, ftrace_number_of_groups);
6319
6320 set_ftrace_early_filters();
6321
6322 return;
6323 failed:
6324 ftrace_disabled = 1;
6325}
6326
6327/* Do nothing if arch does not support this */
6328void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6329{
6330}
6331
6332static void ftrace_update_trampoline(struct ftrace_ops *ops)
6333{
6334 arch_ftrace_update_trampoline(ops);
6335}
6336
6337void ftrace_init_trace_array(struct trace_array *tr)
6338{
6339 INIT_LIST_HEAD(&tr->func_probes);
6340 INIT_LIST_HEAD(&tr->mod_trace);
6341 INIT_LIST_HEAD(&tr->mod_notrace);
6342}
6343#else
6344
6345struct ftrace_ops global_ops = {
6346 .func = ftrace_stub,
6347 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
6348 FTRACE_OPS_FL_INITIALIZED |
6349 FTRACE_OPS_FL_PID,
6350};
6351
6352static int __init ftrace_nodyn_init(void)
6353{
6354 ftrace_enabled = 1;
6355 return 0;
6356}
6357core_initcall(ftrace_nodyn_init);
6358
6359static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
6360static inline void ftrace_startup_enable(int command) { }
6361static inline void ftrace_startup_all(int command) { }
6362
6363# define ftrace_startup_sysctl() do { } while (0)
6364# define ftrace_shutdown_sysctl() do { } while (0)
6365
6366static void ftrace_update_trampoline(struct ftrace_ops *ops)
6367{
6368}
6369
6370#endif /* CONFIG_DYNAMIC_FTRACE */
6371
6372__init void ftrace_init_global_array_ops(struct trace_array *tr)
6373{
6374 tr->ops = &global_ops;
6375 tr->ops->private = tr;
6376 ftrace_init_trace_array(tr);
6377}
6378
6379void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6380{
6381 /* If we filter on pids, update to use the pid function */
6382 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6383 if (WARN_ON(tr->ops->func != ftrace_stub))
6384 printk("ftrace ops had %pS for function\n",
6385 tr->ops->func);
6386 }
6387 tr->ops->func = func;
6388 tr->ops->private = tr;
6389}
6390
6391void ftrace_reset_array_ops(struct trace_array *tr)
6392{
6393 tr->ops->func = ftrace_stub;
6394}
6395
6396static nokprobe_inline void
6397__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6398 struct ftrace_ops *ignored, struct pt_regs *regs)
6399{
6400 struct ftrace_ops *op;
6401 int bit;
6402
6403 bit = trace_test_and_set_recursion(TRACE_LIST_START);
6404 if (bit < 0)
6405 return;
6406
6407 /*
6408 * Some of the ops may be dynamically allocated,
6409 * they must be freed after a synchronize_rcu().
6410 */
6411 preempt_disable_notrace();
6412
6413 do_for_each_ftrace_op(op, ftrace_ops_list) {
6414 /* Stub functions don't need to be called nor tested */
6415 if (op->flags & FTRACE_OPS_FL_STUB)
6416 continue;
6417 /*
6418 * Check the following for each ops before calling its func:
6419 * if the RCU flag is set, then rcu_is_watching() must be true,
6420 * and the ip must match the ops filter (ftrace_ops_test()).
6421 * There is no PER_CPU handling here any more; only the RCU and
6422 * filter checks remain.
6423 *
6424 * If either of these checks fails then op->func() is not executed.
6425 */
6426 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6427 ftrace_ops_test(op, ip, regs)) {
6428 if (FTRACE_WARN_ON(!op->func)) {
6429 pr_warn("op=%p %pS\n", op, op);
6430 goto out;
6431 }
6432 op->func(ip, parent_ip, op, regs);
6433 }
6434 } while_for_each_ftrace_op(op);
6435out:
6436 preempt_enable_notrace();
6437 trace_clear_recursion(bit);
6438}
6439
6440/*
6441 * Some archs only support passing ip and parent_ip. Even though
6442 * the list function ignores the op parameter, we do not want any
6443 * C side effects, where a function is called without the caller
6444 * passing in a third parameter.
6445 * Archs are expected to support regs and ftrace_ops together:
6446 * if they support ftrace_ops, it is assumed they also support regs.
6447 * If callbacks want to use regs, they must either check for regs
6448 * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6449 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
6450 * An architecture can pass partial regs with ftrace_ops and still
6451 * set ARCH_SUPPORTS_FTRACE_OPS.
6452 */
6453#if ARCH_SUPPORTS_FTRACE_OPS
6454static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6455 struct ftrace_ops *op, struct pt_regs *regs)
6456{
6457 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
6458}
6459NOKPROBE_SYMBOL(ftrace_ops_list_func);
6460#else
6461static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6462{
6463 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
6464}
6465NOKPROBE_SYMBOL(ftrace_ops_no_ops);
6466#endif
6467
6468/*
6469 * If there's only one function registered but it does not support
6470 * recursion or needs RCU protection, then the mcount trampoline
6471 * calls this function instead.
6472 */
6473static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6474 struct ftrace_ops *op, struct pt_regs *regs)
6475{
6476 int bit;
6477
6478 bit = trace_test_and_set_recursion(TRACE_LIST_START);
6479 if (bit < 0)
6480 return;
6481
6482 preempt_disable_notrace();
6483
6484 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
6485 op->func(ip, parent_ip, op, regs);
6486
6487 preempt_enable_notrace();
6488 trace_clear_recursion(bit);
6489}
6490NOKPROBE_SYMBOL(ftrace_ops_assist_func);
6491
6492/**
6493 * ftrace_ops_get_func - get the function a trampoline should call
6494 * @ops: the ops to get the function for
6495 *
6496 * Normally the mcount trampoline will call the ops->func, but there
6497 * are times that it should not. For example, if the ops does not
6498 * have its own recursion protection, then the trampoline should call
6499 * ftrace_ops_assist_func() instead.
6500 *
6501 * Returns the function that the trampoline should call for @ops.
6502 */
6503ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6504{
6505 /*
6506 * If the function does not handle recursion or needs to be RCU safe,
6507 * then we need to call the assist handler.
6508 */
6509 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6510 ops->flags & FTRACE_OPS_FL_RCU)
6511 return ftrace_ops_assist_func;
6512
6513 return ops->func;
6514}
6515
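/*
 * sched_switch probe: cache, per CPU, whether the task about to run should
 * be ignored by the function tracer's pid filter.
 */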
6516static void
6517ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6518 struct task_struct *prev, struct task_struct *next)
6519{
6520 struct trace_array *tr = data;
6521 struct trace_pid_list *pid_list;
6522
6523 pid_list = rcu_dereference_sched(tr->function_pids);
6524
6525 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6526 trace_ignore_this_task(pid_list, next));
6527}
6528
6529static void
6530ftrace_pid_follow_sched_process_fork(void *data,
6531 struct task_struct *self,
6532 struct task_struct *task)
6533{
6534 struct trace_pid_list *pid_list;
6535 struct trace_array *tr = data;
6536
6537 pid_list = rcu_dereference_sched(tr->function_pids);
6538 trace_filter_add_remove_task(pid_list, self, task);
6539}
6540
6541static void
6542ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6543{
6544 struct trace_pid_list *pid_list;
6545 struct trace_array *tr = data;
6546
6547 pid_list = rcu_dereference_sched(tr->function_pids);
6548 trace_filter_add_remove_task(pid_list, NULL, task);
6549}
6550
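/*
 * Hook (or unhook) the sched fork/free tracepoints so that children of
 * filtered tasks inherit the pid filter and tasks are dropped from it when
 * they are freed.
 */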
6551void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6552{
6553 if (enable) {
6554 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6555 tr);
6556 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
6557 tr);
6558 } else {
6559 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6560 tr);
6561 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
6562 tr);
6563 }
6564}
6565
6566static void clear_ftrace_pids(struct trace_array *tr)
6567{
6568 struct trace_pid_list *pid_list;
6569 int cpu;
6570
6571 pid_list = rcu_dereference_protected(tr->function_pids,
6572 lockdep_is_held(&ftrace_lock));
6573 if (!pid_list)
6574 return;
6575
6576 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6577
6578 for_each_possible_cpu(cpu)
6579 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
6580
6581 rcu_assign_pointer(tr->function_pids, NULL);
6582
6583	/* Wait until all current users of the old pid list are done with it */
6584 synchronize_rcu();
6585
6586 trace_free_pid_list(pid_list);
6587}
6588
6589void ftrace_clear_pids(struct trace_array *tr)
6590{
6591 mutex_lock(&ftrace_lock);
6592
6593 clear_ftrace_pids(tr);
6594
6595 mutex_unlock(&ftrace_lock);
6596}
6597
6598static void ftrace_pid_reset(struct trace_array *tr)
6599{
6600 mutex_lock(&ftrace_lock);
6601 clear_ftrace_pids(tr);
6602
6603 ftrace_update_pid_func();
6604 ftrace_startup_all(0);
6605
6606 mutex_unlock(&ftrace_lock);
6607}
6608
6609/* Greater than any max PID */
6610#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
6611
6612static void *fpid_start(struct seq_file *m, loff_t *pos)
6613 __acquires(RCU)
6614{
6615 struct trace_pid_list *pid_list;
6616 struct trace_array *tr = m->private;
6617
6618 mutex_lock(&ftrace_lock);
6619 rcu_read_lock_sched();
6620
6621 pid_list = rcu_dereference_sched(tr->function_pids);
6622
6623 if (!pid_list)
6624 return !(*pos) ? FTRACE_NO_PIDS : NULL;
6625
6626 return trace_pid_start(pid_list, pos);
6627}
6628
6629static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
6630{
6631 struct trace_array *tr = m->private;
6632 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
6633
6634 if (v == FTRACE_NO_PIDS) {
6635 (*pos)++;
6636 return NULL;
6637 }
6638 return trace_pid_next(pid_list, v, pos);
6639}
6640
6641static void fpid_stop(struct seq_file *m, void *p)
6642 __releases(RCU)
6643{
6644 rcu_read_unlock_sched();
6645 mutex_unlock(&ftrace_lock);
6646}
6647
6648static int fpid_show(struct seq_file *m, void *v)
6649{
6650 if (v == FTRACE_NO_PIDS) {
6651 seq_puts(m, "no pid\n");
6652 return 0;
6653 }
6654
6655 return trace_pid_show(m, v);
6656}
6657
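/* seq_file operations backing reads of the set_ftrace_pid file */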
6658static const struct seq_operations ftrace_pid_sops = {
6659 .start = fpid_start,
6660 .next = fpid_next,
6661 .stop = fpid_stop,
6662 .show = fpid_show,
6663};
6664
6665static int
6666ftrace_pid_open(struct inode *inode, struct file *file)
6667{
6668 struct trace_array *tr = inode->i_private;
6669 struct seq_file *m;
6670 int ret = 0;
6671
6672 ret = tracing_check_open_get_tr(tr);
6673 if (ret)
6674 return ret;
6675
6676 if ((file->f_mode & FMODE_WRITE) &&
6677 (file->f_flags & O_TRUNC))
6678 ftrace_pid_reset(tr);
6679
6680 ret = seq_open(file, &ftrace_pid_sops);
6681 if (ret < 0) {
6682 trace_array_put(tr);
6683 } else {
6684 m = file->private_data;
6685 /* copy tr over to seq ops */
6686 m->private = tr;
6687 }
6688
6689 return ret;
6690}
6691
6692static void ignore_task_cpu(void *data)
6693{
6694 struct trace_array *tr = data;
6695 struct trace_pid_list *pid_list;
6696
6697 /*
6698 * This function is called by on_each_cpu() while
6699 * ftrace_lock is held.
6700 */
6701 pid_list = rcu_dereference_protected(tr->function_pids,
6702 mutex_is_locked(&ftrace_lock));
6703
6704 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6705 trace_ignore_this_task(pid_list, current));
6706}
6707
6708static ssize_t
6709ftrace_pid_write(struct file *filp, const char __user *ubuf,
6710 size_t cnt, loff_t *ppos)
6711{
6712 struct seq_file *m = filp->private_data;
6713 struct trace_array *tr = m->private;
6714 struct trace_pid_list *filtered_pids = NULL;
6715 struct trace_pid_list *pid_list;
6716 ssize_t ret;
6717
6718 if (!cnt)
6719 return 0;
6720
6721 mutex_lock(&ftrace_lock);
6722
6723 filtered_pids = rcu_dereference_protected(tr->function_pids,
6724 lockdep_is_held(&ftrace_lock));
6725
6726 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
6727 if (ret < 0)
6728 goto out;
6729
6730 rcu_assign_pointer(tr->function_pids, pid_list);
6731
6732 if (filtered_pids) {
6733 synchronize_rcu();
6734 trace_free_pid_list(filtered_pids);
6735 } else if (pid_list) {
6736 /* Register a probe to set whether to ignore the tracing of a task */
6737 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6738 }
6739
6740 /*
6741 * Ignoring of pids is done at task switch. But we have to
6742 * check for those tasks that are currently running.
6743 * Always do this in case a pid was appended or removed.
6744 */
6745 on_each_cpu(ignore_task_cpu, tr, 1);
6746
6747 ftrace_update_pid_func();
6748 ftrace_startup_all(0);
6749 out:
6750 mutex_unlock(&ftrace_lock);
6751
6752 if (ret > 0)
6753 *ppos += ret;
6754
6755 return ret;
6756}
6757
6758static int
6759ftrace_pid_release(struct inode *inode, struct file *file)
6760{
6761 struct trace_array *tr = inode->i_private;
6762
6763 trace_array_put(tr);
6764
6765 return seq_release(inode, file);
6766}
6767
6768static const struct file_operations ftrace_pid_fops = {
6769 .open = ftrace_pid_open,
6770 .write = ftrace_pid_write,
6771 .read = seq_read,
6772 .llseek = tracing_lseek,
6773 .release = ftrace_pid_release,
6774};
6775
6776void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6777{
6778 trace_create_file("set_ftrace_pid", 0644, d_tracer,
6779 tr, &ftrace_pid_fops);
6780}
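
/*
 * Illustrative usage of the file created above (a sketch; the tracefs mount
 * point may differ, e.g. /sys/kernel/debug/tracing on older setups):
 *
 *	echo 123 > /sys/kernel/tracing/set_ftrace_pid
 *		trace only functions run by PID 123; the O_TRUNC open done
 *		by '>' resets any previously set pid list first
 *	echo 456 >> /sys/kernel/tracing/set_ftrace_pid
 *		append PID 456 to the existing filter
 */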
6781
6782void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
6783 struct dentry *d_tracer)
6784{
6785 /* Only the top level directory has the dyn_tracefs and profile */
6786 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
6787
6788 ftrace_init_dyn_tracefs(d_tracer);
6789 ftrace_profile_tracefs(d_tracer);
6790}
6791
6792/**
6793 * ftrace_kill - kill ftrace
6794 *
6795 * This function should be used by panic code. It stops ftrace
6796 * but in a not so nice way: it takes no locks and simply forces all
6797 * tracing to the stub, so it is safe to call from any context.
6798 */
6799void ftrace_kill(void)
6800{
6801 ftrace_disabled = 1;
6802 ftrace_enabled = 0;
6803 ftrace_trace_function = ftrace_stub;
6804}
6805
6806/**
6807 * ftrace_is_dead - Test if ftrace is dead or not.
6808 */
6809int ftrace_is_dead(void)
6810{
6811 return ftrace_disabled;
6812}
6813
6814/**
6815 * register_ftrace_function - register a function for profiling
6816 * @ops: ops structure that holds the function for profiling.
6817 *
6818 * Register a function to be called by all functions in the
6819 * kernel.
6820 *
6821 * Note: @ops->func and all the functions it calls must be labeled
6822 * with "notrace", otherwise it will go into a
6823 * recursive loop.
6824 */
6825int register_ftrace_function(struct ftrace_ops *ops)
6826{
6827 int ret = -1;
6828
6829 ftrace_ops_init(ops);
6830
6831 mutex_lock(&ftrace_lock);
6832
6833 ret = ftrace_startup(ops, 0);
6834
6835 mutex_unlock(&ftrace_lock);
6836
6837 return ret;
6838}
6839EXPORT_SYMBOL_GPL(register_ftrace_function);
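
/*
 * Minimal usage sketch for the API above (illustrative only; my_ops and
 * my_trace_func are hypothetical names, not part of this file). The
 * callback body must itself be notrace-safe:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */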
6840
6841/**
6842 * unregister_ftrace_function - unregister a function for profiling.
6843 * @ops: ops structure that holds the function to unregister
6844 *
6845 * Unregister a function that was added to be called by ftrace profiling.
6846 */
6847int unregister_ftrace_function(struct ftrace_ops *ops)
6848{
6849 int ret;
6850
6851 mutex_lock(&ftrace_lock);
6852 ret = ftrace_shutdown(ops, 0);
6853 mutex_unlock(&ftrace_lock);
6854
6855 return ret;
6856}
6857EXPORT_SYMBOL_GPL(unregister_ftrace_function);
6858
6859int
6860ftrace_enable_sysctl(struct ctl_table *table, int write,
6861 void __user *buffer, size_t *lenp,
6862 loff_t *ppos)
6863{
6864 int ret = -ENODEV;
6865
6866 mutex_lock(&ftrace_lock);
6867
6868 if (unlikely(ftrace_disabled))
6869 goto out;
6870
6871 ret = proc_dointvec(table, write, buffer, lenp, ppos);
6872
6873 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
6874 goto out;
6875
6876 last_ftrace_enabled = !!ftrace_enabled;
6877
6878 if (ftrace_enabled) {
6879
6880 /* we are starting ftrace again */
6881 if (rcu_dereference_protected(ftrace_ops_list,
6882 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
6883 update_ftrace_function();
6884
6885 ftrace_startup_sysctl();
6886
6887 } else {
6888 /* stopping ftrace calls (just send to ftrace_stub) */
6889 ftrace_trace_function = ftrace_stub;
6890
6891 ftrace_shutdown_sysctl();
6892 }
6893
6894 out:
6895 mutex_unlock(&ftrace_lock);
6896 return ret;
6897}
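
/*
 * This handler backs the kernel.ftrace_enabled sysctl. Illustrative usage
 * (a sketch, assuming procfs and the sysctl tool are available):
 *
 *	sysctl kernel.ftrace_enabled=0
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 *
 * Disabling points all tracing at ftrace_stub; re-enabling restores the
 * registered ops via update_ftrace_function() and ftrace_startup_sysctl().
 */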