/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites are patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>

/*
 * The compiler-emitted profiling hook consists of
 *
 *	PUSH {LR}
 *	BL __gnu_mcount_nc
 *
 * To turn this combined sequence into a NOP, we need to restore the value
 * of SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR
 * is not modified anyway, and reloading LR from memory is highly likely to
 * be less efficient.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf10d0d04	/* add.w sp, sp, #4 */
#else
#define	NOP		0xe28dd004	/* add   sp, sp, #4 */
#endif
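
/*
 * For illustration, a patched-out call site (ARM mode mnemonics; the
 * Thumb2 encoding differs but behaves identically):
 *
 *	PUSH {LR}			PUSH {LR}
 *	BL __gnu_mcount_nc	  ->	ADD SP, SP, #4
 *
 * The ADD undoes the PUSH, leaving SP and LR exactly as they were before
 * the profiling hook ran.
 */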

#ifdef CONFIG_DYNAMIC_FTRACE

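/*
 * Applied via stop_machine(): all other CPUs are parked while the kernel
 * text is briefly made writable, the queued modifications are applied,
 * and the text is switched back to read-only.
 */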
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	set_kernel_text_rw();
	ftrace_modify_all_code(*command);
	set_kernel_text_ro();

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

void ftrace_caller_from_init(void);
void ftrace_regs_caller_from_init(void);

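/*
 * Call sites in .init.text are redirected to the *_from_init variants of
 * the trampolines while init memory is still present; once the system
 * has started freeing initmem (or for sites outside init text), the
 * address is used unmodified.
 */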
static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
					  unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
	    system_state >= SYSTEM_FREEING_INITMEM ||
	    likely(!is_kernel_inittext(rec->ip)))
		return addr;
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
	    addr == (unsigned long)&ftrace_caller)
		return (unsigned long)&ftrace_caller_from_init;
	return (unsigned long)&ftrace_regs_caller_from_init;
}

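/*
 * These hooks bracket a batch of call-site updates: module text is made
 * writable before patching and restored to read-only afterwards.
 */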
int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
					 bool warn)
{
	return arm_gen_branch_link(pc, addr, warn);
}

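/*
 * Patch a single instruction: convert the opcodes to their in-memory
 * representation, optionally validate that the site still holds the
 * expected old instruction, write the new one, and flush the icache so
 * the update is visible to instruction fetch.
 */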
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

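/*
 * Point the ftrace_call site (and, with CONFIG_DYNAMIC_FTRACE_WITH_REGS,
 * the ftrace_regs_call site) in the trampoline at the new tracer
 * function. No validation is done here, as the previous branch target
 * varies with whichever tracer was installed before.
 */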
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func, true);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func, true);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

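/*
 * Re-enable a call site by replacing the NOP with a branch to the
 * trampoline. For module call sites whose target is out of direct
 * branch range, route the branch through the module's PLT instead.
 */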
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;
	unsigned long aaddr = adjust_address(rec, addr);
	struct module *mod = NULL;

#ifdef CONFIG_ARM_MODULE_PLTS
	mod = rec->arch.mod;
#endif

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, aaddr, !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!new && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		new = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

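/*
 * Switch an enabled call site from one trampoline to another, e.g.
 * between the plain and the register-saving callers.
 */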
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);

	new = ftrace_call_replace(ip, adjust_address(rec, addr), true);

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

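/*
 * Disable a call site by restoring the NOP. The expected old instruction
 * is the branch to the trampoline, possibly via a module PLT entry if
 * the target was out of direct branch range.
 */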
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long aaddr = adjust_address(rec, addr);
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

#ifdef CONFIG_ARM_MODULE_PLTS
	/* mod is only supplied during module loading */
	if (!mod)
		mod = rec->arch.mod;
	else
		rec->arch.mod = mod;
#endif

	old = ftrace_call_replace(ip, aaddr,
				  !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!old && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		old = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	new = ftrace_nop_replace(rec);
	/*
	 * Locations in .init.text may call __gnu_mcount_nc via a linker
	 * emitted veneer if they are too far away from its implementation,
	 * and so validation may fail spuriously in such cases. Let's work
	 * around this by omitting those from validation.
	 */
	ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
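/*
 * Hijack the return path of the instrumented function: stash the real
 * return address and substitute return_to_handler, so that the graph
 * tracer regains control when the traced function returns. On any
 * failure to locate the saved return address, bail out and leave the
 * call site untouched.
 */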
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer,
			   unsigned long stack_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
err_out:
		return;

	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
		/*
		 * Usually, the stack frames are contiguous in memory but cases
		 * have been observed where the next stack frame does not live
		 * at 'frame_pointer + 4' as this code used to assume.
		 *
		 * Instead, dereference the field in the stack frame that
		 * stores the SP of the calling frame: to avoid unbounded
		 * recursion, this cannot involve any ftrace instrumented
		 * functions, so use the __get_kernel_nofault() primitive
		 * directly.
		 */
		__get_kernel_nofault(&frame_pointer,
				     (unsigned long *)(frame_pointer - 8),
				     unsigned long, err_out);
	} else {
		struct stackframe frame = {
			.fp = frame_pointer,
			.sp = stack_pointer,
			.lr = self_addr,
			.pc = self_addr,
		};
		if (unwind_frame(&frame) < 0)
			return;
		if (frame.lr != self_addr)
			parent = frame.lr_addr;
		frame_pointer = frame.sp;
	}

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

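/*
 * Toggle a single call site inside the trampoline between a NOP and a
 * branch to 'func', validating the previous state in both directions.
 */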
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = arm_gen_nop();
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */