/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

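/*
 * With -mfentry (CC_USING_FENTRY) the tracing hook is called before the
 * traced function has set up its stack frame; fold the config symbol into
 * a plain constant so stack_trace_call() can test it at run time.
 */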
#ifdef CC_USING_FENTRY
# define fentry	1
#else
# define fentry	0
#endif

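/*
 * stack_dump_trace[] holds the return addresses of the deepest stack seen
 * so far (terminated by ULONG_MAX); stack_dump_index[] holds the stack
 * depth, in bytes, recorded for each of those entries.
 */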
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
        .max_entries = STACK_TRACE_ENTRIES - 1,
        .entries = &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

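/*
 * Compare the current stack usage against the recorded maximum.  On a new
 * max, save the stack trace and work out, for every entry, how deep the
 * stack was at that point.  max_stack_lock is taken with interrupts
 * disabled while the record is updated.
 */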
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries = 0;
        max_stack_trace.skip = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Add the passed in ip from the function tracer.
         * Searching for this on the stack will skip over
         * most of the overhead from the stack tracer itself.
         */
        stack_dump_trace[0] = ip;
        max_stack_trace.nr_entries++;

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame) && i == 1) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        max_stack_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

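/*
 * The ftrace callback, invoked on entry to every traced function.  The
 * per-cpu trace_active counter guards against recursion, since anything
 * called from here may itself be traced.
 */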
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        unsigned long stack;
        int cpu;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, this variable is only modified on this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        /*
         * When fentry is used, the traced function does not get
         * its stack frame set up, and we lose the parent.
         * The ip is pretty useless because the function tracer
         * was called before that function set up its stack frame.
         * In this case, we use the parent ip.
         *
         * By adding the return address of either the parent ip
         * or the current ip we can disregard most of the stack usage
         * caused by the stack tracer itself.
         *
         * The function tracer always reports the address of where the
         * mcount call was, but the stack will hold the return address.
         */
        if (fentry)
                ip = parent_ip;
        else
                ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
};

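/* Report the largest stack usage recorded so far, in bytes. */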
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

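/*
 * Writing to stack_max_size resets the recorded maximum.  For example
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */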
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open = tracing_open_generic,
        .read = stack_max_size_read,
        .write = stack_max_size_write,
        .llseek = default_llseek,
};

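/*
 * seq_file iterator for the stack_trace file.  t_start()/t_stop() hold
 * max_stack_lock across the whole walk (with the per-cpu trace_active
 * count raised so the walk itself is not traced into the same lock).
 */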
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "# Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

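/*
 * Emit one line of the stack_trace file per recorded entry, e.g.
 * (illustrative output):
 *
 *   0)     4360      64   _raw_spin_lock+0x2f/0x40
 *
 * Depth is the bytes of stack in use at that entry; Size is roughly that
 * function's own contribution (the difference to the next entry).
 */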
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries - 1);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open = stack_trace_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
};

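/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually changes.
 */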
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

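/*
 * "stacktrace" on the kernel command line enables the tracer at boot;
 * "stacktrace_filter=<function-list>" also limits which functions are
 * traced (the filter itself is applied in stack_trace_init() below).
 */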
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

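/*
 * Create the debugfs control files under the tracing directory and, if
 * the tracer was enabled on the command line, start it now.
 */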
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                          &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                          NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                          NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);