/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc. Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6

/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall	*table;
		struct bpf_map	*map;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map	*sys_enter,
					*sys_exit;
		} prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		} events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct {
		struct bpf_map *map;
	} dump;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	} ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
		struct bpf_map	*map;
	} filter_pids;
	double			duration_filter;
	double			runtime_ms;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			sort_events;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			force;
	bool			vfs_getname;
	int			trace_pgfaults;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};

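/*
 * Accessor for a single tracepoint field: 'offset' is resolved once from the
 * event format, and the union holds a fetch method returning either an
 * integer or a pointer into the sample's raw_data.
 */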
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value; \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

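/*
 * Byte-swapping variants, selected by __tp_field__init_uint() when the data
 * was recorded on a host of the opposite endianness (evsel->needs_swap).
 */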
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);

static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset  = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

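/*
 * Statement-expression wrappers: 'name' is both the struct syscall_tp member
 * to initialize and, via #name, the name of the tracepoint field to look up.
 */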
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			goto out_delete;
		return 0;
	}

	return -ENOMEM;
out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}

static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = perf_evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL)
			goto out_delete;
		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			goto out_delete;

		return 0;
	}

	return -ENOMEM;
out_delete:
	zfree(&evsel->priv);
	return -EINVAL;
}

static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			goto out_delete;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;

out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })

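/*
 * Map an integer value to its name in a strarray, honoring the array's
 * offset; unknown values fall back to the integer format, annotated with the
 * array prefix and "???" when prefixes are being shown.
 */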
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags

#define STRARRAY(name, array) \
	  { .scnprintf = SCA_STRARRAY, \
	    .parm      = &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf = SCA_STRARRAY_FLAGS, \
	    .parm      = &strarray__##array, }

#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

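/*
 * This table is bsearch()ed by syscall_fmt__find(), so it must be kept
 * sorted by ->name.
 */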
static struct syscall_fmt syscall_fmts[] = {
	{ .name = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
	{ .name = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name = "brk", .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name = "clone", .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name = "fstat", .alias = "newfstat", },
	{ .name = "fstatat", .alias = "newfstatat", },
	{ .name = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "getpid", .errpid = true, },
	{ .name = "getpgid", .errpid = true, },
	{ .name = "getppid", .errpid = true, },
	{ .name = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "gettid", .errpid = true, },
	{ .name = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name = "kcmp", .nr_args = 5,
	  .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
		   [1] = { .name = "pid2", .scnprintf = SCA_PID, },
		   [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name = "lstat", .alias = "newlstat", },
	{ .name = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mmap", .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ },
		   [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
	{ .name = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
	{ .name = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name = "mremap", .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
		   [3] = { .scnprintf = SCA_FD, /* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
	{ .name = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
	{ .name = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
		   [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
	{ .name = "poll", .timeout = true, },
	{ .name = "ppoll", .timeout = true, },
	{ .name = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name = "pread", .alias = "pread64", },
	{ .name = "preadv", .alias = "pread", },
	{ .name = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "pwrite", .alias = "pwrite64", },
	{ .name = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name = "select", .timeout = true, },
	{ .name = "sendfile", .alias = "sendfile64", },
	{ .name = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name = "set_tid_address", .errpid = true, },
	{ .name = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name = "stat", .alias = "newstat", },
	{ .name = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
	{ .name = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name = "uname", .alias = "newuname", },
	{ .name = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name = "wait4", .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name = "waitid", .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
}

/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	} bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[RAW_SYSCALL_ARGS_NUM];
};

/*
 * We need this 'calculated' boolean because in some cases we really don't
 * know the duration of a syscall, for instance, when we start a session and
 * some threads are already waiting for a syscall to finish, say 'poll', in
 * which case all we can do is print "( ? )" for the duration and the start
 * timestamp.
 */
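/* Color the duration by magnitude: red for >= 1 ms, yellow for >= 0.01 ms. */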
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}


void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

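/*
 * Grow the per-thread fd -> struct file table on demand, zero-filling newly
 * allocated slots so that pathname lookups for not-yet-seen fds return NULL.
 */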
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

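/*
 * Resolve an fd to a pathname by reading the /proc/<pid>/fd/<fd> (or, for
 * non-leader threads, /proc/<pid>/task/<tid>/fd/<fd>) symlink.
 */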
static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}

static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}

static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}

static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
};

static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_arg_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}

static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}

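/*
 * Pick a default beautifier for each tracepoint field from its type and
 * name: "const char *" fields named *name or containing "path" are printed
 * as filenames, pointers and "addr" fields as hex, pid_t and umode_t via
 * their beautifiers, and int/long fields ending in "fd" as file descriptors.
 */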
static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		if (arg->scnprintf)
			continue;

		len = strlen(field->name);

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else {
			struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul   = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}

	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return -EEXIST;

	if (name == NULL) {
		sc->nonexistent = true;
		return -EEXIST;
	}

	sc->name = name;
	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	/*
	 * If we failed to read the tracepoint format via the sysfs node, the
	 * tracepoint doesn't exist: set the 'nonexistent' flag to true.
	 */
	if (IS_ERR(sc->tp_format)) {
		sc->nonexistent = true;
		return PTR_ERR(sc->tp_format);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check for and discard the first field, '__syscall_nr' or
	 * 'nr', which holds the syscall number and is not needed here; older
	 * kernels call it 'nr' instead of '__syscall_nr'.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	return syscall__set_arg_fmts(sc);
}

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}

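/*
 * Resolve the event qualifier list (built from perf trace's -e option) into
 * syscall ids, expanding glob patterns, then sort the ids so that
 * trace__syscall_enabled() can bsearch() them.
 */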
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}

static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}

static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}

/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as mount 'flags' argument that needs ignoring some magic flag, see comment
 * in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
		return sc->arg_fmt[arg->idx].mask_val(arg, val);

	return val;
}

static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			arg.fmt = &sc->arg_fmt[arg.idx];
			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall__mask_val(sc, &arg, val);

1798 /*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in a strarray
			 * for it.
1802 */
1803 if (val == 0 &&
1804 !trace->show_zeros &&
1805 !(sc->arg_fmt &&
1806 (sc->arg_fmt[arg.idx].show_zero ||
1807 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
1808 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
1809 sc->arg_fmt[arg.idx].parm))
1810 continue;
1811
1812 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
1813
1814 if (trace->show_arg_names)
1815 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
1816
1817 printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1818 }
1819 } else if (IS_ERR(sc->tp_format)) {
1820 /*
1821 * If we managed to read the tracepoint /format file, then we
1822 * may end up not having any args, like with gettid(), so only
1823 * print the raw args when we didn't manage to read it.
1824 */
1825 while (arg.idx < sc->nr_args) {
1826 if (arg.mask & bit)
1827 goto next_arg;
1828 val = syscall_arg__val(&arg, arg.idx);
1829 if (printed)
1830 printed += scnprintf(bf + printed, size - printed, ", ");
1831 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
1832 printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1833next_arg:
1834 ++arg.idx;
1835 bit <<= 1;
1836 }
1837 }
1838
1839 return printed;
1840}
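
/*
 * A sketch of what syscall__scnprintf_args() ends up producing for, say,
 * an openat call (names and values illustrative, not from a real run):
 *
 *	dfd: CWD, filename: "/etc/passwd", flags: RDONLY
 *
 * Zero args without a strarray mapping are suppressed, names are printed
 * when trace->show_arg_names is set, and each value goes through its
 * beautifier (->scnprintf) when one is registered in sc->arg_fmt[].
 */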
1841
1842typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
1843 union perf_event *event,
1844 struct perf_sample *sample);
1845
1846static struct syscall *trace__syscall_info(struct trace *trace,
1847 struct evsel *evsel, int id)
1848{
1849 int err = 0;
1850
1851 if (id < 0) {
1852
1853 /*
1854 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1855 * before that, leaving at a higher verbosity level till that is
1856 * explained. Reproduced with plain ftrace with:
1857 *
1858 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1859 * grep "NR -1 " /t/trace_pipe
1860 *
1861 * After generating some load on the machine.
1862 */
1863 if (verbose > 1) {
1864 static u64 n;
1865 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1866 id, perf_evsel__name(evsel), ++n);
1867 }
1868 return NULL;
1869 }
1870
1871 err = -EINVAL;
1872
1873 if (id > trace->sctbl->syscalls.max_id)
1874 goto out_cant_read;
1875
1876 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
1877 (err = trace__read_syscall_info(trace, id)) != 0)
1878 goto out_cant_read;
1879
1880 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
1881 goto out_cant_read;
1882
1883 return &trace->syscalls.table[id];
1884
1885out_cant_read:
1886 if (verbose > 0) {
1887 char sbuf[STRERR_BUFSIZE];
1888 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
1889 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
1890 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1891 fputs(" information\n", trace->output);
1892 }
1893 return NULL;
1894}
1895
1896static void thread__update_stats(struct thread_trace *ttrace,
1897 int id, struct perf_sample *sample)
1898{
1899 struct int_node *inode;
1900 struct stats *stats;
1901 u64 duration = 0;
1902
1903 inode = intlist__findnew(ttrace->syscall_stats, id);
1904 if (inode == NULL)
1905 return;
1906
1907 stats = inode->priv;
1908 if (stats == NULL) {
1909 stats = malloc(sizeof(struct stats));
1910 if (stats == NULL)
1911 return;
1912 init_stats(stats);
1913 inode->priv = stats;
1914 }
1915
1916 if (ttrace->entry_time && sample->time > ttrace->entry_time)
1917 duration = sample->time - ttrace->entry_time;
1918
1919 update_stats(stats, duration);
1920}
1921
1922static int trace__printf_interrupted_entry(struct trace *trace)
1923{
1924 struct thread_trace *ttrace;
1925 size_t printed;
1926 int len;
1927
1928 if (trace->failure_only || trace->current == NULL)
1929 return 0;
1930
1931 ttrace = thread__priv(trace->current);
1932
1933 if (!ttrace->entry_pending)
1934 return 0;
1935
1936 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1937 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
1938
1939 if (len < trace->args_alignment - 4)
1940 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
1941
1942 printed += fprintf(trace->output, " ...\n");
1943
1944 ttrace->entry_pending = false;
1945 ++trace->nr_events_printed;
1946
1947 return printed;
1948}
1949
1950static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
1951 struct perf_sample *sample, struct thread *thread)
1952{
1953 int printed = 0;
1954
1955 if (trace->print_sample) {
1956 double ts = (double)sample->time / NSEC_PER_MSEC;
1957
1958 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
1959 perf_evsel__name(evsel), ts,
1960 thread__comm_str(thread),
1961 sample->pid, sample->tid, sample->cpu);
1962 }
1963
1964 return printed;
1965}
1966
1967static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
1968{
1969 void *augmented_args = NULL;
1970 /*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and, if so, look for the augmented
	 * args not after sc->args_size bytes but after the full
	 * raw_syscalls:sys_enter payload, which has a fixed size.
	 *
	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, reducing the kernel/userspace traffic
	 * to just what is needed for each syscall.
1983 */
1984 int args_size = raw_augmented_args_size ?: sc->args_size;
1985
1986 *augmented_args_size = sample->raw_size - args_size;
1987 if (*augmented_args_size > 0)
1988 augmented_args = sample->raw_data + args_size;
1989
1990 return augmented_args;
1991}
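
/*
 * Rough layout of the raw_syscalls:sys_enter payload parsed above (64-bit,
 * offsets illustrative):
 *
 *	[ common fields ][ id ][ args[6] ][ augmented args... ]
 *	|<---------- args_size ---------->|<- augmented size ->|
 *
 * With syscalls:sys_enter_NAME instead, args_size comes from that
 * syscall's tracefs format file, so only its real args precede the
 * augmented area.
 */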
1992
1993static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
1994 union perf_event *event __maybe_unused,
1995 struct perf_sample *sample)
1996{
1997 char *msg;
1998 void *args;
1999 int printed = 0;
2000 struct thread *thread;
2001 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2002 int augmented_args_size = 0;
2003 void *augmented_args = NULL;
2004 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2005 struct thread_trace *ttrace;
2006
2007 if (sc == NULL)
2008 return -1;
2009
2010 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2011 ttrace = thread__trace(thread, trace->output);
2012 if (ttrace == NULL)
2013 goto out_put;
2014
2015 trace__fprintf_sample(trace, evsel, sample, thread);
2016
2017 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2018
2019 if (ttrace->entry_str == NULL) {
2020 ttrace->entry_str = malloc(trace__entry_str_size);
2021 if (!ttrace->entry_str)
2022 goto out_put;
2023 }
2024
2025 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2026 trace__printf_interrupted_entry(trace);
2027 /*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4. This breaks the syscall__augmented_args() check for
	 * augmented args, as we calculate syscall->args_size using each
	 * syscalls:sys_enter_NAME tracefs format file: when handling, say, the
	 * openat syscall we get 6 args for the raw_syscalls:sys_enter event
	 * when we expected just 4, and mistakenly think that the extra 2 u64
	 * args are the augmented filename. So just check here and avoid using
	 * augmented args when the evsel is the raw_syscalls one.
2036 */
2037 if (evsel != trace->syscalls.events.sys_enter)
2038 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2039 ttrace->entry_time = sample->time;
2040 msg = ttrace->entry_str;
2041 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2042
2043 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2044 args, augmented_args, augmented_args_size, trace, thread);
2045
2046 if (sc->is_exit) {
2047 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2048 int alignment = 0;
2049
2050 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2051 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2052 if (trace->args_alignment > printed)
2053 alignment = trace->args_alignment - printed;
2054 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2055 }
2056 } else {
2057 ttrace->entry_pending = true;
2058 /* See trace__vfs_getname & trace__sys_exit */
2059 ttrace->filename.pending_open = false;
2060 }
2061
2062 if (trace->current != thread) {
2063 thread__put(trace->current);
2064 trace->current = thread__get(thread);
2065 }
2066 err = 0;
2067out_put:
2068 thread__put(thread);
2069 return err;
2070}
2071
2072static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2073 struct perf_sample *sample)
2074{
2075 struct thread_trace *ttrace;
2076 struct thread *thread;
2077 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2078 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2079 char msg[1024];
2080 void *args, *augmented_args = NULL;
2081 int augmented_args_size;
2082 size_t printed = 0;
2083
2084 if (sc == NULL)
2085 return -1;
2086
2087 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2088 ttrace = thread__trace(thread, trace->output);
2089 /*
	 * We need to get ttrace just to make sure it is there when
	 * syscall__scnprintf_args() and the rest of the beautifiers access it
	 * via struct syscall_arg.
2092 */
2093 if (ttrace == NULL)
2094 goto out_put;
2095
2096 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2097 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2098 printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2099 fprintf(trace->output, "%.*s", (int)printed, msg);
2100 err = 0;
2101out_put:
2102 thread__put(thread);
2103 return err;
2104}
2105
2106static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2107 struct perf_sample *sample,
2108 struct callchain_cursor *cursor)
2109{
2110 struct addr_location al;
2111 int max_stack = evsel->core.attr.sample_max_stack ?
2112 evsel->core.attr.sample_max_stack :
2113 trace->max_stack;
2114 int err;
2115
2116 if (machine__resolve(trace->host, &al, sample) < 0)
2117 return -1;
2118
2119 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2120 addr_location__put(&al);
2121 return err;
2122}
2123
2124static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2125{
2126 /* TODO: user-configurable print_opts */
2127 const unsigned int print_opts = EVSEL__PRINT_SYM |
2128 EVSEL__PRINT_DSO |
2129 EVSEL__PRINT_UNKNOWN_AS_ADDR;
2130
2131 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2132}
2133
2134static const char *errno_to_name(struct evsel *evsel, int err)
2135{
2136 struct perf_env *env = perf_evsel__env(evsel);
2137 const char *arch_name = perf_env__arch(env);
2138
2139 return arch_syscalls__strerrno(arch_name, err);
2140}
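
/*
 * E.g. this turns 2 into "ENOENT" using tables for the perf.data file's
 * architecture, as errno numbers aren't the same everywhere (alpha and
 * mips, for instance, diverge from x86_64).
 */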
2141
2142static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2143 union perf_event *event __maybe_unused,
2144 struct perf_sample *sample)
2145{
2146 long ret;
2147 u64 duration = 0;
2148 bool duration_calculated = false;
2149 struct thread *thread;
2150 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2151 int alignment = trace->args_alignment;
2152 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2153 struct thread_trace *ttrace;
2154
2155 if (sc == NULL)
2156 return -1;
2157
2158 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2159 ttrace = thread__trace(thread, trace->output);
2160 if (ttrace == NULL)
2161 goto out_put;
2162
2163 trace__fprintf_sample(trace, evsel, sample, thread);
2164
2165 if (trace->summary)
2166 thread__update_stats(ttrace, id, sample);
2167
2168 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2169
2170 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2171 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2172 ttrace->filename.pending_open = false;
2173 ++trace->stats.vfs_getname;
2174 }
2175
2176 if (ttrace->entry_time) {
2177 duration = sample->time - ttrace->entry_time;
2178 if (trace__filter_duration(trace, duration))
2179 goto out;
2180 duration_calculated = true;
2181 } else if (trace->duration_filter)
2182 goto out;
2183
2184 if (sample->callchain) {
2185 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2186 if (callchain_ret == 0) {
2187 if (callchain_cursor.nr < trace->min_stack)
2188 goto out;
2189 callchain_ret = 1;
2190 }
2191 }
2192
2193 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2194 goto out;
2195
2196 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2197
2198 if (ttrace->entry_pending) {
2199 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2200 } else {
2201 printed += fprintf(trace->output, " ... [");
2202 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
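		/* 9 == strlen("continued"); count it by hand to skip the color escapes */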
2203 printed += 9;
2204 printed += fprintf(trace->output, "]: %s()", sc->name);
2205 }
2206
2207 printed++; /* the closing ')' */
2208
2209 if (alignment > printed)
2210 alignment -= printed;
2211 else
2212 alignment = 0;
2213
2214 fprintf(trace->output, ")%*s= ", alignment, " ");
2215
2216 if (sc->fmt == NULL) {
2217 if (ret < 0)
2218 goto errno_print;
2219signed_print:
2220 fprintf(trace->output, "%ld", ret);
2221 } else if (ret < 0) {
2222errno_print: {
2223 char bf[STRERR_BUFSIZE];
2224 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2225 *e = errno_to_name(evsel, -ret);
2226
2227 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2228 }
2229 } else if (ret == 0 && sc->fmt->timeout)
2230 fprintf(trace->output, "0 (Timeout)");
2231 else if (ttrace->ret_scnprintf) {
2232 char bf[1024];
2233 struct syscall_arg arg = {
2234 .val = ret,
2235 .thread = thread,
2236 .trace = trace,
2237 };
2238 ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2239 ttrace->ret_scnprintf = NULL;
2240 fprintf(trace->output, "%s", bf);
2241 } else if (sc->fmt->hexret)
2242 fprintf(trace->output, "%#lx", ret);
2243 else if (sc->fmt->errpid) {
2244 struct thread *child = machine__find_thread(trace->host, ret, ret);
2245
2246 if (child != NULL) {
2247 fprintf(trace->output, "%ld", ret);
2248 if (child->comm_set)
2249 fprintf(trace->output, " (%s)", thread__comm_str(child));
2250 thread__put(child);
2251 }
2252 } else
2253 goto signed_print;
2254
2255 fputc('\n', trace->output);
2256
2257 /*
	 * For the sake of --max-events, we only count as an 'event' a
	 * non-filtered sys_enter + sys_exit pair and other tracepoint events.
2260 */
2261 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2262 interrupted = true;
2263
2264 if (callchain_ret > 0)
2265 trace__fprintf_callchain(trace, sample);
2266 else if (callchain_ret < 0)
2267 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2268out:
2269 ttrace->entry_pending = false;
2270 err = 0;
2271out_put:
2272 thread__put(thread);
2273 return err;
2274}
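
/*
 * A sketch of a line as rendered above (values illustrative):
 *
 *	123.456 ( 0.003 ms): cat/1234 openat(dfd: CWD, filename: "/etc/passwd") = 3
 *
 * i.e. the entry head (timestamp, duration, comm/tid), the pending
 * entry_str with the beautified args, alignment padding, then the return
 * value via one of the branches above (errno name, hexret, errpid, etc).
 */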
2275
2276static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2277 union perf_event *event __maybe_unused,
2278 struct perf_sample *sample)
2279{
2280 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2281 struct thread_trace *ttrace;
2282 size_t filename_len, entry_str_len, to_move;
2283 ssize_t remaining_space;
2284 char *pos;
2285 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
2286
2287 if (!thread)
2288 goto out;
2289
2290 ttrace = thread__priv(thread);
2291 if (!ttrace)
2292 goto out_put;
2293
2294 filename_len = strlen(filename);
2295 if (filename_len == 0)
2296 goto out_put;
2297
2298 if (ttrace->filename.namelen < filename_len) {
2299 char *f = realloc(ttrace->filename.name, filename_len + 1);
2300
2301 if (f == NULL)
2302 goto out_put;
2303
2304 ttrace->filename.namelen = filename_len;
2305 ttrace->filename.name = f;
2306 }
2307
2308 strcpy(ttrace->filename.name, filename);
2309 ttrace->filename.pending_open = true;
2310
2311 if (!ttrace->filename.ptr)
2312 goto out_put;
2313
2314 entry_str_len = strlen(ttrace->entry_str);
2315 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2316 if (remaining_space <= 0)
2317 goto out_put;
2318
2319 if (filename_len > (size_t)remaining_space) {
2320 filename += filename_len - remaining_space;
2321 filename_len = remaining_space;
2322 }
2323
2324 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2325 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2326 memmove(pos + filename_len, pos, to_move);
2327 memcpy(pos, filename, filename_len);
2328
2329 ttrace->filename.ptr = 0;
2330 ttrace->filename.entry_str_pos = 0;
2331out_put:
2332 thread__put(thread);
2333out:
2334 return 0;
2335}
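
/*
 * I.e. with the vfs_getname probe in place, a pending entry_str like
 *
 *	"openat(dfd: CWD, filename: "
 *
 * gets the pathname spliced in at filename.entry_str_pos, truncated from
 * the left when it doesn't fit the remaining space (a sketch; the probe
 * must provide a "pathname" field, see evlist__add_vfs_getname()).
 */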
2336
2337static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2338 union perf_event *event __maybe_unused,
2339 struct perf_sample *sample)
2340{
2341 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2342 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2343 struct thread *thread = machine__findnew_thread(trace->host,
2344 sample->pid,
2345 sample->tid);
2346 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2347
2348 if (ttrace == NULL)
2349 goto out_dump;
2350
2351 ttrace->runtime_ms += runtime_ms;
2352 trace->runtime_ms += runtime_ms;
2353out_put:
2354 thread__put(thread);
2355 return 0;
2356
2357out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
2359 evsel->name,
2360 perf_evsel__strval(evsel, sample, "comm"),
2361 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2362 runtime,
2363 perf_evsel__intval(evsel, sample, "vruntime"));
2364 goto out_put;
2365}
2366
2367static int bpf_output__printer(enum binary_printer_ops op,
2368 unsigned int val, void *extra __maybe_unused, FILE *fp)
2369{
2370 unsigned char ch = (unsigned char)val;
2371
2372 switch (op) {
2373 case BINARY_PRINT_CHAR_DATA:
2374 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2375 case BINARY_PRINT_DATA_BEGIN:
2376 case BINARY_PRINT_LINE_BEGIN:
2377 case BINARY_PRINT_ADDR:
2378 case BINARY_PRINT_NUM_DATA:
2379 case BINARY_PRINT_NUM_PAD:
2380 case BINARY_PRINT_SEP:
2381 case BINARY_PRINT_CHAR_PAD:
2382 case BINARY_PRINT_LINE_END:
2383 case BINARY_PRINT_DATA_END:
2384 default:
2385 break;
2386 }
2387
2388 return 0;
2389}
2390
2391static void bpf_output__fprintf(struct trace *trace,
2392 struct perf_sample *sample)
2393{
2394 binary__fprintf(sample->raw_data, sample->raw_size, 8,
2395 bpf_output__printer, NULL, trace->output);
2396 ++trace->nr_events_printed;
2397}
2398
2399static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2400 union perf_event *event __maybe_unused,
2401 struct perf_sample *sample)
2402{
2403 struct thread *thread;
2404 int callchain_ret = 0;
2405
2406 if (evsel->nr_events_printed >= evsel->max_events)
2407 return 0;
2408
2409 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2410
2411 if (sample->callchain) {
2412 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2413 if (callchain_ret == 0) {
2414 if (callchain_cursor.nr < trace->min_stack)
2415 goto out;
2416 callchain_ret = 1;
2417 }
2418 }
2419
2420 trace__printf_interrupted_entry(trace);
2421 trace__fprintf_tstamp(trace, sample->time, trace->output);
2422
2423 if (trace->trace_syscalls && trace->show_duration)
2424 fprintf(trace->output, "( ): ");
2425
2426 if (thread)
2427 trace__fprintf_comm_tid(trace, thread, trace->output);
2428
2429 if (evsel == trace->syscalls.events.augmented) {
2430 int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2431 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2432
2433 if (sc) {
2434 fprintf(trace->output, "%s(", sc->name);
2435 trace__fprintf_sys_enter(trace, evsel, sample);
2436 fputc(')', trace->output);
2437 goto newline;
2438 }
2439
2440 /*
2441 * XXX: Not having the associated syscall info or not finding/adding
2442 * the thread should never happen, but if it does...
2443 * fall thru and print it as a bpf_output event.
2444 */
2445 }
2446
2447 fprintf(trace->output, "%s:", evsel->name);
2448
2449 if (perf_evsel__is_bpf_output(evsel)) {
2450 bpf_output__fprintf(trace, sample);
2451 } else if (evsel->tp_format) {
2452 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2453 trace__fprintf_sys_enter(trace, evsel, sample)) {
2454 event_format__fprintf(evsel->tp_format, sample->cpu,
2455 sample->raw_data, sample->raw_size,
2456 trace->output);
2457 ++trace->nr_events_printed;
2458
2459 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2460 evsel__disable(evsel);
2461 evsel__close(evsel);
2462 }
2463 }
2464 }
2465
2466newline:
2467 fprintf(trace->output, "\n");
2468
2469 if (callchain_ret > 0)
2470 trace__fprintf_callchain(trace, sample);
2471 else if (callchain_ret < 0)
2472 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2473out:
2474 thread__put(thread);
2475 return 0;
2476}
2477
2478static void print_location(FILE *f, struct perf_sample *sample,
2479 struct addr_location *al,
2480 bool print_dso, bool print_sym)
2481{
2483 if ((verbose > 0 || print_dso) && al->map)
2484 fprintf(f, "%s@", al->map->dso->long_name);
2485
2486 if ((verbose > 0 || print_sym) && al->sym)
2487 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2488 al->addr - al->sym->start);
2489 else if (al->map)
2490 fprintf(f, "0x%" PRIx64, al->addr);
2491 else
2492 fprintf(f, "0x%" PRIx64, sample->addr);
2493}
2494
2495static int trace__pgfault(struct trace *trace,
2496 struct evsel *evsel,
2497 union perf_event *event __maybe_unused,
2498 struct perf_sample *sample)
2499{
2500 struct thread *thread;
2501 struct addr_location al;
2502 char map_type = 'd';
2503 struct thread_trace *ttrace;
2504 int err = -1;
2505 int callchain_ret = 0;
2506
2507 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2508
2509 if (sample->callchain) {
2510 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2511 if (callchain_ret == 0) {
2512 if (callchain_cursor.nr < trace->min_stack)
2513 goto out_put;
2514 callchain_ret = 1;
2515 }
2516 }
2517
2518 ttrace = thread__trace(thread, trace->output);
2519 if (ttrace == NULL)
2520 goto out_put;
2521
2522 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2523 ttrace->pfmaj++;
2524 else
2525 ttrace->pfmin++;
2526
2527 if (trace->summary_only)
2528 goto out;
2529
2530 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2531
2532 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2533
2534 fprintf(trace->output, "%sfault [",
2535 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2536 "maj" : "min");
2537
2538 print_location(trace->output, sample, &al, false, true);
2539
2540 fprintf(trace->output, "] => ");
2541
2542 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2543
	if (!al.map) {
		/*
		 * Repeating the same lookup can't find a map the first call
		 * didn't (an apparent leftover from when function and variable
		 * maps were searched separately), so the executable ('x') map
		 * type was unreachable here; just mark the target as unknown.
		 */
		map_type = '?';
	}
2552
2553 print_location(trace->output, sample, &al, true, false);
2554
2555 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2556
2557 if (callchain_ret > 0)
2558 trace__fprintf_callchain(trace, sample);
2559 else if (callchain_ret < 0)
2560 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2561
2562 ++trace->nr_events_printed;
2563out:
2564 err = 0;
2565out_put:
2566 thread__put(thread);
2567 return err;
2568}
2569
2570static void trace__set_base_time(struct trace *trace,
2571 struct evsel *evsel,
2572 struct perf_sample *sample)
2573{
2574 /*
2575 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, as we may end up having
2577 * some other event in the future without PERF_SAMPLE_TIME for good
2578 * reason, i.e. we may not be interested in its timestamps, just in
2579 * it taking place, picking some piece of information when it
2580 * appears in our event stream (vfs_getname comes to mind).
2581 */
2582 if (trace->base_time == 0 && !trace->full_time &&
2583 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2584 trace->base_time = sample->time;
2585}
2586
2587static int trace__process_sample(struct perf_tool *tool,
2588 union perf_event *event,
2589 struct perf_sample *sample,
2590 struct evsel *evsel,
2591 struct machine *machine __maybe_unused)
2592{
2593 struct trace *trace = container_of(tool, struct trace, tool);
2594 struct thread *thread;
2595 int err = 0;
2596
2597 tracepoint_handler handler = evsel->handler;
2598
2599 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2600 if (thread && thread__is_filtered(thread))
2601 goto out;
2602
2603 trace__set_base_time(trace, evsel, sample);
2604
2605 if (handler) {
2606 ++trace->nr_events;
2607 handler(trace, evsel, event, sample);
2608 }
2609out:
2610 thread__put(thread);
2611 return err;
2612}
2613
2614static int trace__record(struct trace *trace, int argc, const char **argv)
2615{
2616 unsigned int rec_argc, i, j;
2617 const char **rec_argv;
2618 const char * const record_args[] = {
2619 "record",
2620 "-R",
2621 "-m", "1024",
2622 "-c", "1",
2623 };
2624
2625 const char * const sc_args[] = { "-e", };
2626 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2627 const char * const majpf_args[] = { "-e", "major-faults" };
2628 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2629 const char * const minpf_args[] = { "-e", "minor-faults" };
2630 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2631
2632 /* +1 is for the event string below */
2633 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2634 majpf_args_nr + minpf_args_nr + argc;
2635 rec_argv = calloc(rec_argc + 1, sizeof(char *));
2636
2637 if (rec_argv == NULL)
2638 return -ENOMEM;
2639
2640 j = 0;
2641 for (i = 0; i < ARRAY_SIZE(record_args); i++)
2642 rec_argv[j++] = record_args[i];
2643
2644 if (trace->trace_syscalls) {
2645 for (i = 0; i < sc_args_nr; i++)
2646 rec_argv[j++] = sc_args[i];
2647
2648 /* event string may be different for older kernels - e.g., RHEL6 */
2649 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2650 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2651 else if (is_valid_tracepoint("syscalls:sys_enter"))
2652 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2653 else {
2654 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2655 free(rec_argv);
2656 return -1;
2657 }
2658 }
2659
2660 if (trace->trace_pgfaults & TRACE_PFMAJ)
2661 for (i = 0; i < majpf_args_nr; i++)
2662 rec_argv[j++] = majpf_args[i];
2663
2664 if (trace->trace_pgfaults & TRACE_PFMIN)
2665 for (i = 0; i < minpf_args_nr; i++)
2666 rec_argv[j++] = minpf_args[i];
2667
2668 for (i = 0; i < (unsigned int)argc; i++)
2669 rec_argv[j++] = argv[i];
2670
2671 return cmd_record(j, rec_argv);
2672}
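
/*
 * So, assuming raw_syscalls is available, something like
 * 'perf trace record -- sleep 1' ends up roughly as:
 *
 *	perf record -R -m 1024 -c 1 \
 *		-e raw_syscalls:sys_enter,raw_syscalls:sys_exit -- sleep 1
 *
 * plus the major/minor-faults events when page fault tracing is on.
 */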
2673
2674static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2675
2676static bool evlist__add_vfs_getname(struct evlist *evlist)
2677{
2678 bool found = false;
2679 struct evsel *evsel, *tmp;
2680 struct parse_events_error err = { .idx = 0, };
2681 int ret = parse_events(evlist, "probe:vfs_getname*", &err);
2682
2683 if (ret)
2684 return false;
2685
2686 evlist__for_each_entry_safe(evlist, evsel, tmp) {
2687 if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
2688 continue;
2689
2690 if (perf_evsel__field(evsel, "pathname")) {
2691 evsel->handler = trace__vfs_getname;
2692 found = true;
2693 continue;
2694 }
2695
2696 list_del_init(&evsel->core.node);
2697 evsel->evlist = NULL;
2698 evsel__delete(evsel);
2699 }
2700
2701 return found;
2702}
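
/*
 * The "probe:vfs_getname*" glob is used because 'perf probe' may have
 * created several variants (probe:vfs_getname, probe:vfs_getname_1, ...)
 * for inlined copies of getname_flags(); only the ones with a "pathname"
 * field are kept and wired up to trace__vfs_getname().
 */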
2703
2704static struct evsel *perf_evsel__new_pgfault(u64 config)
2705{
2706 struct evsel *evsel;
2707 struct perf_event_attr attr = {
2708 .type = PERF_TYPE_SOFTWARE,
2709 .mmap_data = 1,
2710 };
2711
2712 attr.config = config;
2713 attr.sample_period = 1;
2714
2715 event_attr_init(&attr);
2716
2717 evsel = evsel__new(&attr);
2718 if (evsel)
2719 evsel->handler = trace__pgfault;
2720
2721 return evsel;
2722}
2723
2724static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2725{
2726 const u32 type = event->header.type;
2727 struct evsel *evsel;
2728
2729 if (type != PERF_RECORD_SAMPLE) {
2730 trace__process_event(trace, trace->host, event, sample);
2731 return;
2732 }
2733
2734 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2735 if (evsel == NULL) {
2736 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2737 return;
2738 }
2739
2740 if (evswitch__discard(&trace->evswitch, evsel))
2741 return;
2742
2743 trace__set_base_time(trace, evsel, sample);
2744
2745 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
2746 sample->raw_data == NULL) {
2747 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2748 perf_evsel__name(evsel), sample->tid,
2749 sample->cpu, sample->raw_size);
2750 } else {
2751 tracepoint_handler handler = evsel->handler;
2752 handler(trace, evsel, event, sample);
2753 }
2754
2755 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
2756 interrupted = true;
2757}
2758
2759static int trace__add_syscall_newtp(struct trace *trace)
2760{
2761 int ret = -1;
2762 struct evlist *evlist = trace->evlist;
2763 struct evsel *sys_enter, *sys_exit;
2764
2765 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
2766 if (sys_enter == NULL)
2767 goto out;
2768
2769 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2770 goto out_delete_sys_enter;
2771
2772 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
2773 if (sys_exit == NULL)
2774 goto out_delete_sys_enter;
2775
2776 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2777 goto out_delete_sys_exit;
2778
2779 perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
2780 perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
2781
2782 evlist__add(evlist, sys_enter);
2783 evlist__add(evlist, sys_exit);
2784
2785 if (callchain_param.enabled && !trace->kernel_syscallchains) {
2786 /*
2787 * We're interested only in the user space callchain
2788 * leading to the syscall, allow overriding that for
2789 * debugging reasons using --kernel_syscall_callchains
2790 */
2791 sys_exit->core.attr.exclude_callchain_kernel = 1;
2792 }
2793
2794 trace->syscalls.events.sys_enter = sys_enter;
2795 trace->syscalls.events.sys_exit = sys_exit;
2796
2797 ret = 0;
2798out:
2799 return ret;
2800
2801out_delete_sys_exit:
2802 evsel__delete_priv(sys_exit);
2803out_delete_sys_enter:
2804 evsel__delete_priv(sys_enter);
2805 goto out;
2806}
2807
2808static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
2809{
2810 int err = -1;
2811 struct evsel *sys_exit;
2812 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2813 trace->ev_qualifier_ids.nr,
2814 trace->ev_qualifier_ids.entries);
2815
2816 if (filter == NULL)
2817 goto out_enomem;
2818
2819 if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2820 filter)) {
2821 sys_exit = trace->syscalls.events.sys_exit;
2822 err = perf_evsel__append_tp_filter(sys_exit, filter);
2823 }
2824
2825 free(filter);
2826out:
2827 return err;
2828out_enomem:
2829 errno = ENOMEM;
2830 goto out;
2831}
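
/*
 * E.g. for 'perf trace -e open,close', asprintf_expr_inout_ints() builds
 * a tracepoint filter along the lines of (ids illustrative):
 *
 *	id == 2 || id == 3
 *
 * or, for a negated qualifier, "id != 2 && id != 3", appended to both
 * raw_syscalls:sys_{enter,exit} evsels.
 */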
2832
2833#ifdef HAVE_LIBBPF_SUPPORT
2834static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
2835{
2836 if (trace->bpf_obj == NULL)
2837 return NULL;
2838
2839 return bpf_object__find_program_by_title(trace->bpf_obj, name);
2840}
2841
2842static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
2843 const char *prog_name, const char *type)
2844{
2845 struct bpf_program *prog;
2846
2847 if (prog_name == NULL) {
2848 char default_prog_name[256];
2849 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
2850 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2851 if (prog != NULL)
2852 goto out_found;
2853 if (sc->fmt && sc->fmt->alias) {
2854 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
2855 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2856 if (prog != NULL)
2857 goto out_found;
2858 }
2859 goto out_unaugmented;
2860 }
2861
2862 prog = trace__find_bpf_program_by_title(trace, prog_name);
2863
2864 if (prog != NULL) {
2865out_found:
2866 return prog;
2867 }
2868
2869 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
2870 prog_name, type, sc->name);
2871out_unaugmented:
2872 return trace->syscalls.unaugmented_prog;
2873}
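
/*
 * I.e. for the openat syscall and type "enter", the default program title
 * looked up in the BPF object is "!syscalls:sys_enter_openat", falling
 * back to the syscall's alias and finally to the unaugmented catch-all.
 */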
2874
2875static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
2876{
2877 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2878
2879 if (sc == NULL)
2880 return;
2881
2882 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
2883 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
2884}
2885
2886static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
2887{
2888 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2889 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2890}
2891
2892static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
2893{
2894 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2895 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2896}
2897
2898static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
2899{
2900 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2901 int arg = 0;
2902
2903 if (sc == NULL)
2904 goto out;
2905
2906 for (; arg < sc->nr_args; ++arg) {
2907 entry->string_args_len[arg] = 0;
2908 if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
2909 /* Should be set like strace -s strsize */
2910 entry->string_args_len[arg] = PATH_MAX;
2911 }
2912 }
2913out:
2914 for (; arg < 6; ++arg)
2915 entry->string_args_len[arg] = 0;
}

2917static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
2918{
2919 int fd = bpf_map__fd(trace->syscalls.map);
2920 struct bpf_map_syscall_entry value = {
2921 .enabled = !trace->not_ev_qualifier,
2922 };
2923 int err = 0;
2924 size_t i;
2925
2926 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
2927 int key = trace->ev_qualifier_ids.entries[i];
2928
2929 if (value.enabled) {
2930 trace__init_bpf_map_syscall_args(trace, key, &value);
2931 trace__init_syscall_bpf_progs(trace, key);
2932 }
2933
2934 err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
2935 if (err)
2936 break;
2937 }
2938
2939 return err;
2940}
2941
2942static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
2943{
2944 int fd = bpf_map__fd(trace->syscalls.map);
2945 struct bpf_map_syscall_entry value = {
2946 .enabled = enabled,
2947 };
2948 int err = 0, key;
2949
2950 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
2951 if (enabled)
2952 trace__init_bpf_map_syscall_args(trace, key, &value);
2953
2954 err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
2955 if (err)
2956 break;
2957 }
2958
2959 return err;
2960}
2961
2962static int trace__init_syscalls_bpf_map(struct trace *trace)
2963{
2964 bool enabled = true;
2965
2966 if (trace->ev_qualifier_ids.nr)
2967 enabled = trace->not_ev_qualifier;
2968
2969 return __trace__init_syscalls_bpf_map(trace, enabled);
2970}
2971
2972static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
2973{
2974 struct tep_format_field *field, *candidate_field;
2975 int id;
2976
2977 /*
2978 * We're only interested in syscalls that have a pointer:
2979 */
2980 for (field = sc->args; field; field = field->next) {
2981 if (field->flags & TEP_FIELD_IS_POINTER)
2982 goto try_to_find_pair;
2983 }
2984
2985 return NULL;
2986
2987try_to_find_pair:
2988 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
2989 struct syscall *pair = trace__syscall_info(trace, NULL, id);
2990 struct bpf_program *pair_prog;
2991 bool is_candidate = false;
2992
2993 if (pair == NULL || pair == sc ||
2994 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
2995 continue;
2996
2997 for (field = sc->args, candidate_field = pair->args;
2998 field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
2999 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3000 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3001
3002 if (is_pointer) {
3003 if (!candidate_is_pointer) {
				// The candidate just doesn't copy our pointer arg, but might copy other pointers we want.
3005 continue;
3006 }
3007 } else {
3008 if (candidate_is_pointer) {
3009 // The candidate might copy a pointer we don't have, skip it.
3010 goto next_candidate;
3011 }
3012 continue;
3013 }
3014
3015 if (strcmp(field->type, candidate_field->type))
3016 goto next_candidate;
3017
3018 is_candidate = true;
3019 }
3020
3021 if (!is_candidate)
3022 goto next_candidate;
3023
3024 /*
		 * Check if the tentative pair syscall augmenter has more pointers;
		 * if it has, it may be collecting those too and then we can't use
		 * it, as it would collect more than what is common to the two
		 * syscalls.
3028 */
3029 if (candidate_field) {
3030 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3031 if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3032 goto next_candidate;
3033 }
3034
3035 pair_prog = pair->bpf_prog.sys_enter;
3036 /*
3037 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3038 * have been searched for, so search it here and if it returns the
3039 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
3040 * program for a filtered syscall on a non-filtered one.
3041 *
3042 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3043 * useful for "renameat2".
3044 */
3045 if (pair_prog == NULL) {
3046 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3047 if (pair_prog == trace->syscalls.unaugmented_prog)
3048 goto next_candidate;
3049 }
3050
3051 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3052 return pair_prog;
3053 next_candidate:
3054 continue;
3055 }
3056
3057 return NULL;
3058}
3059
3060static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3061{
3062 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3063 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3064 int err = 0, key;
3065
3066 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3067 int prog_fd;
3068
3069 if (!trace__syscall_enabled(trace, key))
3070 continue;
3071
3072 trace__init_syscall_bpf_progs(trace, key);
3073
		// It'll get at least the "!raw_syscalls:unaugmented" program
3075 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3076 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3077 if (err)
3078 break;
3079 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3080 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3081 if (err)
3082 break;
3083 }
3084
3085 /*
	 * Now let's do a second pass looking for enabled syscalls without
3087 * an augmenter that have a signature that is a superset of another
3088 * syscall with an augmenter so that we can auto-reuse it.
3089 *
3090 * I.e. if we have an augmenter for the "open" syscall that has
3091 * this signature:
3092 *
3093 * int open(const char *pathname, int flags, mode_t mode);
3094 *
	 * I.e. one that will collect just the first string argument, then we
	 * can reuse it for the 'creat' syscall, which has this signature:
3097 *
3098 * int creat(const char *pathname, mode_t mode);
3099 *
3100 * and for:
3101 *
3102 * int stat(const char *pathname, struct stat *statbuf);
3103 * int lstat(const char *pathname, struct stat *statbuf);
3104 *
3105 * Because the 'open' augmenter will collect the first arg as a string,
3106 * and leave alone all the other args, which already helps with
3107 * beautifying 'stat' and 'lstat''s pathname arg.
3108 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second args (this one via the raw_syscalls:sys_exit prog
	 * array tail call), that one will be used.
3112 */
3113 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3114 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3115 struct bpf_program *pair_prog;
3116 int prog_fd;
3117
3118 if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3119 continue;
3120
3121 /*
3122 * For now we're just reusing the sys_enter prog, and if it
3123 * already has an augmenter, we don't need to find one.
3124 */
3125 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3126 continue;
3127
3128 /*
3129 * Look at all the other syscalls for one that has a signature
3130 * that is close enough that we can share:
3131 */
3132 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3133 if (pair_prog == NULL)
3134 continue;
3135
3136 sc->bpf_prog.sys_enter = pair_prog;
3137
3138 /*
3139 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
3140 * with the fd for the program we're reusing:
3141 */
3142 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3143 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3144 if (err)
3145 break;
	}

	return err;
3150}
3151#else
3152static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3153{
3154 return 0;
3155}
3156
3157static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3158{
3159 return 0;
3160}
3161
3162static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3163 const char *name __maybe_unused)
3164{
3165 return NULL;
3166}
3167
3168static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3169{
3170 return 0;
3171}
3172#endif // HAVE_LIBBPF_SUPPORT
3173
3174static int trace__set_ev_qualifier_filter(struct trace *trace)
3175{
3176 if (trace->syscalls.map)
3177 return trace__set_ev_qualifier_bpf_filter(trace);
3178 if (trace->syscalls.events.sys_enter)
3179 return trace__set_ev_qualifier_tp_filter(trace);
3180 return 0;
3181}
3182
3183static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3184 size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3185{
3186 int err = 0;
3187#ifdef HAVE_LIBBPF_SUPPORT
3188 bool value = true;
3189 int map_fd = bpf_map__fd(map);
3190 size_t i;
3191
3192 for (i = 0; i < npids; ++i) {
3193 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3194 if (err)
3195 break;
3196 }
3197#endif
3198 return err;
3199}
3200
3201static int trace__set_filter_loop_pids(struct trace *trace)
3202{
	unsigned int nr = 1;
	int err;
3204 pid_t pids[32] = {
3205 getpid(),
3206 };
3207 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3208
3209 while (thread && nr < ARRAY_SIZE(pids)) {
3210 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3211
3212 if (parent == NULL)
3213 break;
3214
3215 if (!strcmp(thread__comm_str(parent), "sshd") ||
3216 strstarts(thread__comm_str(parent), "gnome-terminal")) {
3217 pids[nr++] = parent->tid;
3218 break;
3219 }
3220 thread = parent;
3221 }
3222
3223 err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
3224 if (!err && trace->filter_pids.map)
3225 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3226
3227 return err;
3228}
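
/*
 * The pids filtered above ('perf trace' itself plus an sshd or
 * gnome-terminal ancestor) are the ones that would otherwise create a
 * feedback loop: writing trace output generates syscalls in the
 * terminal/sshd, which generate more trace output, and so on.
 */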
3229
3230static int trace__set_filter_pids(struct trace *trace)
3231{
3232 int err = 0;
3233 /*
3234 * Better not use !target__has_task() here because we need to cover the
3235 * case where no threads were specified in the command line, but a
3236 * workload was, and in that case we will fill in the thread_map when
3237 * we fork the workload in perf_evlist__prepare_workload.
3238 */
3239 if (trace->filter_pids.nr > 0) {
3240 err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3241 trace->filter_pids.entries);
3242 if (!err && trace->filter_pids.map) {
3243 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3244 trace->filter_pids.entries);
3245 }
3246 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3247 err = trace__set_filter_loop_pids(trace);
3248 }
3249
3250 return err;
3251}
3252
3253static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3254{
3255 struct evlist *evlist = trace->evlist;
3256 struct perf_sample sample;
3257 int err;
3258
3259 err = perf_evlist__parse_sample(evlist, event, &sample);
3260 if (err)
3261 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3262 else
3263 trace__handle_event(trace, event, &sample);
3264
3265 return 0;
3266}
3267
3268static int __trace__flush_events(struct trace *trace)
3269{
3270 u64 first = ordered_events__first_time(&trace->oe.data);
3271 u64 flush = trace->oe.last - NSEC_PER_SEC;
3272
	/* Is there something to flush? */
3274 if (first && first < flush)
3275 return ordered_events__flush_time(&trace->oe.data, flush);
3276
3277 return 0;
3278}
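
/*
 * I.e. with --sort-events we keep a reordering window of one second:
 * everything older than (newest queued timestamp - NSEC_PER_SEC) is
 * deemed safe to deliver in timestamp order, as the per-CPU ring buffers
 * may hand us events slightly out of order.
 */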
3279
3280static int trace__flush_events(struct trace *trace)
3281{
3282 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3283}
3284
3285static int trace__deliver_event(struct trace *trace, union perf_event *event)
3286{
3287 int err;
3288
3289 if (!trace->sort_events)
3290 return __trace__deliver_event(trace, event);
3291
3292 err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3293 if (err && err != -1)
3294 return err;
3295
3296 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3297 if (err)
3298 return err;
3299
3300 return trace__flush_events(trace);
3301}
3302
3303static int ordered_events__deliver_event(struct ordered_events *oe,
3304 struct ordered_event *event)
3305{
3306 struct trace *trace = container_of(oe, struct trace, oe.data);
3307
3308 return __trace__deliver_event(trace, event->event);
3309}
3310
3311static int trace__run(struct trace *trace, int argc, const char **argv)
3312{
3313 struct evlist *evlist = trace->evlist;
3314 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3315 int err = -1, i;
3316 unsigned long before;
3317 const bool forks = argc > 0;
3318 bool draining = false;
3319
3320 trace->live = true;
3321
3322 if (!trace->raw_augmented_syscalls) {
3323 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3324 goto out_error_raw_syscalls;
3325
3326 if (trace->trace_syscalls)
3327 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3328 }
3329
3330 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3331 pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3332 if (pgfault_maj == NULL)
3333 goto out_error_mem;
3334 perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3335 evlist__add(evlist, pgfault_maj);
3336 }
3337
3338 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3339 pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3340 if (pgfault_min == NULL)
3341 goto out_error_mem;
3342 perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3343 evlist__add(evlist, pgfault_min);
3344 }
3345
3346 if (trace->sched &&
3347 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
3348 trace__sched_stat_runtime))
3349 goto out_error_sched_stat_runtime;
3350
3351 /*
3352 * If a global cgroup was set, apply it to all the events without an
3353 * explicit cgroup. I.e.:
3354 *
3355 * trace -G A -e sched:*switch
3356 *
3357 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3358 * _and_ sched:sched_switch to the 'A' cgroup, while:
3359 *
3360 * trace -e sched:*switch -G A
3361 *
3362 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
3364 * a cgroup (on the root cgroup, sys wide, etc).
3365 *
3366 * Multiple cgroups:
3367 *
3368 * trace -G A -e sched:*switch -G B
3369 *
3370 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3371 * to the 'B' cgroup.
3372 *
3373 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3374 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3375 */
3376 if (trace->cgroup)
3377 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3378
3379 err = perf_evlist__create_maps(evlist, &trace->opts.target);
3380 if (err < 0) {
3381 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3382 goto out_delete_evlist;
3383 }
3384
3385 err = trace__symbols_init(trace, evlist);
3386 if (err < 0) {
3387 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3388 goto out_delete_evlist;
3389 }
3390
3391 perf_evlist__config(evlist, &trace->opts, &callchain_param);
3392
3393 signal(SIGCHLD, sig_handler);
3394 signal(SIGINT, sig_handler);
3395
3396 if (forks) {
3397 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3398 argv, false, NULL);
3399 if (err < 0) {
3400 fprintf(trace->output, "Couldn't run the workload!\n");
3401 goto out_delete_evlist;
3402 }
3403 }
3404
3405 err = evlist__open(evlist);
3406 if (err < 0)
3407 goto out_error_open;
3408
3409 err = bpf__apply_obj_config();
3410 if (err) {
3411 char errbuf[BUFSIZ];
3412
3413 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
3414 pr_err("ERROR: Apply config to BPF failed: %s\n",
3415 errbuf);
3416 goto out_error_open;
3417 }
3418
3419 err = trace__set_filter_pids(trace);
3420 if (err < 0)
3421 goto out_error_mem;
3422
3423 if (trace->syscalls.map)
3424 trace__init_syscalls_bpf_map(trace);
3425
3426 if (trace->syscalls.prog_array.sys_enter)
3427 trace__init_syscalls_bpf_prog_array_maps(trace);
3428
3429 if (trace->ev_qualifier_ids.nr > 0) {
3430 err = trace__set_ev_qualifier_filter(trace);
3431 if (err < 0)
3432 goto out_errno;
3433
3434 if (trace->syscalls.events.sys_exit) {
3435 pr_debug("event qualifier tracepoint filter: %s\n",
3436 trace->syscalls.events.sys_exit->filter);
3437 }
3438 }
3439
3440 /*
3441 * If the "close" syscall is not traced, then we will not have the
	 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the
	 * fd->pathname table, and we'd end up showing the last value set by
3444 * syscalls opening a pathname and associating it with a descriptor or
3445 * reading it from /proc/pid/fd/ in cases where that doesn't make
3446 * sense.
3447 *
3448 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3449 * not in use.
3450 */
3451 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3452
3453 err = perf_evlist__apply_filters(evlist, &evsel);
3454 if (err < 0)
3455 goto out_error_apply_filters;
3456
3457 if (trace->dump.map)
3458 bpf_map__fprintf(trace->dump.map, trace->output);
3459
3460 err = evlist__mmap(evlist, trace->opts.mmap_pages);
3461 if (err < 0)
3462 goto out_error_mmap;
3463
3464 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
3465 evlist__enable(evlist);
3466
3467 if (forks)
3468 perf_evlist__start_workload(evlist);
3469
3470 if (trace->opts.initial_delay) {
3471 usleep(trace->opts.initial_delay * 1000);
3472 evlist__enable(evlist);
3473 }
3474
3475 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3476 evlist->core.threads->nr > 1 ||
3477 evlist__first(evlist)->core.attr.inherit;
3478
3479 /*
3480 * Now that we already used evsel->core.attr to ask the kernel to setup the
3481 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
3482 * trace__resolve_callchain(), allowing per-event max-stack settings
3483 * to override an explicitly set --max-stack global setting.
3484 */
3485 evlist__for_each_entry(evlist, evsel) {
3486 if (evsel__has_callchain(evsel) &&
3487 evsel->core.attr.sample_max_stack == 0)
3488 evsel->core.attr.sample_max_stack = trace->max_stack;
3489 }
3490again:
3491 before = trace->nr_events;
3492
3493 for (i = 0; i < evlist->core.nr_mmaps; i++) {
3494 union perf_event *event;
3495 struct mmap *md;
3496
3497 md = &evlist->mmap[i];
3498 if (perf_mmap__read_init(md) < 0)
3499 continue;
3500
3501 while ((event = perf_mmap__read_event(md)) != NULL) {
3502 ++trace->nr_events;
3503
3504 err = trace__deliver_event(trace, event);
3505 if (err)
3506 goto out_disable;
3507
3508 perf_mmap__consume(md);
3509
3510 if (interrupted)
3511 goto out_disable;
3512
3513 if (done && !draining) {
3514 evlist__disable(evlist);
3515 draining = true;
3516 }
3517 }
3518 perf_mmap__read_done(md);
3519 }
3520
3521 if (trace->nr_events == before) {
3522 int timeout = done ? 100 : -1;
3523
3524 if (!draining && evlist__poll(evlist, timeout) > 0) {
3525 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
3526 draining = true;
3527
3528 goto again;
3529 } else {
3530 if (trace__flush_events(trace))
3531 goto out_disable;
3532 }
3533 } else {
3534 goto again;
3535 }
3536
3537out_disable:
3538 thread__zput(trace->current);
3539
3540 evlist__disable(evlist);
3541
3542 if (trace->sort_events)
3543 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
3544
3545 if (!err) {
3546 if (trace->summary)
3547 trace__fprintf_thread_summary(trace, trace->output);
3548
3549 if (trace->show_tool_stats) {
3550 fprintf(trace->output, "Stats:\n "
3551 " vfs_getname : %" PRIu64 "\n"
3552 " proc_getname: %" PRIu64 "\n",
3553 trace->stats.vfs_getname,
3554 trace->stats.proc_getname);
3555 }
3556 }
3557
3558out_delete_evlist:
3559 trace__symbols__exit(trace);
3560
3561 evlist__delete(evlist);
3562 cgroup__put(trace->cgroup);
3563 trace->evlist = NULL;
3564 trace->live = false;
3565 return err;
3566{
3567 char errbuf[BUFSIZ];
3568
3569out_error_sched_stat_runtime:
3570 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
3571 goto out_error;
3572
3573out_error_raw_syscalls:
3574 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
3575 goto out_error;
3576
3577out_error_mmap:
3578 perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
3579 goto out_error;
3580
3581out_error_open:
3582 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
3583
3584out_error:
3585 fprintf(trace->output, "%s\n", errbuf);
3586 goto out_delete_evlist;
3587
3588out_error_apply_filters:
3589 fprintf(trace->output,
3590 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
3591 evsel->filter, perf_evsel__name(evsel), errno,
3592 str_error_r(errno, errbuf, sizeof(errbuf)));
3593 goto out_delete_evlist;
3594}
3595out_error_mem:
3596 fprintf(trace->output, "Not enough memory to run!\n");
3597 goto out_delete_evlist;
3598
3599out_errno:
3600 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
3601 goto out_delete_evlist;
3602}
3603
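/*
 * Replay mode: instead of tracing live, process a perf.data file recorded
 * earlier (e.g. with 'perf trace record'), wiring up the same sys_enter,
 * sys_exit and page fault handlers used in live mode.
 */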
3604static int trace__replay(struct trace *trace)
3605{
3606 const struct evsel_str_handler handlers[] = {
3607 { "probe:vfs_getname", trace__vfs_getname, },
3608 };
3609 struct perf_data data = {
3610 .path = input_name,
3611 .mode = PERF_DATA_MODE_READ,
3612 .force = trace->force,
3613 };
3614 struct perf_session *session;
3615 struct evsel *evsel;
3616 int err = -1;
3617
3618 trace->tool.sample = trace__process_sample;
3619 trace->tool.mmap = perf_event__process_mmap;
3620 trace->tool.mmap2 = perf_event__process_mmap2;
3621 trace->tool.comm = perf_event__process_comm;
3622 trace->tool.exit = perf_event__process_exit;
3623 trace->tool.fork = perf_event__process_fork;
3624 trace->tool.attr = perf_event__process_attr;
3625 trace->tool.tracing_data = perf_event__process_tracing_data;
3626 trace->tool.build_id = perf_event__process_build_id;
3627 trace->tool.namespaces = perf_event__process_namespaces;
3628
3629 trace->tool.ordered_events = true;
3630 trace->tool.ordering_requires_timestamps = true;
3631
3632 /* add tid to output */
3633 trace->multiple_threads = true;
3634
3635 session = perf_session__new(&data, false, &trace->tool);
3636 if (IS_ERR(session))
3637 return PTR_ERR(session);
3638
3639 if (trace->opts.target.pid)
3640 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
3641
3642 if (trace->opts.target.tid)
3643 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
3644
3645 if (symbol__init(&session->header.env) < 0)
3646 goto out;
3647
3648 trace->host = &session->machines.host;
3649
3650 err = perf_session__set_tracepoints_handlers(session, handlers);
3651 if (err)
3652 goto out;
3653
3654 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3655 "raw_syscalls:sys_enter");
3656 	/* older kernels have the syscalls:* tracepoints instead of raw_syscalls:* */
3657 if (evsel == NULL)
3658 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3659 "syscalls:sys_enter");
3660
3661 if (evsel &&
3662 (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
3663 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
3664 		pr_err("Error initializing the raw_syscalls:sys_enter event\n");
3665 goto out;
3666 }
3667
3668 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3669 "raw_syscalls:sys_exit");
3670 if (evsel == NULL)
3671 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3672 "syscalls:sys_exit");
3673 if (evsel &&
3674 (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
3675 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
3676 		pr_err("Error initializing the raw_syscalls:sys_exit event\n");
3677 goto out;
3678 }
3679
3680 evlist__for_each_entry(session->evlist, evsel) {
3681 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
3682 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
3683 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
3684 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
3685 evsel->handler = trace__pgfault;
3686 }
3687
3688 setup_pager();
3689
3690 err = perf_session__process_events(session);
3691 if (err)
3692 		pr_err("Failed to process events, error %d\n", err);
3694 	else if (trace->summary)
3695 trace__fprintf_thread_summary(trace, trace->output);
3696
3697out:
3698 perf_session__delete(session);
3699
3700 return err;
3701}
3702
3703static size_t trace__fprintf_threads_header(FILE *fp)
3704{
3705 size_t printed;
3706
3707 printed = fprintf(fp, "\n Summary of events:\n\n");
3708
3709 return printed;
3710}
3711
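/*
 * DEFINE_RESORT_RB() (see rb_resort.h) declares a helper rb_tree plus the
 * per-entry init body that follows it: here the per-thread syscall stats,
 * kept in an intlist indexed by syscall id, get re-sorted by total time
 * spent (msecs) for the summary output.
 */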
3712DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
3713 struct stats *stats;
3714 double msecs;
3715 int syscall;
3716)
3717{
3718 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
3719 struct stats *stats = source->priv;
3720
3721 entry->syscall = source->i;
3722 entry->stats = stats;
3723 entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
3724}
3725
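/*
 * Emits the per-thread table behind -s/--summary and -S/--with-summary,
 * one line per syscall; roughly (values illustrative):
 *
 *   syscall            calls    total       min       avg       max      stddev
 *                               (msec)    (msec)    (msec)    (msec)        (%)
 *   --------------- -------- --------- --------- --------- --------- ------
 *   nanosleep              2  2000.000  1000.000  1000.000  1000.000      0.00%
 */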
3726static size_t thread__dump_stats(struct thread_trace *ttrace,
3727 struct trace *trace, FILE *fp)
3728{
3729 size_t printed = 0;
3730 struct syscall *sc;
3731 struct rb_node *nd;
3732 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
3733
3734 if (syscall_stats == NULL)
3735 return 0;
3736
3737 printed += fprintf(fp, "\n");
3738
3739 printed += fprintf(fp, " syscall calls total min avg max stddev\n");
3740 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
3741 printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
3742
3743 resort_rb__for_each_entry(nd, syscall_stats) {
3744 struct stats *stats = syscall_stats_entry->stats;
3745 if (stats) {
3746 double min = (double)(stats->min) / NSEC_PER_MSEC;
3747 double max = (double)(stats->max) / NSEC_PER_MSEC;
3748 double avg = avg_stats(stats);
3749 double pct;
3750 u64 n = (u64) stats->n;
3751
3752 pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
3753 avg /= NSEC_PER_MSEC;
3754
3755 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
3756 printed += fprintf(fp, " %-15s", sc->name);
3757 printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
3758 n, syscall_stats_entry->msecs, min, avg);
3759 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
3760 }
3761 }
3762
3763 resort_rb__delete(syscall_stats);
3764 printed += fprintf(fp, "\n\n");
3765
3766 return printed;
3767}
3768
3769static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
3770{
3771 size_t printed = 0;
3772 struct thread_trace *ttrace = thread__priv(thread);
3773 double ratio;
3774
3775 if (ttrace == NULL)
3776 return 0;
3777
3778 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
3779
3780 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
3781 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
3782 printed += fprintf(fp, "%.1f%%", ratio);
3783 if (ttrace->pfmaj)
3784 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
3785 if (ttrace->pfmin)
3786 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
3787 if (trace->sched)
3788 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
3789 else if (fputc('\n', fp) != EOF)
3790 ++printed;
3791
3792 printed += thread__dump_stats(ttrace, trace, fp);
3793
3794 return printed;
3795}
3796
3797static unsigned long thread__nr_events(struct thread_trace *ttrace)
3798{
3799 return ttrace ? ttrace->nr_events : 0;
3800}
3801
3802DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
3803 struct thread *thread;
3804)
3805{
3806 entry->thread = rb_entry(nd, struct thread, rb_node);
3807}
3808
3809static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
3810{
3811 size_t printed = trace__fprintf_threads_header(fp);
3812 struct rb_node *nd;
3813 int i;
3814
3815 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3816 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
3817
3818 if (threads == NULL) {
3819 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
3820 return 0;
3821 }
3822
3823 resort_rb__for_each_entry(nd, threads)
3824 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
3825
3826 resort_rb__delete(threads);
3827 }
3828 return printed;
3829}
3830
3831static int trace__set_duration(const struct option *opt, const char *str,
3832 int unset __maybe_unused)
3833{
3834 struct trace *trace = opt->value;
3835
3836 trace->duration_filter = atof(str);
3837 return 0;
3838}
3839
3840static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
3841 int unset __maybe_unused)
3842{
3843 int ret = -1;
3844 size_t i;
3845 struct trace *trace = opt->value;
3846 /*
3847 	 * FIXME: introduce an intarray class, plainly parse the csv and create a
3848 * { int nr, int entries[] } struct...
3849 */
3850 struct intlist *list = intlist__new(str);
3851
3852 if (list == NULL)
3853 return -1;
3854
3855 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3856 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3857
3858 if (trace->filter_pids.entries == NULL)
3859 goto out;
3860
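	/*
	 * Slot 0 always holds the tool's own pid, so that perf trace doesn't
	 * end up tracing, and feeding back on, its own syscalls.
	 */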
3861 trace->filter_pids.entries[0] = getpid();
3862
3863 for (i = 1; i < trace->filter_pids.nr; ++i)
3864 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3865
3866 intlist__delete(list);
3867 ret = 0;
3868out:
3869 return ret;
3870}
3871
3872static int trace__open_output(struct trace *trace, const char *filename)
3873{
3874 struct stat st;
3875
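	/*
	 * If the requested output file already exists and is non-empty,
	 * rotate it to "<filename>.old" rather than silently truncating it.
	 */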
3876 if (!stat(filename, &st) && st.st_size) {
3877 char oldname[PATH_MAX];
3878
3879 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3880 unlink(oldname);
3881 rename(filename, oldname);
3882 }
3883
3884 trace->output = fopen(filename, "w");
3885
3886 return trace->output == NULL ? -errno : 0;
3887}
3888
3889static int parse_pagefaults(const struct option *opt, const char *str,
3890 int unset __maybe_unused)
3891{
3892 int *trace_pgfaults = opt->value;
3893
3894 if (strcmp(str, "all") == 0)
3895 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3896 else if (strcmp(str, "maj") == 0)
3897 *trace_pgfaults |= TRACE_PFMAJ;
3898 else if (strcmp(str, "min") == 0)
3899 *trace_pgfaults |= TRACE_PFMIN;
3900 else
3901 return -1;
3902
3903 return 0;
3904}
3905
3906static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
3907{
3908 struct evsel *evsel;
3909
3910 evlist__for_each_entry(evlist, evsel)
3911 evsel->handler = handler;
3912}
3913
3914static int evlist__set_syscall_tp_fields(struct evlist *evlist)
3915{
3916 struct evsel *evsel;
3917
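	/*
	 * For syscalls:sys_enter_NAME the pointer to the args sits right
	 * after the syscall id field, and for syscalls:sys_exit_NAME the u64
	 * return value sits at that same offset; events that already have
	 * their priv set up, or no tracefs format, are skipped.
	 */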
3918 evlist__for_each_entry(evlist, evsel) {
3919 if (evsel->priv || !evsel->tp_format)
3920 continue;
3921
3922 if (strcmp(evsel->tp_format->system, "syscalls"))
3923 continue;
3924
3925 if (perf_evsel__init_syscall_tp(evsel))
3926 return -1;
3927
3928 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
3929 struct syscall_tp *sc = evsel->priv;
3930
3931 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
3932 return -1;
3933 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
3934 struct syscall_tp *sc = evsel->priv;
3935
3936 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
3937 return -1;
3938 }
3939 }
3940
3941 return 0;
3942}
3943
3944/*
3945  * XXX: Hackish, just splitting the combined -e+--event list into syscalls
3946  * (raw_syscalls:sys_{enter,exit}) and other events (tracepoints, HW, SW, etc.) to
3947  * reuse the existing facilities unchanged (trace->ev_qualifier + parse_options()).
3948 *
3949 * It'd be better to introduce a parse_options() variant that would return a
3950 * list with the terms it didn't match to an event...
3951 */
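/*
 * E.g. "-e openat,close,sched:sched_switch" (an illustrative mix) is split
 * into lists[1] = "openat,close" (syscall names, globs and strace group
 * files, fed to the ev_qualifier strlist) and lists[0] = "sched:sched_switch"
 * (handed to parse_events_option()).
 */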
3952static int trace__parse_events_option(const struct option *opt, const char *str,
3953 int unset __maybe_unused)
3954{
3955 struct trace *trace = (struct trace *)opt->value;
3956 const char *s = str;
3957 char *sep = NULL, *lists[2] = { NULL, NULL, };
3958 int len = strlen(str) + 1, err = -1, list, idx;
3959 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
3960 char group_name[PATH_MAX];
3961 struct syscall_fmt *fmt;
3962
3963 if (strace_groups_dir == NULL)
3964 return -1;
3965
3966 if (*s == '!') {
3967 ++s;
3968 trace->not_ev_qualifier = true;
3969 }
3970
3971 while (1) {
3972 if ((sep = strchr(s, ',')) != NULL)
3973 *sep = '\0';
3974
3975 list = 0;
3976 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
3977 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
3978 list = 1;
3979 goto do_concat;
3980 }
3981
3982 fmt = syscall_fmt__find_by_alias(s);
3983 if (fmt != NULL) {
3984 list = 1;
3985 s = fmt->name;
3986 } else {
3987 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
3988 if (access(group_name, R_OK) == 0)
3989 list = 1;
3990 }
3991do_concat:
3992 if (lists[list]) {
3993 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
3994 } else {
3995 lists[list] = malloc(len);
3996 if (lists[list] == NULL)
3997 goto out;
3998 strcpy(lists[list], s);
3999 }
4000
4001 if (!sep)
4002 break;
4003
4004 *sep = ',';
4005 s = sep + 1;
4006 }
4007
4008 if (lists[1] != NULL) {
4009 struct strlist_config slist_config = {
4010 .dirname = strace_groups_dir,
4011 };
4012
4013 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4014 if (trace->ev_qualifier == NULL) {
4015 fputs("Not enough memory to parse event qualifier", trace->output);
4016 goto out;
4017 }
4018
4019 if (trace__validate_ev_qualifier(trace))
4020 goto out;
4021 trace->trace_syscalls = true;
4022 }
4023
4024 err = 0;
4025
4026 if (lists[0]) {
4027 struct option o = {
4028 .value = &trace->evlist,
4029 };
4030 err = parse_events_option(&o, lists[0], 0);
4031 }
4032out:
4033 if (sep)
4034 *sep = ',';
4035
4036 return err;
4037}
4038
4039static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4040{
4041 struct trace *trace = opt->value;
4042
4043 if (!list_empty(&trace->evlist->core.entries)) {
4044 struct option o = {
4045 .value = &trace->evlist,
4046 };
4047 return parse_cgroups(&o, str, unset);
4048 }
4049 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4050
4051 return 0;
4052}
4053
4054static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
4055{
4056 if (trace->bpf_obj == NULL)
4057 return NULL;
4058
4059 return bpf_object__find_map_by_name(trace->bpf_obj, name);
4060}
4061
4062static void trace__set_bpf_map_filtered_pids(struct trace *trace)
4063{
4064 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
4065}
4066
4067static void trace__set_bpf_map_syscalls(struct trace *trace)
4068{
4069 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
4070 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
4071 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
4072}
4073
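/*
 * Handles the [trace] section of the perf config file; a sketch of such a
 * section, with illustrative values:
 *
 *	[trace]
 *		add_events = probe:vfs_getname
 *		show_duration = no
 *		args_alignment = 40
 */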
4074static int trace__config(const char *var, const char *value, void *arg)
4075{
4076 struct trace *trace = arg;
4077 int err = 0;
4078
4079 if (!strcmp(var, "trace.add_events")) {
4080 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
4081 "event selector. use 'perf list' to list available events",
4082 parse_events_option);
4083 /*
4084 		 * We can't propagate parse_events_option()'s return, as it is 1
4085 * for failure while perf_config() expects -1.
4086 */
4087 if (parse_events_option(&o, value, 0))
4088 err = -1;
4089 } else if (!strcmp(var, "trace.show_timestamp")) {
4090 trace->show_tstamp = perf_config_bool(var, value);
4091 } else if (!strcmp(var, "trace.show_duration")) {
4092 trace->show_duration = perf_config_bool(var, value);
4093 } else if (!strcmp(var, "trace.show_arg_names")) {
4094 trace->show_arg_names = perf_config_bool(var, value);
4095 if (!trace->show_arg_names)
4096 trace->show_zeros = true;
4097 } else if (!strcmp(var, "trace.show_zeros")) {
4098 bool new_show_zeros = perf_config_bool(var, value);
4099 if (!trace->show_arg_names && !new_show_zeros) {
4100 			pr_warning("trace.show_zeros can't be disabled when trace.show_arg_names=no\n");
4101 goto out;
4102 }
4103 trace->show_zeros = new_show_zeros;
4104 } else if (!strcmp(var, "trace.show_prefix")) {
4105 trace->show_string_prefix = perf_config_bool(var, value);
4106 } else if (!strcmp(var, "trace.no_inherit")) {
4107 trace->opts.no_inherit = perf_config_bool(var, value);
4108 } else if (!strcmp(var, "trace.args_alignment")) {
4109 int args_alignment = 0;
4110 if (perf_config_int(&args_alignment, var, value) == 0)
4111 trace->args_alignment = args_alignment;
4112 }
4113out:
4114 return err;
4115}
4116
4117int cmd_trace(int argc, const char **argv)
4118{
4119 const char *trace_usage[] = {
4120 "perf trace [<options>] [<command>]",
4121 "perf trace [<options>] -- <command> [<options>]",
4122 "perf trace record [<options>] [<command>]",
4123 "perf trace record [<options>] -- <command> [<options>]",
4124 NULL
4125 };
4126 struct trace trace = {
4127 .opts = {
4128 .target = {
4129 .uid = UINT_MAX,
4130 .uses_mmap = true,
4131 },
4132 .user_freq = UINT_MAX,
4133 .user_interval = ULLONG_MAX,
4134 .no_buffering = true,
4135 .mmap_pages = UINT_MAX,
4136 },
4137 .output = stderr,
4138 .show_comm = true,
4139 .show_tstamp = true,
4140 .show_duration = true,
4141 .show_arg_names = true,
4142 .args_alignment = 70,
4143 .trace_syscalls = false,
4144 .kernel_syscallchains = false,
4145 .max_stack = UINT_MAX,
4146 .max_events = ULONG_MAX,
4147 };
4148 const char *map_dump_str = NULL;
4149 const char *output_name = NULL;
4150 const struct option trace_options[] = {
4151 OPT_CALLBACK('e', "event", &trace, "event",
4152 "event/syscall selector. use 'perf list' to list available events",
4153 trace__parse_events_option),
4154 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4155 "show the thread COMM next to its id"),
4156 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4157 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4158 trace__parse_events_option),
4159 OPT_STRING('o', "output", &output_name, "file", "output file name"),
4160 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4161 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4162 "trace events on existing process id"),
4163 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4164 "trace events on existing thread id"),
4165 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4166 "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4167 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4168 "system-wide collection from all CPUs"),
4169 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4170 "list of cpus to monitor"),
4171 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4172 "child tasks do not inherit counters"),
4173 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4174 "number of mmap data pages",
4175 perf_evlist__parse_mmap_pages),
4176 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4177 "user to profile"),
4178 OPT_CALLBACK(0, "duration", &trace, "float",
4179 "show only events with duration > N.M ms",
4180 trace__set_duration),
4181#ifdef HAVE_LIBBPF_SUPPORT
4182 OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4183#endif
4184 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4185 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4186 OPT_BOOLEAN('T', "time", &trace.full_time,
4187 "Show full timestamp, not time relative to first start"),
4188 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4189 "Show only syscalls that failed"),
4190 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4191 "Show only syscall summary with statistics"),
4192 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4193 "Show all syscalls and summary with statistics"),
4194 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4195 "Trace pagefaults", parse_pagefaults, "maj"),
4196 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4197 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4198 OPT_CALLBACK(0, "call-graph", &trace.opts,
4199 "record_mode[,record_size]", record_callchain_help,
4200 &record_parse_callchain_opt),
4201 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4202 "Show the kernel callchains on the syscall exit path"),
4203 OPT_ULONG(0, "max-events", &trace.max_events,
4204 		    "Set the maximum number of events to print; exit after that is reached."),
4205 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4206 "Set the minimum stack depth when parsing the callchain, "
4207 "anything below the specified depth will be ignored."),
4208 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4209 "Set the maximum stack depth when parsing the callchain, "
4210 "anything beyond the specified depth will be ignored. "
4211 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4212 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4213 "Sort batch of events before processing, use if getting out of order events"),
4214 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4215 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4216 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4217 "per thread proc mmap processing timeout in ms"),
4218 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4219 trace__parse_cgroups),
4220 OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
4221 "ms to wait before starting measurement after program "
4222 "start"),
4223 OPTS_EVSWITCH(&trace.evswitch),
4224 OPT_END()
4225 };
4226 bool __maybe_unused max_stack_user_set = true;
4227 bool mmap_pages_user_set = true;
4228 struct evsel *evsel;
4229 const char * const trace_subcommands[] = { "record", NULL };
4230 int err = -1;
4231 char bf[BUFSIZ];
4232
4233 signal(SIGSEGV, sighandler_dump_stack);
4234 signal(SIGFPE, sighandler_dump_stack);
4235
4236 trace.evlist = evlist__new();
4237 trace.sctbl = syscalltbl__new();
4238
4239 if (trace.evlist == NULL || trace.sctbl == NULL) {
4240 pr_err("Not enough memory to run!\n");
4241 err = -ENOMEM;
4242 goto out;
4243 }
4244
4245 /*
4246 	 * Parsing .perfconfig may entail creating a BPF event that may need
4247 	 * to create BPF maps, so bump RLIM_MEMLOCK, as the default 64K setting
4248 * is too small. This affects just this process, not touching the
4249 * global setting. If it fails we'll get something in 'perf trace -v'
4250 * to help diagnose the problem.
4251 */
4252 rlimit__bump_memlock();
4253
4254 err = perf_config(trace__config, &trace);
4255 if (err)
4256 goto out;
4257
4258 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4259 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4260
4261 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4262 usage_with_options_msg(trace_usage, trace_options,
4263 "cgroup monitoring only available in system-wide mode");
4264 }
4265
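	/*
	 * Set up the BPF output event that the augmenter (see
	 * tools/perf/examples/bpf/augmented_raw_syscalls.c) uses to send
	 * enriched syscall payloads; a NULL return simply means no such
	 * event was requested and we proceed without augmentation.
	 */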
4266 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4267 if (IS_ERR(evsel)) {
4268 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4269 		pr_err("ERROR: Setting up the __augmented_syscalls__ BPF output event failed: %s\n", bf);
4270 goto out;
4271 }
4272
4273 if (evsel) {
4274 trace.syscalls.events.augmented = evsel;
4275
4276 evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4277 if (evsel == NULL) {
4278 pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4279 goto out;
4280 }
4281
4282 if (evsel->bpf_obj == NULL) {
4283 pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4284 goto out;
4285 }
4286
4287 trace.bpf_obj = evsel->bpf_obj;
4288
4289 trace__set_bpf_map_filtered_pids(&trace);
4290 trace__set_bpf_map_syscalls(&trace);
4291 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4292 }
4293
4294 err = bpf__setup_stdout(trace.evlist);
4295 if (err) {
4296 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4297 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
4298 goto out;
4299 }
4300
4301 err = -1;
4302
4303 if (map_dump_str) {
4304 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4305 if (trace.dump.map == NULL) {
4306 pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4307 goto out;
4308 }
4309 }
4310
4311 if (trace.trace_pgfaults) {
4312 trace.opts.sample_address = true;
4313 trace.opts.sample_time = true;
4314 }
4315
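	/* UINT_MAX doubles as the "not set by the user" sentinel for both knobs below. */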
4316 if (trace.opts.mmap_pages == UINT_MAX)
4317 mmap_pages_user_set = false;
4318
4319 if (trace.max_stack == UINT_MAX) {
4320 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4321 max_stack_user_set = false;
4322 }
4323
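	/*
	 * --min-stack/--max-stack imply callchain collection: if the user
	 * asked for either but didn't configure a call-graph mode, default
	 * to DWARF unwinding when it is available.
	 */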
4324#ifdef HAVE_DWARF_UNWIND_SUPPORT
4325 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4326 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4327 }
4328#endif
4329
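	/*
	 * Callchain samples, DWARF ones especially, are much larger than
	 * plain syscall records, so if the user didn't size the ring buffer
	 * and we run as root (thus not bound by the mlock limit), use a
	 * bigger one to lower the chance of losing events.
	 */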
4330 if (callchain_param.enabled) {
4331 if (!mmap_pages_user_set && geteuid() == 0)
4332 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
4333
4334 symbol_conf.use_callchain = true;
4335 }
4336
4337 if (trace.evlist->core.nr_entries > 0) {
4338 evlist__set_evsel_handler(trace.evlist, trace__event_handler);
4339 if (evlist__set_syscall_tp_fields(trace.evlist)) {
4340 perror("failed to set syscalls:* tracepoint fields");
4341 goto out;
4342 }
4343 }
4344
4345 if (trace.sort_events) {
4346 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4347 ordered_events__set_copy_on_queue(&trace.oe.data, true);
4348 }
4349
4350 /*
4351 * If we are augmenting syscalls, then combine what we put in the
4352 * __augmented_syscalls__ BPF map with what is in the
4353 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
4354 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
4355 *
4356 	 * We'll switch to looking at two BPF maps, one for sys_enter and the
4357 * other for sys_exit when we start augmenting the sys_exit paths with
4358 * buffers that are being copied from kernel to userspace, think 'read'
4359 * syscall.
4360 */
4361 if (trace.syscalls.events.augmented) {
4362 evlist__for_each_entry(trace.evlist, evsel) {
4363 bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
4364
4365 if (raw_syscalls_sys_exit) {
4366 trace.raw_augmented_syscalls = true;
4367 goto init_augmented_syscall_tp;
4368 }
4369
4370 if (trace.syscalls.events.augmented->priv == NULL &&
4371 strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
4372 struct evsel *augmented = trace.syscalls.events.augmented;
4373 if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
4374 perf_evsel__init_augmented_syscall_tp_args(augmented))
4375 goto out;
4376 /*
4377 			 * 'augmented' is the __augmented_syscalls__ BPF_OUTPUT event.
4378 			 * Above we made sure we can get from its payload the tp fields
4379 			 * described in the syscalls:sys_enter tracefs format file.
4380 */
4381 augmented->handler = trace__sys_enter;
4382 /*
4383 * Now we do the same for the *syscalls:sys_enter event so that
4384 * if we handle it directly, i.e. if the BPF prog returns 0 so
4385 * as not to filter it, then we'll handle it just like we would
4386 * for the BPF_OUTPUT one:
4387 */
4388 if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
4389 perf_evsel__init_augmented_syscall_tp_args(evsel))
4390 goto out;
4391 evsel->handler = trace__sys_enter;
4392 }
4393
4394 if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
4395 struct syscall_tp *sc;
4396init_augmented_syscall_tp:
4397 if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
4398 goto out;
4399 sc = evsel->priv;
4400 /*
4401 * For now with BPF raw_augmented we hook into
4402 * raw_syscalls:sys_enter and there we get all
4403 * 6 syscall args plus the tracepoint common
4404 * fields and the syscall_nr (another long).
4405 * So we check if that is the case and if so
4406 * don't look after the sc->args_size but
4407 * always after the full raw_syscalls:sys_enter
4408 * payload, which is fixed.
4409 *
4410 * We'll revisit this later to pass
4411 			 * sc->args_size to the BPF augmenter (now
4412 			 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
4413 * so that it copies only what we need for each
4414 * syscall, like what happens when we use
4415 * syscalls:sys_enter_NAME, so that we reduce
4416 * the kernel/userspace traffic to just what is
4417 * needed for each syscall.
4418 */
4419 if (trace.raw_augmented_syscalls)
4420 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
4421 perf_evsel__init_augmented_syscall_tp_ret(evsel);
4422 evsel->handler = trace__sys_exit;
4423 }
4424 }
4425 }
4426
4427 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
4428 return trace__record(&trace, argc-1, &argv[1]);
4429
4430 /* summary_only implies summary option, but don't overwrite summary if set */
4431 if (trace.summary_only)
4432 trace.summary = trace.summary_only;
4433
4434 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4435 	    trace.evlist->core.nr_entries == 0 /* Was -e/--event used? */) {
4436 trace.trace_syscalls = true;
4437 }
4438
4439 if (output_name != NULL) {
4440 err = trace__open_output(&trace, output_name);
4441 if (err < 0) {
4442 perror("failed to create output file");
4443 goto out;
4444 }
4445 }
4446
4447 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
4448 if (err)
4449 goto out_close;
4450
4451 err = target__validate(&trace.opts.target);
4452 if (err) {
4453 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4454 fprintf(trace.output, "%s", bf);
4455 goto out_close;
4456 }
4457
4458 err = target__parse_uid(&trace.opts.target);
4459 if (err) {
4460 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4461 fprintf(trace.output, "%s", bf);
4462 goto out_close;
4463 }
4464
4465 if (!argc && target__none(&trace.opts.target))
4466 trace.opts.target.system_wide = true;
4467
4468 if (input_name)
4469 err = trace__replay(&trace);
4470 else
4471 err = trace__run(&trace, argc, argv);
4472
4473out_close:
4474 if (output_name != NULL)
4475 fclose(trace.output);
4476out:
4477 return err;
4478}