// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

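/*
 * Check that per-CPU reads of a tracepoint event work: open
 * syscalls:sys_enter_openat on all CPUs, pin the thread to each CPU in
 * turn while doing a known, CPU-dependent number of openat() calls, then
 * verify the per-CPU counts match what was generated.
 */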
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

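	/*
	 * Start with an empty affinity mask; the loop below sets exactly
	 * one CPU at a time, binds to it, then clears it again.
	 */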
	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}

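	/* Open the event on every CPU in the map, for this process's threads. */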
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

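	/*
	 * Generate the events: on CPU N, issue nr_openat_calls + N
	 * openat() calls so that each CPU ends up with a distinct
	 * expected count.
	 */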
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit though :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
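		/*
		 * The dirfd argument is ignored for an absolute path, so
		 * passing 0 is fine here; all that matters is that each
		 * openat() entry fires the tracepoint.
		 */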
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * We need to explicitly preallocate the counts here: if we relied
	 * on the auto-allocation done on the first read, it would allocate
	 * room for a single CPU only, since reading starts at CPU 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

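	/*
	 * Read the per-CPU counts back and check that CPU N intercepted
	 * exactly nr_openat_calls + N events.
	 */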
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_cpu_map_delete:
	cpu_map__put(cpus);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
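
/*
 * A minimal sketch of how this test is typically run, assuming the usual
 * perf test harness (the exact test name may vary between perf versions):
 *
 *   $ perf test -v "openat syscall event on all cpus"
 */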