// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

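/*
 * Running mean/variance accumulator (Welford's online algorithm): each
 * value updates the mean and the sum of squared differences from the
 * mean (M2) in a single pass, without storing the samples.
 */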
void update_stats(struct stats *stats, u64 val)
{
        double delta;

        stats->n++;
        delta = val - stats->mean;
        stats->mean += delta / stats->n;
        stats->M2 += delta*(val - stats->mean);

        if (val > stats->max)
                stats->max = val;

        if (val < stats->min)
                stats->min = val;
}

double avg_stats(struct stats *stats)
{
        return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
        double variance, variance_mean;

        if (stats->n < 2)
                return 0.0;

        variance = stats->M2 / (stats->n - 1);
        variance_mean = variance / stats->n;

        return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
        double pct = 0.0;

        if (avg)
                pct = 100.0 * stddev/avg;

        return pct;
}
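
/*
 * A minimal usage sketch of the helpers above (hypothetical caller, not
 * part of this file): feed each observed value to update_stats() and
 * read the summary statistics back at the end.
 *
 *	struct stats st;
 *	u64 runtimes[] = { 100, 104, 98 };
 *	size_t i;
 *
 *	init_stats(&st);
 *	for (i = 0; i < ARRAY_SIZE(runtimes); i++)
 *		update_stats(&st, runtimes[i]);
 *
 *	printf("%.1f +- %.1f (%.2f%%)\n", avg_stats(&st), stddev_stats(&st),
 *	       rel_stddev_stats(stddev_stats(&st), avg_stats(&st)));
 */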

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
                           enum perf_stat_evsel_id id)
{
        struct perf_stat_evsel *ps = evsel->priv;

        return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
        ID(NONE,		x),
        ID(CYCLES_IN_TX,	cpu/cycles-t/),
        ID(TRANSACTION_START,	cpu/tx-start/),
        ID(ELISION_START,	cpu/el-start/),
        ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
        ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
        ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
        ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
        ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
        ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
        ID(SMI_NUM, msr/smi/),
        ID(APERF, msr/aperf/),
};
#undef ID

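/* Resolve ps->id by matching the event name against id_str[] above. */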
void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
        struct perf_stat_evsel *ps = evsel->priv;
        int i;

        /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

        for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
                if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
                        ps->id = i;
                        break;
                }
        }
}

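/*
 * res_stats[] accumulates the aggregated value/enabled/running triple
 * across runs; see perf_stat_process_counter().
 */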
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
        int i;
        struct perf_stat_evsel *ps = evsel->priv;

        for (i = 0; i < 3; i++)
                init_stats(&ps->res_stats[i]);

        perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
        evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
        if (evsel->priv == NULL)
                return -ENOMEM;
        perf_evsel__reset_stat_priv(evsel);
        return 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
        struct perf_stat_evsel *ps = evsel->priv;

        if (ps)
                free(ps->group_data);
        zfree(&evsel->priv);
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
                                             int ncpus, int nthreads)
{
        struct perf_counts *counts;

        counts = perf_counts__new(ncpus, nthreads);
        if (counts)
                evsel->prev_raw_counts = counts;

        return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
        perf_counts__delete(evsel->prev_raw_counts);
        evsel->prev_raw_counts = NULL;
}

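/* Clear only the aggregated snapshot of the previous raw counts. */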
static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
{
        if (evsel->prev_raw_counts) {
                evsel->prev_raw_counts->aggr.val = 0;
                evsel->prev_raw_counts->aggr.ena = 0;
                evsel->prev_raw_counts->aggr.run = 0;
        }
}

static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
        int ncpus = perf_evsel__nr_cpus(evsel);
        int nthreads = thread_map__nr(evsel->threads);

        if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
            perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
            (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
                return -ENOMEM;

        return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (perf_evsel__alloc_stats(evsel, alloc_raw))
                        goto out_free;
        }

        return 0;

out_free:
        perf_evlist__free_stats(evlist);
        return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                perf_evsel__free_stat_priv(evsel);
                perf_evsel__free_counts(evsel);
                perf_evsel__free_prev_raw_counts(evsel);
        }
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                perf_evsel__reset_stat_priv(evsel);
                perf_evsel__reset_counts(evsel);
        }
}

void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel)
                perf_evsel__reset_prev_raw_counts(evsel);
}
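
/*
 * Typical lifecycle of the helpers above (sketch of a perf-stat style
 * caller, not part of this file):
 *
 *	perf_evlist__alloc_stats(evlist, interval_mode);
 *	...
 *	evlist__for_each_entry(evlist, counter)
 *		perf_stat_process_counter(&stat_config, counter);
 *	...
 *	perf_evlist__free_stats(evlist);
 */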

static void zero_per_pkg(struct perf_evsel *counter)
{
        if (counter->per_pkg_mask)
                memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

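/*
 * Per-package events (e.g. uncore) report the same count on every CPU
 * of a package, so only one CPU per package may contribute a reading;
 * per_pkg_mask tracks which packages have already been counted.
 */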
static int check_per_pkg(struct perf_evsel *counter,
                         struct perf_counts_values *vals, int cpu, bool *skip)
{
        unsigned long *mask = counter->per_pkg_mask;
        struct cpu_map *cpus = perf_evsel__cpus(counter);
        int s;

        *skip = false;

        if (!counter->per_pkg)
                return 0;

        if (cpu_map__empty(cpus))
                return 0;

        if (!mask) {
                mask = zalloc(MAX_NR_CPUS);
                if (!mask)
                        return -ENOMEM;

                counter->per_pkg_mask = mask;
        }

        /*
         * we do not consider an event that has not run as a good
         * instance to mark a package as used (skip=1). Otherwise
         * we may run into a situation where the first CPU in a package
         * is not running anything, yet the second is, and this function
         * would mark the package as used after the first CPU and would
         * not read the values from the second CPU.
         */
        if (!(vals->run && vals->ena))
                return 0;

        s = cpu_map__get_socket(cpus, cpu, NULL);
        if (s < 0)
                return -1;

        *skip = test_and_set_bit(s, mask) == 1;
        return 0;
}

static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
                       int cpu, int thread,
                       struct perf_counts_values *count)
{
        struct perf_counts_values *aggr = &evsel->counts->aggr;
        static struct perf_counts_values zero;
        bool skip = false;

        if (check_per_pkg(evsel, count, cpu, &skip)) {
                pr_err("failed to read per-pkg counter\n");
                return -1;
        }

        if (skip)
                count = &zero;

        switch (config->aggr_mode) {
        case AGGR_THREAD:
        case AGGR_CORE:
        case AGGR_SOCKET:
        case AGGR_NONE:
                if (!evsel->snapshot)
                        perf_evsel__compute_deltas(evsel, cpu, thread, count);
                perf_counts_values__scale(count, config->scale, NULL);
                if (config->aggr_mode == AGGR_NONE)
                        perf_stat__update_shadow_stats(evsel, count->values, cpu);
                break;
        case AGGR_GLOBAL:
                aggr->val += count->val;
                if (config->scale) {
                        aggr->ena += count->ena;
                        aggr->run += count->run;
                }
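                /* fall through */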
        case AGGR_UNSET:
        default:
                break;
        }

        return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
                                struct perf_evsel *counter)
{
        int nthreads = thread_map__nr(counter->threads);
        int ncpus = perf_evsel__nr_cpus(counter);
        int cpu, thread;

        if (counter->system_wide)
                nthreads = 1;

        for (thread = 0; thread < nthreads; thread++) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        if (process_counter_values(config, counter, cpu, thread,
                                                   perf_counts(counter->counts, cpu, thread)))
                                return -1;
                }
        }

        return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
                              struct perf_evsel *counter)
{
        struct perf_counts_values *aggr = &counter->counts->aggr;
        struct perf_stat_evsel *ps = counter->priv;
        u64 *count = counter->counts->aggr.values;
        u64 val;
        int i, ret;

        aggr->val = aggr->ena = aggr->run = 0;

        /*
         * We calculate counter's data every interval,
         * and the display code shows ps->res_stats
         * avg value. We need to zero the stats for
         * interval mode, otherwise overall avg running
         * averages will be shown for each interval.
         */
        if (config->interval) {
                for (i = 0; i < 3; i++)
                        init_stats(&ps->res_stats[i]);
        }

        if (counter->per_pkg)
                zero_per_pkg(counter);

        ret = process_counter_maps(config, counter);
        if (ret)
                return ret;

        if (config->aggr_mode != AGGR_GLOBAL)
                return 0;

        if (!counter->snapshot)
                perf_evsel__compute_deltas(counter, -1, -1, aggr);
        perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

        for (i = 0; i < 3; i++)
                update_stats(&ps->res_stats[i], count[i]);

        if (verbose > 0) {
                fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
                        perf_evsel__name(counter), count[0], count[1], count[2]);
        }

        /*
         * Save the full runtime - to allow normalization during printout:
         */
        val = counter->scale * *count;
        perf_stat__update_shadow_stats(counter, &val, 0);

        return 0;
}

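/*
 * Handle a PERF_RECORD_STAT event (as written by 'perf stat record'):
 * route the value/enabled/running triple to the matching evsel's counts.
 */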
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event,
                                   struct perf_session *session)
{
        struct perf_counts_values count;
        struct stat_event *st = &event->stat;
        struct perf_evsel *counter;

        count.val = st->val;
        count.ena = st->ena;
        count.run = st->run;

        counter = perf_evlist__id2evsel(session->evlist, st->id);
        if (!counter) {
                pr_err("Failed to resolve counter for stat event.\n");
                return -EINVAL;
        }

        *perf_counts(counter->counts, st->cpu, st->thread) = count;
        counter->supported = true;
        return 0;
}

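/*
 * The fprintf helpers below pretty-print stat-related events when raw
 * events are dumped (e.g. 'perf report -D' on perf-stat data).
 */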
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
        struct stat_event *st = (struct stat_event *) event;
        size_t ret;

        ret  = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
                       st->id, st->cpu, st->thread);
        ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
                       st->val, st->ena, st->run);

        return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
        struct stat_round_event *rd = (struct stat_round_event *)event;
        size_t ret;

        ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
                      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

        return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
        struct perf_stat_config sc;
        size_t ret;

        perf_event__read_stat_config(&sc, &event->stat_config);

        ret  = fprintf(fp, "\n");
        ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
        ret += fprintf(fp, "... scale     %d\n", sc.scale);
        ret += fprintf(fp, "... interval  %u\n", sc.interval);

        return ret;
}