rjw | 1f88458 | 2022-01-06 17:20:42 +0800 | [diff] [blame^] | 1 | /*
|
| 2 | * Copyright (C) 2018 MediaTek Inc.
|
| 3 | *
|
| 4 | * This program is free software: you can redistribute it and/or modify
|
| 5 | * it under the terms of the GNU General Public License version 2 as
|
| 6 | * published by the Free Software Foundation.
|
| 7 | *
|
| 8 | * This program is distributed in the hope that it will be useful,
|
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
| 11 | * GNU General Public License for more details.
|
| 12 | */
|
| 13 |
|
| 14 | #include <linux/slab.h>
|
| 15 | #include <linux/version.h>
|
| 16 |
|
| 17 | #include "interface.h"
|
| 18 | #include "trace.h"
|
| 19 | #include "cpu_pmu_v2.h"
|
| 20 | #include "v8_pmu_hw_v2.h"
|
| 21 | #include "met_drv.h"
|
| 22 |
|
| 23 |
|
| 24 | #define MET_USER_EVENT_SUPPORT
|
| 25 |
|
| 26 | #include <linux/kthread.h>
|
| 27 | #include <linux/kernel.h>
|
| 28 | #include <linux/sched.h>
|
| 29 | #include <linux/wait.h>
|
| 30 | #include <linux/signal.h>
|
| 31 | #include <linux/workqueue.h>
|
| 32 | #include <linux/perf_event.h>
|
| 33 | #include "met_kernel_symbol.h"
|
| 34 |
|
| 35 |
|
| 36 | /*******************************************************************************
|
| 37 | * Type Define
|
| 38 | *******************************************************************************/
|
| 39 | #define CNTMAX NR_CPUS
|
| 40 |
|
| 41 |
|
| 42 | /*******************************************************************************
|
| 43 |  * Function Prototypes
|
| 44 | *******************************************************************************/
|
| 45 | static inline struct met_pmu_v2 *get_met_pmu_by_cpu_id(const unsigned int cpu);
|
| 46 | static inline void set_met_pmu_by_cpu_id(const unsigned int cpu, struct met_pmu_v2 *met_pmu);
|
| 47 |
|
| 48 | static int reset_driver_stat(void);
|
| 49 | static struct met_pmu_v2 *lookup_pmu(struct kobject *kobj);
|
| 50 |
|
| 51 | static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf);
|
| 52 |
|
| 53 | static int cpupmu_create_subfs(struct kobject *parent);
|
| 54 | static void cpupmu_delete_subfs(void);
|
| 55 | static void _cpupmu_start(void *info);
|
| 56 | static void cpupmu_start(void);
|
| 57 | static void _cpupmu_stop(void *info);
|
| 58 | static void cpupmu_stop(void);
|
| 59 | static void cpupmu_polling(unsigned long long stamp, int cpu);
|
| 60 | extern void cpupmu_polling_v2(unsigned long long stamp, int cpu);
|
| 61 | static int cpupmu_print_help(char *buf, int len);
|
| 62 | static int cpupmu_print_header(char *buf, int len);
|
| 63 | static int cpupmu_process_argument(const char *arg, int len);
|
| 64 |
|
| 65 |
|
| 66 | /*******************************************************************************
|
| 67 |  * Global Variables
|
| 68 | *******************************************************************************/
|
| 69 | static int module_status;
|
| 70 |
|
| 71 | struct cpu_pmu_hw_v2 *met_pmu_hw_v2;
|
| 72 |
|
| 73 | static unsigned int gPMU_CNT[2*MXNR_CPU_V2];
|
| 74 | static unsigned int gMAX_PMU_HW_CNT;
|
| 75 |
|
| 76 | static struct kobject *gKOBJ_CPU;
|
| 77 | static struct met_pmu_v2 *gMET_PMU[2*MXNR_CPU_V2];
|
| 78 |
|
| 79 | static struct kobj_attribute mode_attr = __ATTR(mode, 0444, mode_show, NULL);
|
| 80 |
|
| 81 | static const char cache_line_header[] =
|
| 82 | "met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
|
| 83 | static const char header[] =
|
| 84 | "met-info [000] 0.0: met_cpu_header_v2: ";
|
| 85 | static const char help[] =
|
| 86 | " --cpu-pmu=CORE_ID:EVENT select CPU-PMU events. in %s,\n"
|
| 87 | " you can enable at most \"%d general purpose events\"\n"
|
| 88 | " plus \"one special 0xff (CPU_CYCLE) event\"\n";
|
| 89 |
|
| 90 | static DEFINE_PER_CPU(int[CNTMAX], perfCurr);
|
| 91 | static DEFINE_PER_CPU(int[CNTMAX], perfPrev);
|
| 92 | static DEFINE_PER_CPU(struct perf_event * [CNTMAX], pevent);
|
| 93 | static DEFINE_PER_CPU(struct perf_event_attr [CNTMAX], pevent_attr);
|
| 94 | static DEFINE_PER_CPU(int, perfSet);
|
| 95 | static DEFINE_PER_CPU(unsigned int, perf_task_init_done);
|
| 96 | static DEFINE_PER_CPU(unsigned int, perf_cpuid);
|
| 97 |
|
| 98 | static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork);
|
| 99 | static DEFINE_PER_CPU(struct delayed_work *, perf_delayed_work_setup);
|
| 100 |
|
/*
 * MET device descriptor for the "cpu-pmu" module.  Registered with the MET
 * core, which drives the callbacks below: sysfs setup/teardown, session
 * start/stop, periodic polling, and help/header text generation.
 */
struct metdevice met_cpupmu_v2 = {
	.name = "cpu-pmu",			/* command-line / sysfs name */
	.type = MET_TYPE_PMU,
	.cpu_related = 1,			/* samples are produced per CPU */
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.stop = cpupmu_stop,
	.polling_interval = 1,			/* polling period in MET timer units -- TODO confirm units */
	.timed_polling = cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument
};
|
| 115 |
|
| 116 |
|
| 117 | /*******************************************************************************
|
| 118 |  * Implementation Start
|
| 119 | *******************************************************************************/
|
| 120 | static inline struct met_pmu_v2 *get_met_pmu_by_cpu_id(const unsigned int cpu)
|
| 121 | {
|
| 122 | if (cpu < MXNR_CPU_V2)
|
| 123 | return gMET_PMU[cpu];
|
| 124 | else
|
| 125 | return NULL;
|
| 126 | }
|
| 127 |
|
| 128 |
|
| 129 | static inline void set_met_pmu_by_cpu_id(const unsigned int cpu, struct met_pmu_v2 *met_pmu)
|
| 130 | {
|
| 131 | if (cpu < MXNR_CPU_V2)
|
| 132 | gMET_PMU[cpu] = met_pmu;
|
| 133 | }
|
| 134 |
|
| 135 |
|
| 136 | static int reset_driver_stat()
|
| 137 | {
|
| 138 | int i;
|
| 139 | int cpu;
|
| 140 | struct met_pmu_v2 *met_pmu;
|
| 141 |
|
| 142 | met_cpupmu_v2.mode = 0;
|
| 143 | for_each_possible_cpu(cpu) {
|
| 144 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 145 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 146 | met_pmu[i].mode = MODE_DISABLED;
|
| 147 | met_pmu[i].event = 0;
|
| 148 | }
|
| 149 | gPMU_CNT[cpu] = 0;
|
| 150 | }
|
| 151 | module_status = 0;
|
| 152 | return 0;
|
| 153 | }
|
| 154 |
|
| 155 |
|
| 156 | static struct met_pmu_v2 *lookup_pmu(struct kobject *kobj)
|
| 157 | {
|
| 158 | int i;
|
| 159 | int cpu;
|
| 160 | struct met_pmu_v2 *met_pmu;
|
| 161 |
|
| 162 | for_each_possible_cpu(cpu) {
|
| 163 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 164 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 165 | if (met_pmu[i].kobj_cpu_pmu == kobj)
|
| 166 | return &met_pmu[i];
|
| 167 | }
|
| 168 | }
|
| 169 | return NULL;
|
| 170 | }
|
| 171 |
|
| 172 |
|
| 173 | static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
| 174 | {
|
| 175 | struct met_pmu_v2 *p = lookup_pmu(kobj);
|
| 176 |
|
| 177 | if (p != NULL) {
|
| 178 | switch (p->mode) {
|
| 179 | case 0:
|
| 180 | return snprintf(buf, PAGE_SIZE, "%hhd (disabled)\n", p->mode);
|
| 181 | case 1:
|
| 182 | return snprintf(buf, PAGE_SIZE, "%hhd (interrupt)\n", p->mode);
|
| 183 | case 2:
|
| 184 | return snprintf(buf, PAGE_SIZE, "%hhd (polling)\n", p->mode);
|
| 185 | }
|
| 186 | }
|
| 187 | return -EINVAL;
|
| 188 | }
|
| 189 |
|
| 190 |
|
| 191 | static int cpupmu_create_subfs(struct kobject *parent)
|
| 192 | {
|
| 193 | int ret = 0;
|
| 194 | unsigned int i;
|
| 195 | unsigned int cpu;
|
| 196 | char buf[16];
|
| 197 | struct met_pmu_v2 *met_pmu;
|
| 198 |
|
| 199 | met_pmu_hw_v2 = cpu_pmu_hw_init_v2();
|
| 200 | if (met_pmu_hw_v2 == NULL) {
|
| 201 | PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
|
| 202 | return -ENODEV;
|
| 203 | }
|
| 204 | gMAX_PMU_HW_CNT = met_pmu_hw_v2->max_hw_count;
|
| 205 |
|
| 206 | gKOBJ_CPU = parent;
|
| 207 | for_each_possible_cpu(cpu) {
|
| 208 | met_pmu = kmalloc_array(gMAX_PMU_HW_CNT, sizeof(struct met_pmu_v2), GFP_KERNEL);
|
| 209 | if (met_pmu != NULL) {
|
| 210 | memset(met_pmu, 0x0, gMAX_PMU_HW_CNT * sizeof(struct met_pmu_v2));
|
| 211 | met_pmu_hw_v2->met_pmu[cpu] = met_pmu;
|
| 212 | set_met_pmu_by_cpu_id(cpu, met_pmu);
|
| 213 | } else
|
| 214 | ret = -ENOMEM;
|
| 215 |
|
| 216 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 217 | snprintf(buf, sizeof(buf), "CPU-%d-%d", cpu, i);
|
| 218 | met_pmu[i].kobj_cpu_pmu = kobject_create_and_add(buf, gKOBJ_CPU);
|
| 219 | if (met_pmu[i].kobj_cpu_pmu) {
|
| 220 | ret = sysfs_create_file(met_pmu[i].kobj_cpu_pmu, &mode_attr.attr);
|
| 221 | if (ret != 0) {
|
| 222 | PR_BOOTMSG("Failed to create mode in sysfs\n");
|
| 223 | goto out;
|
| 224 | }
|
| 225 | }
|
| 226 | }
|
| 227 | }
|
| 228 | out:
|
| 229 | if (ret != 0) {
|
| 230 | for_each_possible_cpu(cpu) {
|
| 231 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 232 | if (met_pmu != NULL) {
|
| 233 | kfree(met_pmu);
|
| 234 | set_met_pmu_by_cpu_id(cpu, NULL);
|
| 235 | }
|
| 236 | }
|
| 237 | }
|
| 238 | return ret;
|
| 239 | }
|
| 240 |
|
| 241 |
|
| 242 | static void cpupmu_delete_subfs(void)
|
| 243 | {
|
| 244 | unsigned int i;
|
| 245 | unsigned int cpu;
|
| 246 | struct met_pmu_v2 *met_pmu;
|
| 247 |
|
| 248 | for_each_possible_cpu(cpu) {
|
| 249 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 250 | if (met_pmu != NULL) {
|
| 251 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 252 | sysfs_remove_file(met_pmu[i].kobj_cpu_pmu, &mode_attr.attr);
|
| 253 | kobject_del(met_pmu[i].kobj_cpu_pmu);
|
| 254 | kobject_put(met_pmu[i].kobj_cpu_pmu);
|
| 255 | met_pmu[i].kobj_cpu_pmu = NULL;
|
| 256 | }
|
| 257 | kfree(met_pmu);
|
| 258 | }
|
| 259 | set_met_pmu_by_cpu_id(cpu, NULL);
|
| 260 | }
|
| 261 |
|
| 262 | if (gKOBJ_CPU != NULL) {
|
| 263 | gKOBJ_CPU = NULL;
|
| 264 | }
|
| 265 |
|
| 266 | met_pmu_hw_v2 = NULL;
|
| 267 | }
|
| 268 |
|
| 269 |
|
/*
 * Emit one sample of "cnt" PMU counter values into the MET trace.
 * NOTE(review): marked noinline, presumably so the symbol survives for
 * trace post-processing / probing -- TODO confirm.
 */
noinline void mp_cpu_v2(unsigned char cnt, unsigned int *value)
{
	/* callers size value[] as MXNR_CPU_V2; refuse anything larger */
	if (cnt < MXNR_CPU_V2)
		MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}
|
| 275 |
|
| 276 |
|
/*
 * No-op overflow callback.  perf_event_create_kernel_counter() insists on
 * a handler even though this driver only ever polls its counters.
 */
static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
			  struct pt_regs *regs)
{
}
|
| 282 |
|
| 283 |
|
/*
 * Poll every perf-API-backed counter of "cpu" and emit the per-counter
 * deltas since the previous poll as one trace sample.
 *
 * @stamp: timestamp supplied by the MET polling core (unused here).
 * @cpu:   CPU whose counters to read; must be the CPU the per-cpu perf
 *         events were created on.
 *
 * A sample is emitted only when every configured counter produced a
 * delta this round (count == gPMU_CNT[cpu]); partial rounds -- e.g. the
 * priming pass right after setup -- are dropped.
 */
void perf_cpupmu_polling_v2(unsigned long long stamp, int cpu)
{
	int i, count, delta;
	struct perf_event *ev;
	unsigned int pmu_value[MXNR_CPU_V2];
	struct met_pmu_v2 *met_pmu;

	/* events for this CPU not created yet: nothing to report */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	memset(pmu_value, 0, sizeof(pmu_value));
	count = 0;
	met_pmu = get_met_pmu_by_cpu_id(cpu);
	for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
		if (met_pmu[i].mode == 0)	/* slot disabled */
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			/* first reading only primes the "previous" value */
			if (per_cpu(perfPrev, cpu)[i] == 0) {
				per_cpu(perfPrev, cpu)[i] = met_perf_event_read_local(ev);
				continue;
			}
			per_cpu(perfCurr, cpu)[i] = met_perf_event_read_local(ev);
			delta = per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i];
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			/* int-truncated counters may wrap; report the magnitude */
			if (delta < 0)
				delta *= -1;
			pmu_value[i] = delta;
			count++;
		}
	}

	/* emit only a complete sample: all configured counters were read */
	if (count == gPMU_CNT[cpu])
		mp_cpu_v2(count, pmu_value);
}
|
| 320 |
|
| 321 |
|
/*
 * Create and enable one kernel perf event per configured counter slot of
 * "cpu".  Idempotent: guarded by the per-CPU perfSet flag.  Slots whose
 * event fails to create, or whose pinned event does not become ACTIVE,
 * are left NULL and skipped by the poller.  Always returns 0.
 */
static int perf_thread_set_perf_events_v2(unsigned int cpu)
{
	int i, size;
	struct perf_event *ev;
	struct met_pmu_v2 *met_pmu;

	size = sizeof(struct perf_event_attr);
	if (per_cpu(perfSet, cpu) == 0) {
		met_pmu = get_met_pmu_by_cpu_id(cpu);
		for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
			per_cpu(pevent, cpu)[i] = NULL;
			if (!met_pmu[i].mode) {/* Skip disabled counters */
				continue;
			}
			per_cpu(perfPrev, cpu)[i] = 0;
			per_cpu(perfCurr, cpu)[i] = 0;
			memset(&per_cpu(pevent_attr, cpu)[i], 0, size);
			per_cpu(pevent_attr, cpu)[i].config = met_pmu[i].event;
			per_cpu(pevent_attr, cpu)[i].type = PERF_TYPE_RAW;
			per_cpu(pevent_attr, cpu)[i].size = size;
			per_cpu(pevent_attr, cpu)[i].sample_period = 0;	/* polled; no sampling */
			per_cpu(pevent_attr, cpu)[i].pinned = 1;	/* must stay on the PMU */
			/* 0xff is MET's pseudo event id for the cycle counter */
			if (met_pmu[i].event == 0xff) {
				per_cpu(pevent_attr, cpu)[i].type = PERF_TYPE_HARDWARE;
				per_cpu(pevent_attr, cpu)[i].config = PERF_COUNT_HW_CPU_CYCLES;
			}

			per_cpu(pevent, cpu)[i] =
			    perf_event_create_kernel_counter(&per_cpu(pevent_attr, cpu)[i], cpu, NULL,
							     dummy_handler, NULL);
			if (IS_ERR(per_cpu(pevent, cpu)[i])) {
				per_cpu(pevent, cpu)[i] = NULL;
				PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
				continue;
			}

			/* pinned event that never got scheduled: give it back */
			if (per_cpu(pevent, cpu)[i]->state != PERF_EVENT_STATE_ACTIVE) {
				perf_event_release_kernel(per_cpu(pevent, cpu)[i]);
				per_cpu(pevent, cpu)[i] = NULL;
				PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
				continue;
			}

			ev = per_cpu(pevent, cpu)[i];
			if (ev != NULL) {
				perf_event_enable(ev);
			}
		} /* for all PMU counter */
		per_cpu(perfSet, cpu) = 1;
	} /* for perfSet */
	return 0;
}
|
| 374 |
|
| 375 |
|
/*
 * Delayed-work entry point: create the perf events for the work item's
 * CPU exactly once (guarded by perf_task_init_done).
 *
 * NOTE(review): reads dwork->cpu, which is not a field of the mainline
 * struct delayed_work -- presumably a MET/vendor kernel extension.
 * Verify against the kernel tree this builds for.
 */
static void perf_thread_setup_v2(struct work_struct *work)
{
	unsigned int cpu;
	struct delayed_work *dwork = to_delayed_work(work);

	cpu = dwork->cpu;
	if (per_cpu(perf_task_init_done, cpu) == 0) {
		per_cpu(perf_task_init_done, cpu) = 1;
		perf_thread_set_perf_events_v2(cpu);
	}

	return ;
}
|
| 389 |
|
| 390 |
|
/*
 * CPU-online hook: schedule one-shot delayed work that creates the perf
 * events for "cpu".  Does nothing when the module is not armed
 * (met_cpupmu_v2.mode == 0) or when the work was already scheduled
 * (perf_delayed_work_setup non-NULL).
 */
void met_perf_cpupmu_online_v2(unsigned int cpu)
{
	if (met_cpupmu_v2.mode == 0) {
		PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
		return;
	}

	per_cpu(perf_cpuid, cpu) = cpu;
	if (per_cpu(perf_delayed_work_setup, cpu) == NULL) {
		struct delayed_work *dwork;

		dwork = &per_cpu(cpu_pmu_dwork, cpu);
		/* NOTE(review): ->cpu is a non-mainline field, see perf_thread_setup_v2 */
		dwork->cpu = cpu;
		INIT_DELAYED_WORK(dwork, perf_thread_setup_v2);
		schedule_delayed_work(dwork, 0);
		per_cpu(perf_delayed_work_setup, cpu) = dwork;
	}
}
|
| 409 |
|
| 410 |
|
| 411 | void met_perf_cpupmu_down_v2(void *data)
|
| 412 | {
|
| 413 | unsigned int cpu;
|
| 414 | unsigned int i;
|
| 415 | struct perf_event *ev;
|
| 416 | struct met_pmu_v2 *met_pmu;
|
| 417 |
|
| 418 | cpu = *((unsigned int *)data);
|
| 419 | if (met_cpupmu_v2.mode == 0)
|
| 420 | return;
|
| 421 | if (per_cpu(perfSet, cpu) == 0)
|
| 422 | return;
|
| 423 |
|
| 424 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 425 | per_cpu(perfSet, cpu) = 0;
|
| 426 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 427 | if (!met_pmu[i].mode)
|
| 428 | continue;
|
| 429 | ev = per_cpu(pevent, cpu)[i];
|
| 430 | if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
|
| 431 | perf_event_disable(ev);
|
| 432 | perf_event_release_kernel(ev);
|
| 433 | }
|
| 434 | }
|
| 435 | per_cpu(perf_task_init_done, cpu) = 0;
|
| 436 | per_cpu(perf_delayed_work_setup, cpu) = NULL;
|
| 437 | }
|
| 438 |
|
| 439 |
|
| 440 | void met_perf_cpupmu_start_v2(void)
|
| 441 | {
|
| 442 | unsigned int cpu;
|
| 443 |
|
| 444 | for_each_online_cpu(cpu) {
|
| 445 | met_perf_cpupmu_online_v2(cpu);
|
| 446 | }
|
| 447 | }
|
| 448 |
|
| 449 |
|
| 450 | void met_perf_cpupmu_stop_v2(void)
|
| 451 | {
|
| 452 | unsigned int cpu;
|
| 453 |
|
| 454 | for_each_online_cpu(cpu) {
|
| 455 | per_cpu(perf_cpuid, cpu) = cpu;
|
| 456 | met_perf_cpupmu_down_v2((void *)&per_cpu(perf_cpuid, cpu));
|
| 457 | }
|
| 458 | }
|
| 459 |
|
| 460 |
|
/*
 * metdevice.timed_polling callback: sample the PMU counters of "cpu".
 * Dispatches on the global access method: 0 = read the PMU registers
 * through the HW layer, otherwise go through the perf API path.
 */
static void cpupmu_polling(unsigned long long stamp, int cpu)
{
	int count;
	struct met_pmu_v2 *met_pmu;
	unsigned int pmu_value[MXNR_CPU_V2];

	met_pmu = get_met_pmu_by_cpu_id(cpu);
	if (met_cpu_pmu_method == 0) {
		count = met_pmu_hw_v2->polling(met_pmu, gMAX_PMU_HW_CNT, pmu_value);
		mp_cpu_v2(count, pmu_value);
	} else
		perf_cpupmu_polling_v2(stamp, cpu);
}
|
| 474 |
|
| 475 |
|
/* Exported (non-static) wrapper so other modules can trigger a poll. */
void cpupmu_polling_v2(unsigned long long stamp, int cpu)
{
	cpupmu_polling(stamp, cpu);
}
|
| 480 |
|
| 481 |
|
| 482 | static void _cpupmu_start(void *info)
|
| 483 | {
|
| 484 | unsigned int *cpu = (unsigned int *)info;
|
| 485 | struct met_pmu_v2 *met_pmu;
|
| 486 |
|
| 487 | met_pmu = get_met_pmu_by_cpu_id(*cpu);
|
| 488 | met_pmu_hw_v2->start(met_pmu, gMAX_PMU_HW_CNT);
|
| 489 | }
|
| 490 |
|
| 491 | static void cpupmu_start(void)
|
| 492 | {
|
| 493 | if (module_status == 1) {
|
| 494 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 495 | return;
|
| 496 | }
|
| 497 |
|
| 498 | if (met_cpu_pmu_method == 0) {
|
| 499 | int this_cpu = smp_processor_id();
|
| 500 | int cpu;
|
| 501 |
|
| 502 | for_each_possible_cpu(cpu) {
|
| 503 | if (cpu == this_cpu)
|
| 504 | _cpupmu_start(&cpu);
|
| 505 | else
|
| 506 | met_smp_call_function_single_symbol(cpu, _cpupmu_start, &cpu, 1);
|
| 507 | }
|
| 508 | }
|
| 509 | module_status = 1;
|
| 510 | }
|
| 511 |
|
| 512 | static void _cpupmu_stop(void *info)
|
| 513 | {
|
| 514 | (void)info;
|
| 515 |
|
| 516 | met_pmu_hw_v2->stop(gMAX_PMU_HW_CNT);
|
| 517 | }
|
| 518 |
|
| 519 | static void cpupmu_stop(void)
|
| 520 | {
|
| 521 | if (module_status == 0) {
|
| 522 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 523 | return;
|
| 524 | }
|
| 525 |
|
| 526 | if (met_cpu_pmu_method == 0) {
|
| 527 | int this_cpu = smp_processor_id();
|
| 528 | int cpu;
|
| 529 |
|
| 530 | for_each_possible_cpu(cpu) {
|
| 531 | if (cpu == this_cpu)
|
| 532 | _cpupmu_stop(&cpu);
|
| 533 | else
|
| 534 | met_smp_call_function_single_symbol(cpu, _cpupmu_stop, &cpu, 1);
|
| 535 | }
|
| 536 | }
|
| 537 | module_status = 0;
|
| 538 | }
|
| 539 |
|
| 540 | static int cpupmu_print_help(char *buf, int len)
|
| 541 | {
|
| 542 | return snprintf(buf, PAGE_SIZE, help, met_pmu_hw_v2->name, gMAX_PMU_HW_CNT - 1);
|
| 543 | }
|
| 544 |
|
/*
 * metdevice.print_header callback: emit the trace header lines (access
 * method, cache line size, and one "CPU-n=0xev:name,..." entry per
 * online CPU with configured events).  Returns bytes written to "buf".
 *
 * Side effects: clears each emitted slot's mode and finally resets the
 * entire driver state -- header printing is the last step of a session.
 * NOTE(review): "len" is ignored; all bounds use PAGE_SIZE.  The first
 * two snprintf calls pass PAGE_SIZE rather than PAGE_SIZE - ret, which
 * is only safe because ret is still 0 there.
 */
static int cpupmu_print_header(char *buf, int len)
{
	int i;
	int ret = 0;
	int pmu_cnt = 0;
	char name[32];
	unsigned int cpu;
	struct met_pmu_v2 *met_pmu;

	/*append CPU PMU access method*/
	if (met_cpu_pmu_method == 0)
		ret += snprintf(buf + ret, PAGE_SIZE,
				"met-info [000] 0.0: CPU_PMU_method: PMU registers\n");
	else
		ret += snprintf(buf + ret, PAGE_SIZE,
				"met-info [000] 0.0: CPU_PMU_method: perf APIs\n");

	/*append cache line size*/
	ret += snprintf(buf + ret, PAGE_SIZE - ret, cache_line_header, cache_line_size());
	ret += snprintf(buf + ret, PAGE_SIZE - ret, header);
	for_each_online_cpu(cpu) {
		int cnt = 0;

		pmu_cnt = gPMU_CNT[cpu];
		met_pmu = get_met_pmu_by_cpu_id(cpu);
		for (i = 0; i < pmu_cnt; i++) {
			if (met_pmu[i].mode == 0)
				continue;

			/* first event of a CPU gets the "CPU-n=" prefix */
			if (met_pmu_hw_v2->get_event_desc && 0 == met_pmu_hw_v2->get_event_desc(met_pmu[i].event, name)) {
				if (cnt == 0) {
					ret += snprintf(buf + ret, PAGE_SIZE - ret, "CPU-%d=0x%x:%s", cpu, met_pmu[i].event, name);
					cnt++;
				} else
					ret += snprintf(buf + ret, PAGE_SIZE - ret, ",0x%x:%s", met_pmu[i].event, name);
			}
			met_pmu[i].mode = 0;	/* slot consumed */
		}
		if (cnt > 0 && cpu < MXNR_CPU_V2 - 1)
			ret += snprintf(buf + ret, PAGE_SIZE - ret, ";");
	}
	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
	met_cpupmu_v2.mode = 0;
	reset_driver_stat();
	return ret;
}
|
| 591 |
|
| 592 | /*
|
| 593 | * "met-cmd --start --pmu_core_evt=0:0x3,0x16,0x17"
|
| 594 | */
|
| 595 | static int cpupmu_process_argument(const char *arg, int len)
|
| 596 | {
|
| 597 | int ret;
|
| 598 | unsigned int cpu;
|
| 599 | unsigned int value;
|
| 600 | unsigned int idx = 0;
|
| 601 | char *str = NULL;
|
| 602 | char *token = NULL;
|
| 603 | struct met_pmu_v2 *met_pmu = NULL;
|
| 604 |
|
| 605 | if (met_cpu_pmu_method == 0)
|
| 606 | gMAX_PMU_HW_CNT = met_pmu_hw_v2->max_hw_count;
|
| 607 | else
|
| 608 | gMAX_PMU_HW_CNT = perf_num_counters();
|
| 609 |
|
| 610 | if (gMAX_PMU_HW_CNT == 0) {
|
| 611 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 612 | goto arg_out;
|
| 613 | }
|
| 614 |
|
| 615 | str = kstrdup(arg, GFP_KERNEL);
|
| 616 | token = strsep(&str, ":");
|
| 617 | ret = met_parse_num(token, &cpu, strlen(token));
|
| 618 | if (ret != 0) {
|
| 619 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 620 | goto arg_out;
|
| 621 | }
|
| 622 |
|
| 623 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 624 | while (token && met_pmu && idx < gMAX_PMU_HW_CNT) {
|
| 625 | token = strsep(&str, ",\r\n");
|
| 626 | if (token) {
|
| 627 | ret = met_parse_num(token, &value, strlen(token));
|
| 628 | if (ret != 0) {
|
| 629 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 630 | goto arg_out;
|
| 631 | }
|
| 632 | if (value != 0xff) {
|
| 633 | if (idx >= (gMAX_PMU_HW_CNT - 1)) {
|
| 634 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 635 | goto arg_out;
|
| 636 | }
|
| 637 | met_pmu[idx].mode = MODE_POLLING;
|
| 638 | met_pmu[idx].event = value;
|
| 639 | idx++;
|
| 640 | gPMU_CNT[cpu]++;
|
| 641 | } else {
|
| 642 | if (met_cpu_pmu_method == 0) {
|
| 643 | met_pmu[gMAX_PMU_HW_CNT - 1].mode = MODE_POLLING;
|
| 644 | met_pmu[gMAX_PMU_HW_CNT - 1].event = 0xff;
|
| 645 | gPMU_CNT[cpu]++;
|
| 646 | } else {
|
| 647 | if (idx > (gMAX_PMU_HW_CNT - 1)) {
|
| 648 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 649 | goto arg_out;
|
| 650 | }
|
| 651 | met_pmu[idx].mode = MODE_POLLING;
|
| 652 | met_pmu[idx].event = 0xff;
|
| 653 | idx++;
|
| 654 | gPMU_CNT[cpu]++;
|
| 655 | }
|
| 656 | }
|
| 657 | if (met_pmu_hw_v2->check_event(met_pmu, gPMU_CNT[cpu], value) < 0) {
|
| 658 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 659 | goto arg_out;
|
| 660 | }
|
| 661 | }
|
| 662 | }
|
| 663 | met_cpupmu_v2.mode = 1;
|
| 664 | module_status = 0;
|
| 665 | return 0;
|
| 666 |
|
| 667 | arg_out:
|
| 668 | if (str)
|
| 669 | kfree(str);
|
| 670 | reset_driver_stat();
|
| 671 | return -EINVAL;
|
| 672 | }
|