rjw | 1f88458 | 2022-01-06 17:20:42 +0800 | [diff] [blame^] | 1 | /*
|
| 2 | * Copyright (C) 2018 MediaTek Inc.
|
| 3 | *
|
| 4 | * This program is free software: you can redistribute it and/or modify
|
| 5 | * it under the terms of the GNU General Public License version 2 as
|
| 6 | * published by the Free Software Foundation.
|
| 7 | *
|
| 8 | * This program is distributed in the hope that it will be useful,
|
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
| 11 | * GNU General Public License for more details.
|
| 12 | */
|
| 13 |
|
| 14 | #include <linux/slab.h>
|
| 15 | #include <linux/version.h>
|
| 16 |
|
| 17 | #include "interface.h"
|
| 18 | #include "trace.h"
|
| 19 | #include "cpu_pmu_v2.h"
|
| 20 | #include "v8_pmu_hw_v2.h"
|
| 21 | #include "met_drv.h"
|
| 22 |
|
| 23 |
|
/* Presumably consumed by the MET headers included below to enable
 * user-event support paths — TODO confirm where this is tested. */
#define MET_USER_EVENT_SUPPORT
|
| 25 |
|
| 26 | #include <linux/kthread.h>
|
| 27 | #include <linux/kernel.h>
|
| 28 | #include <linux/sched.h>
|
| 29 | #include <linux/wait.h>
|
| 30 | #include <linux/signal.h>
|
| 31 | #include <linux/workqueue.h>
|
| 32 | #include <linux/perf_event.h>
|
| 33 | #include "met_kernel_symbol.h"
|
| 34 |
|
| 35 |
|
/*******************************************************************************
 * Type Define
 *******************************************************************************/
/* Number of per-CPU counter slots tracked by the per-cpu arrays below. */
#define CNTMAX 8


/*******************************************************************************
 * Function Prototypes
 *******************************************************************************/
/* Bounds-checked accessors for the per-CPU met_pmu_v2 descriptor table. */
static inline struct met_pmu_v2 *get_met_pmu_by_cpu_id(const unsigned int cpu);
static inline void set_met_pmu_by_cpu_id(const unsigned int cpu, struct met_pmu_v2 *met_pmu);

static int reset_driver_stat(void);
static struct met_pmu_v2 *lookup_pmu(struct kobject *kobj);

/* sysfs "mode" attribute show handler (read-only, see mode_attr). */
static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf);

/* metdevice callbacks wired into met_cpupmu_v2 below. */
static int cpupmu_create_subfs(struct kobject *parent);
static void cpupmu_delete_subfs(void);
static void _cpupmu_start(void *info);
static void cpupmu_start(void);
static void _cpupmu_stop(void *info);
static void cpupmu_stop(void);
static void cpupmu_polling(unsigned long long stamp, int cpu);
extern void cpupmu_polling_v2(unsigned long long stamp, int cpu);
static int cpupmu_print_help(char *buf, int len);
static int cpupmu_print_header(char *buf, int len);
static int cpupmu_process_argument(const char *arg, int len);
|
| 64 |
|
| 65 |
|
/*******************************************************************************
 * Global Variables
 *******************************************************************************/
/* 1 while capture is running (between cpupmu_start and cpupmu_stop). */
static int module_status;

/* PMU hardware abstraction, set by cpupmu_create_subfs(). */
struct cpu_pmu_hw_v2 *met_pmu_hw_v2;

/* Number of events configured per CPU via cpupmu_process_argument(). */
static unsigned int gPMU_CNT[2*MXNR_CPU_V2];
/* Usable HW counters: max_hw_count (register mode) or perf_num_counters(). */
static unsigned int gMAX_PMU_HW_CNT;

/* Parent sysfs kobject under which per-counter directories are created. */
static struct kobject *gKOBJ_CPU;
/* Per-CPU descriptor tables, gMAX_PMU_HW_CNT entries each (NULL until
 * cpupmu_create_subfs() allocates them). */
static struct met_pmu_v2 *gMET_PMU[2*MXNR_CPU_V2];

static struct kobj_attribute mode_attr = __ATTR(mode, 0444, mode_show, NULL);

/* Trace-header and help format strings. */
static const char cache_line_header[] =
	"met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
static const char header[] =
	"met-info [000] 0.0: met_cpu_header_v2: ";
static const char help[] =
	" --cpu-pmu=CORE_ID:EVENT select CPU-PMU events. in %s,\n"
	" you can enable at most \"%d general purpose events\"\n"
	" plus \"one special 0xff (CPU_CYCLE) event\"\n";

/* Per-CPU perf bookkeeping, indexed by counter slot: previous/current raw
 * counts, perf_event objects and their attrs. */
static DEFINE_PER_CPU(int[CNTMAX], perfCurr);
static DEFINE_PER_CPU(int[CNTMAX], perfPrev);
static DEFINE_PER_CPU(struct perf_event * [CNTMAX], pevent);
static DEFINE_PER_CPU(struct perf_event_attr [CNTMAX], pevent_attr);
/* 1 once perf events have been created for this cpu. */
static DEFINE_PER_CPU(int, perfSet);
/* 1 once the deferred setup work has run for this cpu. */
static DEFINE_PER_CPU(unsigned int, perf_task_init_done);
/* Stable per-cpu storage for the cpu id passed to the down callback. */
static DEFINE_PER_CPU(unsigned int, perf_cpuid);

/* Deferred per-CPU perf-event setup work; perf_delayed_work_setup doubles
 * as a "work already scheduled" flag. */
static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork);
static DEFINE_PER_CPU(struct delayed_work *, perf_delayed_work_setup);
|
/* MET device descriptor for the "cpu-pmu" sub-device: registers the sysfs
 * setup/teardown, start/stop, per-interval polling, and argument/header
 * callbacks with the MET core. */
struct metdevice met_cpupmu_v2 = {
	.name = "cpu-pmu",
	.type = MET_TYPE_PMU,
	.cpu_related = 1,
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.stop = cpupmu_stop,
	.polling_interval = 1,
	.timed_polling = cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument
};
|
| 115 |
|
| 116 |
|
/*******************************************************************************
 * Implementation Start
 *******************************************************************************/
|
| 120 | static inline struct met_pmu_v2 *get_met_pmu_by_cpu_id(const unsigned int cpu)
|
| 121 | {
|
| 122 | if (cpu < MXNR_CPU_V2)
|
| 123 | return gMET_PMU[cpu];
|
| 124 | else
|
| 125 | return NULL;
|
| 126 | }
|
| 127 |
|
| 128 |
|
| 129 | static inline void set_met_pmu_by_cpu_id(const unsigned int cpu, struct met_pmu_v2 *met_pmu)
|
| 130 | {
|
| 131 | if (cpu < MXNR_CPU_V2)
|
| 132 | gMET_PMU[cpu] = met_pmu;
|
| 133 | }
|
| 134 |
|
| 135 |
|
| 136 | static int reset_driver_stat()
|
| 137 | {
|
| 138 | int i;
|
| 139 | int cpu;
|
| 140 | struct met_pmu_v2 *met_pmu;
|
| 141 |
|
| 142 | met_cpupmu_v2.mode = 0;
|
| 143 | for_each_possible_cpu(cpu) {
|
| 144 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 145 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 146 | met_pmu[i].mode = MODE_DISABLED;
|
| 147 | met_pmu[i].event = 0;
|
| 148 | }
|
| 149 | gPMU_CNT[cpu] = 0;
|
| 150 | }
|
| 151 | module_status = 0;
|
| 152 | return 0;
|
| 153 | }
|
| 154 |
|
| 155 |
|
| 156 | static struct met_pmu_v2 *lookup_pmu(struct kobject *kobj)
|
| 157 | {
|
| 158 | int i;
|
| 159 | int cpu;
|
| 160 | struct met_pmu_v2 *met_pmu;
|
| 161 |
|
| 162 | for_each_possible_cpu(cpu) {
|
| 163 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 164 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 165 | if (met_pmu[i].kobj_cpu_pmu == kobj)
|
| 166 | return &met_pmu[i];
|
| 167 | }
|
| 168 | }
|
| 169 | return NULL;
|
| 170 | }
|
| 171 |
|
| 172 |
|
| 173 | static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
| 174 | {
|
| 175 | struct met_pmu_v2 *p = lookup_pmu(kobj);
|
| 176 |
|
| 177 | if (p != NULL) {
|
| 178 | switch (p->mode) {
|
| 179 | case 0:
|
| 180 | return snprintf(buf, PAGE_SIZE, "%hhd (disabled)\n", p->mode);
|
| 181 | case 1:
|
| 182 | return snprintf(buf, PAGE_SIZE, "%hhd (interrupt)\n", p->mode);
|
| 183 | case 2:
|
| 184 | return snprintf(buf, PAGE_SIZE, "%hhd (polling)\n", p->mode);
|
| 185 | }
|
| 186 | }
|
| 187 | return -EINVAL;
|
| 188 | }
|
| 189 |
|
| 190 |
|
| 191 | static int cpupmu_create_subfs(struct kobject *parent)
|
| 192 | {
|
| 193 | int ret = 0;
|
| 194 | unsigned int i;
|
| 195 | unsigned int cpu;
|
| 196 | char buf[16];
|
| 197 | struct met_pmu_v2 *met_pmu;
|
| 198 |
|
| 199 | met_pmu_hw_v2 = cpu_pmu_hw_init_v2();
|
| 200 | if (met_pmu_hw_v2 == NULL) {
|
| 201 | PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
|
| 202 | return -ENODEV;
|
| 203 | }
|
| 204 | gMAX_PMU_HW_CNT = met_pmu_hw_v2->max_hw_count;
|
| 205 |
|
| 206 | gKOBJ_CPU = parent;
|
| 207 | for_each_possible_cpu(cpu) {
|
| 208 | met_pmu = kmalloc_array(gMAX_PMU_HW_CNT, sizeof(struct met_pmu_v2), GFP_KERNEL);
|
| 209 | if (met_pmu != NULL) {
|
| 210 | memset(met_pmu, 0x0, gMAX_PMU_HW_CNT * sizeof(struct met_pmu_v2));
|
| 211 | met_pmu_hw_v2->met_pmu[cpu] = met_pmu;
|
| 212 | set_met_pmu_by_cpu_id(cpu, met_pmu);
|
| 213 | } else
|
| 214 | ret = -ENOMEM;
|
| 215 |
|
| 216 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 217 | snprintf(buf, sizeof(buf), "CPU-%d-%d", cpu, i);
|
| 218 | met_pmu[i].kobj_cpu_pmu = kobject_create_and_add(buf, gKOBJ_CPU);
|
| 219 | if (met_pmu[i].kobj_cpu_pmu) {
|
| 220 | ret = sysfs_create_file(met_pmu[i].kobj_cpu_pmu, &mode_attr.attr);
|
| 221 | if (ret != 0) {
|
| 222 | PR_BOOTMSG("Failed to create mode in sysfs\n");
|
| 223 | goto out;
|
| 224 | }
|
| 225 | }
|
| 226 | }
|
| 227 | }
|
| 228 | out:
|
| 229 | if (ret != 0) {
|
| 230 | for_each_possible_cpu(cpu) {
|
| 231 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 232 | if (met_pmu != NULL) {
|
| 233 | kfree(met_pmu);
|
| 234 | set_met_pmu_by_cpu_id(cpu, NULL);
|
| 235 | }
|
| 236 | }
|
| 237 | }
|
| 238 | return ret;
|
| 239 | }
|
| 240 |
|
| 241 |
|
| 242 | static void cpupmu_delete_subfs(void)
|
| 243 | {
|
| 244 | unsigned int i;
|
| 245 | unsigned int cpu;
|
| 246 | struct met_pmu_v2 *met_pmu;
|
| 247 |
|
| 248 | for_each_possible_cpu(cpu) {
|
| 249 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 250 | if (met_pmu != NULL) {
|
| 251 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 252 | sysfs_remove_file(met_pmu[i].kobj_cpu_pmu, &mode_attr.attr);
|
| 253 | kobject_del(met_pmu[i].kobj_cpu_pmu);
|
| 254 | kobject_put(met_pmu[i].kobj_cpu_pmu);
|
| 255 | met_pmu[i].kobj_cpu_pmu = NULL;
|
| 256 | }
|
| 257 | kfree(met_pmu);
|
| 258 | }
|
| 259 | set_met_pmu_by_cpu_id(cpu, NULL);
|
| 260 | }
|
| 261 |
|
| 262 | if (gKOBJ_CPU != NULL) {
|
| 263 | gKOBJ_CPU = NULL;
|
| 264 | }
|
| 265 |
|
| 266 | met_pmu_hw_v2 = NULL;
|
| 267 | }
|
| 268 |
|
| 269 |
|
/*
 * Emit "cnt" PMU counter values into the MET trace buffer.
 * NOTE(review): marked noinline, presumably so the symbol stays a real
 * call site for MET's tracing — confirm.
 */
noinline void mp_cpu_v2(unsigned char cnt, unsigned int *value)
{
	if (cnt < MXNR_CPU_V2)	/* callers pass value[] of MXNR_CPU_V2 entries */
		MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}
|
| 275 |
|
| 276 |
|
/*
 * No-op overflow callback for perf_event_create_kernel_counter().
 */
static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	/* Required as perf_event_create_kernel_counter() requires an overflow handler, even though all we do is poll */
}
|
| 282 |
|
| 283 |
|
/*
 * Poll this CPU's perf counters, compute the delta since the previous
 * poll, and emit one trace sample via mp_cpu_v2() — but only when every
 * configured counter produced a value this round.
 */
void perf_cpupmu_polling_v2(unsigned long long stamp, int cpu)
{
	int i, count, delta;
	struct perf_event *ev;
	/* NOTE(review): indexed by counter slot up to gMAX_PMU_HW_CNT;
	 * assumes gMAX_PMU_HW_CNT <= MXNR_CPU_V2 — confirm */
	unsigned int pmu_value[MXNR_CPU_V2];
	struct met_pmu_v2 *met_pmu;
	u64 value;

	/* perf events not (yet) created on this cpu */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	memset(pmu_value, 0, sizeof(pmu_value));
	count = 0;
	met_pmu = get_met_pmu_by_cpu_id(cpu);
	for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
		if (met_pmu[i].mode == 0)	/* slot not configured */
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			/* first reading only seeds perfPrev; no sample emitted */
			if (per_cpu(perfPrev, cpu)[i] == 0) {
				met_perf_event_read_local_symbol(ev, &value, NULL, NULL);
				per_cpu(perfPrev, cpu)[i] = value;
				continue;
			}
			met_perf_event_read_local_symbol(ev, &value, NULL, NULL);
			per_cpu(perfCurr, cpu)[i] = value;
			delta = per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i];
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			/* counts are truncated from u64 to int; force a wrapped
			 * (negative) delta positive */
			if (delta < 0)
				delta *= -1;
			pmu_value[i] = delta;
			count++;
		}
	}

	/* emit only a complete sample set */
	if (count == gPMU_CNT[cpu])
		mp_cpu_v2(count, pmu_value);
}
|
| 323 |
|
| 324 |
|
/*
 * Create and enable one kernel perf counter per configured slot on "cpu".
 * Idempotent per cpu: the whole body is guarded by the per-cpu perfSet
 * flag. Slots whose event fails to create or to become active are left
 * NULL and are simply skipped by the polling path. Always returns 0.
 */
static int perf_thread_set_perf_events_v2(unsigned int cpu)
{
	int i, size;
	struct perf_event *ev;
	struct met_pmu_v2 *met_pmu;

	size = sizeof(struct perf_event_attr);
	if (per_cpu(perfSet, cpu) == 0) {
		met_pmu = get_met_pmu_by_cpu_id(cpu);
		for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
			per_cpu(pevent, cpu)[i] = NULL;
			if (!met_pmu[i].mode) {/* Skip disabled counters */
				continue;
			}
			per_cpu(perfPrev, cpu)[i] = 0;
			per_cpu(perfCurr, cpu)[i] = 0;
			memset(&per_cpu(pevent_attr, cpu)[i], 0, size);
			per_cpu(pevent_attr, cpu)[i].config = met_pmu[i].event;
			per_cpu(pevent_attr, cpu)[i].type = PERF_TYPE_RAW;
			per_cpu(pevent_attr, cpu)[i].size = size;
			/* sample_period 0: counting mode, never samples */
			per_cpu(pevent_attr, cpu)[i].sample_period = 0;
			per_cpu(pevent_attr, cpu)[i].pinned = 1;
			/* 0xff is MET's pseudo id for the CPU cycle counter */
			if (met_pmu[i].event == 0xff) {
				per_cpu(pevent_attr, cpu)[i].type = PERF_TYPE_HARDWARE;
				per_cpu(pevent_attr, cpu)[i].config = PERF_COUNT_HW_CPU_CYCLES;
			}

			per_cpu(pevent, cpu)[i] =
			    perf_event_create_kernel_counter(&per_cpu(pevent_attr, cpu)[i], cpu, NULL,
							     dummy_handler, NULL);
			if (IS_ERR(per_cpu(pevent, cpu)[i])) {
				per_cpu(pevent, cpu)[i] = NULL;
				PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
				continue;
			}

			/* pinned event that could not get onto the PMU: drop it */
			if (per_cpu(pevent, cpu)[i]->state != PERF_EVENT_STATE_ACTIVE) {
				perf_event_release_kernel(per_cpu(pevent, cpu)[i]);
				per_cpu(pevent, cpu)[i] = NULL;
				PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
				continue;
			}

			ev = per_cpu(pevent, cpu)[i];
			if (ev != NULL) {
				perf_event_enable(ev);
			}
		} /* for all PMU counter */
		per_cpu(perfSet, cpu) = 1;
	} /* for perfSet */
	return 0;
}
|
| 377 |
|
| 378 |
|
/*
 * Delayed-work handler: performs the one-time perf event creation for
 * the CPU recorded in the work item.
 */
static void perf_thread_setup_v2(struct work_struct *work)
{
	unsigned int cpu;
	struct delayed_work *dwork = to_delayed_work(work);

	/* NOTE(review): "cpu" is not a mainline delayed_work member —
	 * presumably added by a MET kernel patch; confirm */
	cpu = dwork->cpu;
	if (per_cpu(perf_task_init_done, cpu) == 0) {
		per_cpu(perf_task_init_done, cpu) = 1;
		perf_thread_set_perf_events_v2(cpu);
	}

	return ;
}
|
| 392 |
|
| 393 |
|
/*
 * CPU-online hook: schedule deferred perf-event setup for "cpu".
 * The work is armed at most once per cpu — perf_delayed_work_setup
 * doubles as the "already scheduled" flag, cleared again in
 * met_perf_cpupmu_down_v2().
 */
void met_perf_cpupmu_online_v2(unsigned int cpu)
{
	if (met_cpupmu_v2.mode == 0) {	/* cpu-pmu not enabled this session */
		PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
		return;
	}

	per_cpu(perf_cpuid, cpu) = cpu;
	if (per_cpu(perf_delayed_work_setup, cpu) == NULL) {
		struct delayed_work *dwork;

		dwork = &per_cpu(cpu_pmu_dwork, cpu);
		/* NOTE(review): "cpu" is not a mainline delayed_work member —
		 * presumably added by a MET kernel patch; confirm */
		dwork->cpu = cpu;
		INIT_DELAYED_WORK(dwork, perf_thread_setup_v2);
		schedule_delayed_work(dwork, 0);
		per_cpu(perf_delayed_work_setup, cpu) = dwork;
	}
}
|
| 412 |
|
| 413 |
|
| 414 | void met_perf_cpupmu_down_v2(void *data)
|
| 415 | {
|
| 416 | unsigned int cpu;
|
| 417 | unsigned int i;
|
| 418 | struct perf_event *ev;
|
| 419 | struct met_pmu_v2 *met_pmu;
|
| 420 |
|
| 421 | cpu = *((unsigned int *)data);
|
| 422 | if (met_cpupmu_v2.mode == 0)
|
| 423 | return;
|
| 424 | if (per_cpu(perfSet, cpu) == 0)
|
| 425 | return;
|
| 426 |
|
| 427 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 428 | per_cpu(perfSet, cpu) = 0;
|
| 429 | for (i = 0; i < gMAX_PMU_HW_CNT; i++) {
|
| 430 | if (!met_pmu[i].mode)
|
| 431 | continue;
|
| 432 | ev = per_cpu(pevent, cpu)[i];
|
| 433 | if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
|
| 434 | perf_event_disable(ev);
|
| 435 | perf_event_release_kernel(ev);
|
| 436 | }
|
| 437 | }
|
| 438 | per_cpu(perf_task_init_done, cpu) = 0;
|
| 439 | per_cpu(perf_delayed_work_setup, cpu) = NULL;
|
| 440 | }
|
| 441 |
|
| 442 |
|
| 443 | void met_perf_cpupmu_start_v2(void)
|
| 444 | {
|
| 445 | unsigned int cpu;
|
| 446 |
|
| 447 | for_each_online_cpu(cpu) {
|
| 448 | met_perf_cpupmu_online_v2(cpu);
|
| 449 | }
|
| 450 | }
|
| 451 |
|
| 452 |
|
| 453 | void met_perf_cpupmu_stop_v2(void)
|
| 454 | {
|
| 455 | unsigned int cpu;
|
| 456 |
|
| 457 | for_each_online_cpu(cpu) {
|
| 458 | per_cpu(perf_cpuid, cpu) = cpu;
|
| 459 | met_perf_cpupmu_down_v2((void *)&per_cpu(perf_cpuid, cpu));
|
| 460 | }
|
| 461 | }
|
| 462 |
|
| 463 |
|
/*
 * Timed polling callback: sample this CPU's counters either straight
 * from the PMU registers (met_cpu_pmu_method == 0) or through the
 * perf-event path, and emit them into the trace.
 */
static void cpupmu_polling(unsigned long long stamp, int cpu)
{
	int count;
	struct met_pmu_v2 *met_pmu;
	unsigned int pmu_value[MXNR_CPU_V2];

	met_pmu = get_met_pmu_by_cpu_id(cpu);
	if (met_cpu_pmu_method == 0) {
		count = met_pmu_hw_v2->polling(met_pmu, gMAX_PMU_HW_CNT, pmu_value);
		mp_cpu_v2(count, pmu_value);
	} else
		perf_cpupmu_polling_v2(stamp, cpu);
}
|
| 477 |
|
| 478 |
|
/* Externally visible wrapper around the static polling routine. */
void cpupmu_polling_v2(unsigned long long stamp, int cpu)
{
	cpupmu_polling(stamp, cpu);
}
|
| 483 |
|
| 484 |
|
/*
 * Program and start the HW PMU counters on the calling CPU; "info"
 * points at the cpu id. Invoked locally or via IPI from cpupmu_start().
 */
static void _cpupmu_start(void *info)
{
	unsigned int *cpu = (unsigned int *)info;
	struct met_pmu_v2 *met_pmu;

	met_pmu = get_met_pmu_by_cpu_id(*cpu);
	met_pmu_hw_v2->start(met_pmu, gMAX_PMU_HW_CNT);
}
|
| 493 |
|
/*
 * metdevice start callback. With direct register access
 * (met_cpu_pmu_method == 0) the PMU must be programmed on each CPU:
 * run _cpupmu_start() locally and via IPI everywhere else. The perf-API
 * method needs no work here (events are created via the online path).
 */
static void cpupmu_start(void)
{
	if (module_status == 1) {	/* already started */
		PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
		return;
	}

	if (met_cpu_pmu_method == 0) {
		int this_cpu = smp_processor_id();
		int cpu;

		for_each_possible_cpu(cpu) {
			if (cpu == this_cpu)
				_cpupmu_start(&cpu);
			else
				/* wait=1: synchronous cross-CPU call */
				met_smp_call_function_single_symbol(cpu, _cpupmu_start, &cpu, 1);
		}
	}
	module_status = 1;
}
|
| 514 |
|
/*
 * Stop the HW PMU counters on the calling CPU. "info" is unused (the
 * stop routine needs no cpu id). Invoked locally or via IPI.
 */
static void _cpupmu_stop(void *info)
{
	(void)info;

	met_pmu_hw_v2->stop(gMAX_PMU_HW_CNT);
}
|
| 521 |
|
/*
 * metdevice stop callback — mirror of cpupmu_start(): with direct
 * register access, stop the PMU on every possible CPU (locally plus
 * synchronous IPIs); the perf-API method needs no work here.
 */
static void cpupmu_stop(void)
{
	if (module_status == 0) {	/* not started */
		PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
		return;
	}

	if (met_cpu_pmu_method == 0) {
		int this_cpu = smp_processor_id();
		int cpu;

		for_each_possible_cpu(cpu) {
			if (cpu == this_cpu)
				_cpupmu_stop(&cpu);
			else
				met_smp_call_function_single_symbol(cpu, _cpupmu_stop, &cpu, 1);
		}
	}
	module_status = 0;
}
|
| 542 |
|
| 543 | static int cpupmu_print_help(char *buf, int len)
|
| 544 | {
|
| 545 | return snprintf(buf, PAGE_SIZE, help, met_pmu_hw_v2->name, gMAX_PMU_HW_CNT - 1);
|
| 546 | }
|
| 547 |
|
/*
 * Emit the trace header: the PMU access method, the CPU cache-line
 * size, and per-CPU "CPU-<n>=0xEVT:NAME,..." event lists. As a side
 * effect it clears every counter's mode and resets the driver state,
 * so it is expected to run once at the end of a capture session.
 */
static int cpupmu_print_header(char *buf, int len)
{
	int i;
	int ret = 0;
	int pmu_cnt = 0;
	char name[32];
	unsigned int cpu;
	struct met_pmu_v2 *met_pmu;

	/*append CPU PMU access method*/
	/* NOTE(review): these two writes pass PAGE_SIZE rather than
	 * PAGE_SIZE - ret; harmless only because ret is still 0 here */
	if (met_cpu_pmu_method == 0)
		ret += snprintf(buf + ret, PAGE_SIZE,
				"met-info [000] 0.0: CPU_PMU_method: PMU registers\n");
	else
		ret += snprintf(buf + ret, PAGE_SIZE,
				"met-info [000] 0.0: CPU_PMU_method: perf APIs\n");

	/*append cache line size*/
	ret += snprintf(buf + ret, PAGE_SIZE - ret, cache_line_header, cache_line_size());
	ret += snprintf(buf + ret, PAGE_SIZE - ret, header);
	for_each_online_cpu(cpu) {
		int cnt = 0;	/* events already printed for this cpu */

		pmu_cnt = gPMU_CNT[cpu];
		met_pmu = get_met_pmu_by_cpu_id(cpu);
		for (i = 0; i < pmu_cnt; i++) {
			if (met_pmu[i].mode == 0)
				continue;

			if (met_pmu_hw_v2->get_event_desc && 0 == met_pmu_hw_v2->get_event_desc(met_pmu[i].event, name)) {
				if (cnt == 0) {
					ret += snprintf(buf + ret, PAGE_SIZE - ret, "CPU-%d=0x%x:%s", cpu, met_pmu[i].event, name);
					cnt++;
				} else
					ret += snprintf(buf + ret, PAGE_SIZE - ret, ",0x%x:%s", met_pmu[i].event, name);
			}
			met_pmu[i].mode = 0;	/* consume the configuration */
		}
		/* ';' separates per-cpu lists; suppressed for the last id */
		if (cnt > 0 && cpu < MXNR_CPU_V2 - 1)
			ret += snprintf(buf + ret, PAGE_SIZE - ret, ";");
	}
	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
	met_cpupmu_v2.mode = 0;
	reset_driver_stat();
	return ret;
}
|
| 594 |
|
| 595 | /*
|
| 596 | * "met-cmd --start --pmu_core_evt=0:0x3,0x16,0x17"
|
| 597 | */
|
| 598 | static int cpupmu_process_argument(const char *arg, int len)
|
| 599 | {
|
| 600 | int ret;
|
| 601 | unsigned int cpu;
|
| 602 | unsigned int value;
|
| 603 | unsigned int idx = 0;
|
| 604 | char *str = NULL;
|
| 605 | char *token = NULL;
|
| 606 | struct met_pmu_v2 *met_pmu = NULL;
|
| 607 |
|
| 608 | if (met_cpu_pmu_method == 0)
|
| 609 | gMAX_PMU_HW_CNT = met_pmu_hw_v2->max_hw_count;
|
| 610 | else
|
| 611 | gMAX_PMU_HW_CNT = perf_num_counters();
|
| 612 |
|
| 613 | if (gMAX_PMU_HW_CNT == 0) {
|
| 614 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 615 | goto arg_out;
|
| 616 | }
|
| 617 |
|
| 618 | str = kstrdup(arg, GFP_KERNEL);
|
| 619 | token = strsep(&str, ":");
|
| 620 | ret = met_parse_num(token, &cpu, strlen(token));
|
| 621 | if (ret != 0) {
|
| 622 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 623 | goto arg_out;
|
| 624 | }
|
| 625 |
|
| 626 | met_pmu = get_met_pmu_by_cpu_id(cpu);
|
| 627 | while (token && met_pmu && idx < gMAX_PMU_HW_CNT) {
|
| 628 | token = strsep(&str, ",\r\n");
|
| 629 | if (token) {
|
| 630 | ret = met_parse_num(token, &value, strlen(token));
|
| 631 | if (ret != 0) {
|
| 632 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 633 | goto arg_out;
|
| 634 | }
|
| 635 | if (value != 0xff) {
|
| 636 | if (idx >= (gMAX_PMU_HW_CNT - 1)) {
|
| 637 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 638 | goto arg_out;
|
| 639 | }
|
| 640 | met_pmu[idx].mode = MODE_POLLING;
|
| 641 | met_pmu[idx].event = value;
|
| 642 | idx++;
|
| 643 | gPMU_CNT[cpu]++;
|
| 644 | } else {
|
| 645 | if (met_cpu_pmu_method == 0) {
|
| 646 | met_pmu[gMAX_PMU_HW_CNT - 1].mode = MODE_POLLING;
|
| 647 | met_pmu[gMAX_PMU_HW_CNT - 1].event = 0xff;
|
| 648 | gPMU_CNT[cpu]++;
|
| 649 | } else {
|
| 650 | if (idx > (gMAX_PMU_HW_CNT - 1)) {
|
| 651 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 652 | goto arg_out;
|
| 653 | }
|
| 654 | met_pmu[idx].mode = MODE_POLLING;
|
| 655 | met_pmu[idx].event = 0xff;
|
| 656 | idx++;
|
| 657 | gPMU_CNT[cpu]++;
|
| 658 | }
|
| 659 | }
|
| 660 | if (met_pmu_hw_v2->check_event(met_pmu, gPMU_CNT[cpu], value) < 0) {
|
| 661 | PR_BOOTMSG("%s:%d\n", __FUNCTION__, __LINE__);
|
| 662 | goto arg_out;
|
| 663 | }
|
| 664 | }
|
| 665 | }
|
| 666 | met_cpupmu_v2.mode = 1;
|
| 667 | module_status = 0;
|
| 668 | return 0;
|
| 669 |
|
| 670 | arg_out:
|
| 671 | if (str)
|
| 672 | kfree(str);
|
| 673 | reset_driver_stat();
|
| 674 | return -EINVAL;
|
| 675 | }
|