blob: 36a6402112f4995eaa29e7338f77c9eeea98d400 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/cpu.h>
14#include <linux/cpu_pm.h>
15#include <linux/perf_event.h>
16
17#if (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
18#include <linux/platform_device.h>
19#include <linux/perf/arm_pmu.h>
20#endif
21
22#include "met_drv.h"
23#include "met_kernel_symbol.h"
24#include "interface.h"
25#include "trace.h"
26#include "cpu_pmu.h"
27#include "mtk_typedefs.h"
28
/* Handle to the platform PMU backend (filled in by cpu_pmu_hw_init()). */
struct cpu_pmu_hw *cpu_pmu;
/* Number of PMU events configured per cpu; used to validate poll results. */
static int counter_cnt[MXNR_CPU];
/* Next free PMU slot index per cpu while parsing --pmu-cpu-evt arguments. */
static int nr_arg[MXNR_CPU];

/* Non-zero while cpupmu sampling is running (checked by the PM notifier). */
int met_perf_cpupmu_status;

/* sysfs knob: enable MTK-internal PMU events (see CPUPMU_V8_2 code below). */
static int mtk_pmu_event_enable = 0;
static struct kobject *kobj_cpu;
DECLARE_KOBJ_ATTR_INT(mtk_pmu_event_enable, mtk_pmu_event_enable);
/* Expands KOBJ_ATTR_ITEM() over every sysfs attribute of this module. */
#define KOBJ_ATTR_LIST \
	do { \
		KOBJ_ATTR_ITEM(mtk_pmu_event_enable); \
	} while (0)
42
#ifdef CONFIG_CPU_PM
/* Non-zero when cpu_pm_pmu_notifier was registered by cpupmu_unique_start(). */
static int use_cpu_pm_pmu_notifier = 0;

/* helper notifier for maintaining pmu states before cpu state transition */
static int cpu_pm_pmu_notify(struct notifier_block *b,
		unsigned long cmd,
		void *p)
{
	int ii;
	int cpu, count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	/* nothing to preserve while sampling is not running */
	if (!met_perf_cpupmu_status)
		return NOTIFY_OK;

	cpu = raw_smp_processor_id();

	switch (cmd) {
	case CPU_PM_ENTER:
		/*
		 * Counter state is lost across power-down: read the counters
		 * now and accumulate the not-yet-polled counts so the next
		 * regular poll can add them back (see met_perf_cpupmu_polling).
		 */
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);
		for (ii = 0; ii < count; ii ++)
			cpu_pmu->cpu_pm_unpolled_loss[cpu][ii] += pmu_value[ii];

		cpu_pmu->stop(cpu_pmu->event_count[cpu]);
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* re-program the counters after (failed or completed) idle */
		cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

struct notifier_block cpu_pm_pmu_notifier = {
	.notifier_call = cpu_pm_pmu_notify,
};
#endif
82
/* latest raw counter reading per event */
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfCurr);
/* previous raw counter reading per event (delta = curr - prev) */
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfPrev);
/* 1 until an event's first (partial) delta has been discarded */
static DEFINE_PER_CPU(int[MXNR_PMU_EVENTS], perfCntFirst);
/* perf_event handles created for this cpu */
static DEFINE_PER_CPU(struct perf_event * [MXNR_PMU_EVENTS], pevent);
/* attribute storage backing the events above */
static DEFINE_PER_CPU(struct perf_event_attr [MXNR_PMU_EVENTS], pevent_attr);
/* non-zero once this cpu's events have been created and enabled */
static DEFINE_PER_CPU(int, perfSet);
/* one-shot guard for perf_thread_setup() */
static DEFINE_PER_CPU(unsigned int, perf_task_init_done);
static DEFINE_PER_CPU(int, perf_cpuid);
/* deferred work used to create events on the target cpu */
static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork_setup);
/* non-NULL while the setup work for this cpu is scheduled or completed */
static DEFINE_PER_CPU(struct delayed_work*, perf_delayed_work_setup);
/* deferred work used to tear events down on the target cpu */
static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork_down);
/* hotplug state per cpu (MET_CPU_ONLINE / MET_CPU_OFFLINE) */
static DEFINE_PER_CPU(int, cpu_status);
95
#ifdef CPUPMU_V8_2
#include <linux/of.h>
#include <linux/of_address.h>
#include <mt-plat/sync_write.h>
#include <mt-plat/mtk_io.h>

/* DT compatible string used to locate the MCUCFG register block */
static char mcucfg_desc[] = "mediatek,mcucfg";
static void __iomem *mcucfg_base = NULL;
/* per-core DBG_CONTROL registers inside MCUCFG (cpu6/cpu7) */
#define DBG_CONTROL_CPU6 ((unsigned long)mcucfg_base + 0x3000 + 0x308) /* DBG_CONTROL */
#define DBG_CONTROL_CPU7 ((unsigned long)mcucfg_base + 0x3800 + 0x308) /* DBG_CONTROL */
/* bit in DBG_CONTROL that exposes MTK-internal PMU events */
#define ENABLE_MTK_PMU_EVENTS_OFFSET 1
/* set when the bit was already on at init, so uninit must not clear it */
static int restore_dbg_ctrl_cpu6;
static int restore_dbg_ctrl_cpu7;
109
/*
 * Map MCUCFG (once) and set the ENABLE_MTK_PMU_EVENTS bit in the
 * DBG_CONTROL register of cpu6 and cpu7, remembering whether the bit was
 * already set so cpu_pmu_debug_uninit() can restore the original state.
 * Returns 1 on success, 0 on failure (mapping released on failure).
 */
int cpu_pmu_debug_init(void)
{
	struct device_node *node = NULL;
	unsigned int value6,value7;

	/*for A75 MTK internal event*/
	if (mcucfg_base == NULL) {
		node = of_find_compatible_node(NULL, NULL, mcucfg_desc);
		if (node == NULL) {
			MET_TRACE("[MET_PMU_DB] of_find node == NULL\n");
			pr_debug("[MET_PMU_DB] of_find node == NULL\n");
			goto out;
		}
		mcucfg_base = of_iomap(node, 0);
		of_node_put(node);
		if (mcucfg_base == NULL) {
			MET_TRACE("[MET_PMU_DB] mcucfg_base == NULL\n");
			pr_debug("[MET_PMU_DB] mcucfg_base == NULL\n");
			goto out;
		}
		MET_TRACE("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
		pr_debug("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
	}

	/* set the enable bit on cpu6 only if it is not already on */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	if (value6 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu6 = 1;
	} else {
		restore_dbg_ctrl_cpu6 = 0;
		mt_reg_sync_writel(value6 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU6);
	}

	/* same for cpu7 */
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	if (value7 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu7 = 1;
	} else {
		restore_dbg_ctrl_cpu7 = 0;
		mt_reg_sync_writel(value7 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU7);
	}

	/* read back for trace/debug confirmation */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	return 1;

out:
	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL init error");
	pr_debug("[MET_PMU_DB]DBG_CONTROL init error");
	return 0;
}
165
/*
 * Undo cpu_pmu_debug_init(): clear the ENABLE_MTK_PMU_EVENTS bit on each
 * core only if this driver was the one that set it, then unmap MCUCFG.
 * Always returns 1.
 */
int cpu_pmu_debug_uninit(void)
{
	unsigned int value6,value7;

	if (restore_dbg_ctrl_cpu6 == 0) {
		value6 = readl(IOMEM(DBG_CONTROL_CPU6));
		mt_reg_sync_writel(value6 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU6);
	}
	if (restore_dbg_ctrl_cpu7 == 0) {
		value7 = readl(IOMEM(DBG_CONTROL_CPU7));
		mt_reg_sync_writel(value7 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU7);
	}

	/* read back for trace/debug confirmation */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);

	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	restore_dbg_ctrl_cpu6 = 0;
	restore_dbg_ctrl_cpu7 = 0;
	return 1;
}
192#endif
193
194
195
196
/*
 * Emit one cpu-pmu sample (cnt counter values) into the MET trace buffer.
 * noinline so the function shows up as a distinct trace point.
 */
noinline void mp_cpu(unsigned char cnt, unsigned int *value)
{
	MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}
201
/* No-op overflow callback; counters here are polled, never sampled. */
static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
		struct pt_regs *regs)
{
	/*
	 * Required as perf_event_create_kernel_counter() requires an overflow handler,
	 * even though all we do is poll.
	 */
}
210
/*
 * Poll every active perf counter on @cpu and emit the deltas since the
 * previous poll as one mp_cpu trace record. The very first reading of each
 * counter is discarded (perfCntFirst) because its delta spans an unknown
 * interval. @stamp is currently unused.
 */
static void perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int event_count = cpu_pmu->event_count[cpu];
	struct met_pmu *pmu = cpu_pmu->pmu[cpu];
	int i, count;
	unsigned long long delta;
	struct perf_event *ev;
	unsigned int pmu_value[MXNR_PMU_EVENTS];
	u64 value;

	/* counters not created yet (or already torn down) */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	count = 0;
	for (i = 0; i < event_count; i++) {
		if (pmu[i].mode == 0)
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			met_perf_event_read_local_symbol(ev, &value);
			per_cpu(perfCurr, cpu)[i] = value;
			delta = (per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i]);
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			if (per_cpu(perfCntFirst, cpu)[i] == 1) {
				/* we shall omit delta counter when we get first counter */
				per_cpu(perfCntFirst, cpu)[i] = 0;
				continue;
			}
			pmu_value[count] = (unsigned int)delta;
			count++;
		}
	}

	/* only emit when every configured counter produced a value */
	if (count == counter_cnt[cpu])
		mp_cpu(count, pmu_value);
}
248
249static struct perf_event* perf_event_create(int cpu, unsigned short event, int count)
250{
251 struct perf_event_attr *ev_attr;
252 struct perf_event *ev;
253
254 ev_attr = per_cpu(pevent_attr, cpu)+count;
255 memset(ev_attr, 0, sizeof(*ev_attr));
256 if (event == 0xff) {
257 ev_attr->config = PERF_COUNT_HW_CPU_CYCLES;
258 ev_attr->type = PERF_TYPE_HARDWARE;
259 } else {
260 ev_attr->config = event;
261 ev_attr->type = PERF_TYPE_RAW;
262 }
263 ev_attr->size = sizeof(*ev_attr);
264 ev_attr->sample_period = 0;
265 ev_attr->pinned = 1;
266
267 ev = perf_event_create_kernel_counter(ev_attr, cpu, NULL, dummy_handler, NULL);
268 if (IS_ERR(ev))
269 return NULL;
270 do {
271 if (ev->state == PERF_EVENT_STATE_ACTIVE)
272 break;
273 if (ev->state == PERF_EVENT_STATE_ERROR) {
274 perf_event_enable(ev);
275 if (ev->state == PERF_EVENT_STATE_ACTIVE)
276 break;
277 }
278 perf_event_release_kernel(ev);
279 return NULL;
280 } while (0);
281
282 return ev;
283}
284
/*
 * Disable (if still running) and free a counter created by
 * perf_event_create(). @cpu is unused but kept for interface symmetry.
 */
static void perf_event_release(int cpu, struct perf_event *ev)
{
	if (ev->state == PERF_EVENT_STATE_ACTIVE)
		perf_event_disable(ev);
	perf_event_release_kernel(ev);
}
291
292static int perf_thread_set_perf_events(int cpu)
293{
294 int i, size;
295 struct perf_event *ev;
296
297 size = sizeof(struct perf_event_attr);
298 if (per_cpu(perfSet, cpu) == 0) {
299 int event_count = cpu_pmu->event_count[cpu];
300 struct met_pmu *pmu = cpu_pmu->pmu[cpu];
301 for (i = 0; i < event_count; i++) {
302 if (!pmu[i].mode)
303 continue; /* Skip disabled counters */
304 ev = perf_event_create(cpu, pmu[i].event, i);
305 if (ev == NULL) {
306 met_cpupmu.mode = 0;
307 met_perf_cpupmu_status = 0;
308
309 MET_TRACE("[MET_PMU] failed to register pmu event %4x\n", pmu[i].event);
310 pr_notice("[MET_PMU] failed to register pmu event %4x\n", pmu[i].event);
311 continue;
312 }
313
314 MET_TRACE("[MET_PMU] registered pmu slot: [%d] evt=%#04x\n", ev->hw.idx, pmu[i].event);
315 pr_debug("[MET_PMU] registered pmu slot: [%d] evt=%#04x\n", ev->hw.idx, pmu[i].event);
316
317 per_cpu(pevent, cpu)[i] = ev;
318 per_cpu(perfPrev, cpu)[i] = 0;
319 per_cpu(perfCurr, cpu)[i] = 0;
320 perf_event_enable(ev);
321 per_cpu(perfCntFirst, cpu)[i] = 1;
322 } /* for all PMU counter */
323 per_cpu(perfSet, cpu) = 1;
324 } /* for perfSet */
325
326 return 0;
327}
328
/*
 * Deferred-work entry scheduled on the target cpu: create that cpu's perf
 * events exactly once per session (guarded by perf_task_init_done).
 * NOTE(review): reads dwork->cpu — mainline struct delayed_work has no such
 * member, so this relies on a MET-patched kernel; confirm against local tree.
 */
static void perf_thread_setup(struct work_struct *work)
{
	int cpu;
	struct delayed_work *dwork = to_delayed_work(work);

	cpu = dwork->cpu;
	if (per_cpu(perf_task_init_done, cpu) == 0) {
		per_cpu(perf_task_init_done, cpu) = 1;
		perf_thread_set_perf_events(cpu);
	}
}
340
341static void met_perf_cpupmu_start(int cpu)
342{
343 if (met_cpupmu.mode == 0)
344 return;
345
346 per_cpu(perf_cpuid, cpu) = cpu;
347 if (per_cpu(perf_delayed_work_setup, cpu) == NULL) {
348 struct delayed_work *dwork = &per_cpu(cpu_pmu_dwork_setup, cpu);
349 INIT_DELAYED_WORK(dwork, perf_thread_setup);
350 dwork->cpu = cpu;
351 schedule_delayed_work_on(cpu, dwork, 0);
352 per_cpu(perf_delayed_work_setup, cpu) = dwork;
353 }
354}
355
356static void perf_thread_down(struct work_struct *work)
357{
358 struct delayed_work *dwork = to_delayed_work(work);
359 int cpu, i;
360 struct perf_event *ev;
361 int event_count;
362 struct met_pmu *pmu;
363
364 cpu = dwork->cpu;
365 if (per_cpu(perfSet, cpu) == 0)
366 return;
367
368 per_cpu(perfSet, cpu) = 0;
369 event_count = cpu_pmu->event_count[cpu];
370 pmu = cpu_pmu->pmu[cpu];
371 for (i = 0; i < event_count; i++) {
372 ev = per_cpu(pevent, cpu)[i];
373 if (ev != NULL) {
374 perf_event_release(cpu, ev);
375 per_cpu(pevent, cpu)[i] = NULL;
376 }
377 }
378 per_cpu(perf_task_init_done, cpu) = 0;
379 per_cpu(perf_delayed_work_setup, cpu) = NULL;
380}
381
382static void met_perf_cpupmu_stop(int cpu)
383{
384 struct delayed_work *dwork;
385
386 per_cpu(perf_cpuid, cpu) = cpu;
387 dwork = &per_cpu(cpu_pmu_dwork_down, cpu);
388 INIT_DELAYED_WORK(dwork, perf_thread_down);
389 dwork->cpu = cpu;
390 schedule_delayed_work_on(cpu, dwork, 0);
391}
392
/*
 * met device hook: bind the platform PMU backend and create this module's
 * sysfs files under @parent. Returns 0 on success or a negative errno.
 */
static int cpupmu_create_subfs(struct kobject *parent)
{
	int ret = 0;

	cpu_pmu = cpu_pmu_hw_init();
	if (cpu_pmu == NULL) {
		PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
		return -ENODEV;
	}

	kobj_cpu = parent;

/* expand KOBJ_ATTR_LIST into one sysfs_create_file() per attribute */
#define KOBJ_ATTR_ITEM(attr_name) \
	do { \
		ret = sysfs_create_file(kobj_cpu, &attr_name ## _attr.attr); \
		if (ret != 0) { \
			pr_notice("Failed to create " #attr_name " in sysfs\n"); \
			return ret; \
		} \
	} while (0)
	KOBJ_ATTR_LIST;
#undef KOBJ_ATTR_ITEM

	return 0;
}
418
/*
 * met device hook: remove the sysfs files created by cpupmu_create_subfs().
 * Safe to call when create never ran (kobj_cpu == NULL).
 */
static void cpupmu_delete_subfs(void)
{
/* expand KOBJ_ATTR_LIST into one sysfs_remove_file() per attribute */
#define KOBJ_ATTR_ITEM(attr_name) \
	sysfs_remove_file(kobj_cpu, &attr_name ## _attr.attr)

	if (kobj_cpu != NULL) {
		KOBJ_ATTR_LIST;
		kobj_cpu = NULL;
	}
#undef KOBJ_ATTR_ITEM
}
430
/*
 * Periodic polling hook (met_cpupmu.timed_polling). Reads this cpu's PMU
 * counters and emits them via mp_cpu(); skipped while the cpu is offline.
 * In MET-driver mode with CPU_PM reconfiguration enabled, counts captured
 * by the cpu_pm notifier across idle are added back before reporting.
 */
void met_perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	if (per_cpu(cpu_status, cpu) != MET_CPU_ONLINE)
		return;

	if (met_cpu_pmu_method) {
		/* perf-API mode: deltas come from the kernel perf events */
		perf_cpupmu_polling(stamp, cpu);
	} else {
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);

#ifdef CONFIG_CPU_PM
		/* add the counts saved by cpu_pm_pmu_notify() before power-down */
		if (met_cpu_pm_pmu_reconfig) {
			int ii;
			for (ii = 0; ii < count; ii ++)
				pmu_value[ii] += cpu_pmu->cpu_pm_unpolled_loss[cpu][ii];
		}
#endif

		mp_cpu(count, pmu_value);

#ifdef CONFIG_CPU_PM
		/* the loss has been reported once; reset the accumulator */
		if (met_cpu_pm_pmu_reconfig) {
			memset(cpu_pmu->cpu_pm_unpolled_loss[cpu], 0, sizeof (cpu_pmu->cpu_pm_unpolled_loss[0]));
		}
#endif
	}
}
461
/*
 * Per-cpu start hook: begin counting on the calling cpu and mark it online.
 * In MET-driver mode the hardware counters are programmed directly; in
 * perf-API mode the setup is deferred to a per-cpu work item.
 */
static void cpupmu_start(void)
{
	int cpu = raw_smp_processor_id();

	if (met_cpu_pmu_method)
		met_perf_cpupmu_start(cpu);
	else {
		nr_arg[cpu] = 0;
		cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
	}
	met_perf_cpupmu_status = 1;
	per_cpu(cpu_status, cpu) = MET_CPU_ONLINE;
}
475
476
477static void cpupmu_unique_start(void)
478{
479#ifdef CPUPMU_V8_2
480 int ret = 0;
481 if (mtk_pmu_event_enable == 1){
482 ret = cpu_pmu_debug_init();
483 if (ret == 0)
484 PR_BOOTMSG("Failed to init CPU PMU debug!!\n");
485 }
486#endif
487
488#ifdef CONFIG_CPU_PM
489 use_cpu_pm_pmu_notifier = 0;
490 if (met_cpu_pm_pmu_reconfig) {
491 if (met_cpu_pmu_method) {
492 met_cpu_pm_pmu_reconfig = 0;
493 MET_TRACE("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
494 pr_debug("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
495 } else {
496 memset(cpu_pmu->cpu_pm_unpolled_loss, 0, sizeof (cpu_pmu->cpu_pm_unpolled_loss));
497 cpu_pm_register_notifier(&cpu_pm_pmu_notifier);
498 use_cpu_pm_pmu_notifier = 1;
499 }
500 }
501#else
502 if (met_cpu_pm_pmu_reconfig) {
503 met_cpu_pm_pmu_reconfig = 0;
504 MET_TRACE("[MET_PMU] CONFIG_CPU_PM=%d, met_cpu_pm_pmu_reconfig forced disabled\n", CONFIG_CPU_PM);
505 pr_debug("[MET_PMU] CONFIG_CPU_PM=%d, met_cpu_pm_pmu_reconfig forced disabled\n", CONFIG_CPU_PM);
506 }
507#endif
508 MET_TRACE("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
509 pr_debug("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
510
511 return;
512}
513
/*
 * Per-cpu stop hook: stop counting on the calling cpu. The global status
 * flag is cleared first so the PM notifier stops touching the counters.
 */
static void cpupmu_stop(void)
{
	int cpu = raw_smp_processor_id();

	met_perf_cpupmu_status = 0;
	if (met_cpu_pmu_method)
		met_perf_cpupmu_stop(cpu);
	else
		cpu_pmu->stop(cpu_pmu->event_count[cpu]);
}
524
/*
 * Per-session stop hook: undo what cpupmu_unique_start() set up
 * (MTK debug events, cpu_pm notifier registration).
 */
static void cpupmu_unique_stop(void)
{
#ifdef CPUPMU_V8_2
	if (mtk_pmu_event_enable == 1)
		cpu_pmu_debug_uninit();
#endif

#ifdef CONFIG_CPU_PM
	/* only unregister if uniq_start actually registered the notifier */
	if (use_cpu_pm_pmu_notifier) {
		cpu_pm_unregister_notifier(&cpu_pm_pmu_notifier);
	}
#endif
	return;
}
539
/* trace header: reports the cpu cache line size */
static const char cache_line_header[] =
	"met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
/* trace header: per-cpu list of configured PMU event numbers */
static const char header[] =
	"met-info [000] 0.0: met_cpu_header_v2: %d";

/* usage text emitted by cpupmu_print_help() */
static const char help[] =
	" --pmu-cpu-evt=[cpu_list:]event_list select CPU-PMU events in %s\n"
	" cpu_list: specify the cpu_id list or apply to all the cores\n"
	" example: 0,1,2\n"
	" event_list: specify the event number\n"
	" example: 0x8,0xff\n";
551
/*
 * met device hook: write the --pmu-cpu-evt usage text into @buf.
 * NOTE(review): bounds with PAGE_SIZE instead of @len, like the other print
 * hooks in this file — assumes callers always pass a full page; confirm.
 */
static int cpupmu_print_help(char *buf, int len)
{
	return snprintf(buf, PAGE_SIZE, help, cpu_pmu->cpu_name);
}
556
557static int reset_driver_stat(void)
558{
559 int cpu, i;
560 int event_count;
561 struct met_pmu *pmu;
562
563 met_cpupmu.mode = 0;
564 for_each_possible_cpu(cpu) {
565 event_count = cpu_pmu->event_count[cpu];
566 pmu = cpu_pmu->pmu[cpu];
567 counter_cnt[cpu] = 0;
568 nr_arg[cpu] = 0;
569 for (i = 0; i < event_count; i++) {
570 pmu[i].mode = MODE_DISABLED;
571 pmu[i].event = 0;
572 pmu[i].freq = 0;
573 }
574 }
575
576 return 0;
577}
578
579static int cpupmu_print_header(char *buf, int len)
580{
581 int cpu, i, ret, first;
582 int event_count;
583 struct met_pmu *pmu;
584
585 ret = 0;
586
587 /*append CPU PMU access method*/
588 if (met_cpu_pmu_method)
589 ret += snprintf(buf + ret, PAGE_SIZE,
590 "met-info [000] 0.0: CPU_PMU_method: perf APIs\n");
591 else
592 ret += snprintf(buf + ret, PAGE_SIZE,
593 "met-info [000] 0.0: CPU_PMU_method: MET pmu driver\n");
594
595 /*append cache line size*/
596 ret += snprintf(buf + ret, PAGE_SIZE - ret, cache_line_header, cache_line_size());
597 ret += snprintf(buf + ret, PAGE_SIZE - ret, "# mp_cpu: pmu_value1, ...\n");
598
599 for_each_possible_cpu(cpu) {
600 event_count = cpu_pmu->event_count[cpu];
601 pmu = cpu_pmu->pmu[cpu];
602 first = 1;
603 for (i = 0; i < event_count; i++) {
604 if (pmu[i].mode == 0)
605 continue;
606 if (first) {
607 ret += snprintf(buf + ret, PAGE_SIZE - ret, header, cpu);
608 first = 0;
609 }
610 ret += snprintf(buf + ret, PAGE_SIZE - ret, ",0x%x", pmu[i].event);
611 pmu[i].mode = 0;
612 }
613 if (!first)
614 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
615 }
616
617 reset_driver_stat();
618
619 return ret;
620}
621
/*
 * Split @arg (length @len, not necessarily NUL-terminated) on ',' and
 * parse each token as a number into @list (capacity @list_cnt).
 * Commas are overwritten with '\0' in place. Returns the number of values
 * parsed, or -1 on overflow of @list or a malformed token.
 */
static int met_parse_num_list(char *arg, int len, int *list, int list_cnt)
{
	int parsed = 0;

	while (len) {
		char *token = arg;
		int token_len = 0;

		if (list_cnt <= 0)
			return -1;

		/* advance to the next ',' (or end of input), terminating the token */
		while (len) {
			len--;
			if (*arg == ',') {
				*(arg++) = '\0';
				break;
			}
			arg++;
			token_len++;
		}

		if (met_parse_num(token, list, token_len) < 0)
			return -1;

		list++;
		list_cnt--;
		parsed++;
	}

	return parsed;
}
652
/*
 * met device hook: parse one "--pmu-cpu-evt=[cpu_list:]event_list" argument
 * and program the requested events into the per-cpu slot tables.
 * Returns 0 on success; on any error the whole driver state is reset and
 * -EINVAL is returned.
 *
 * Slot allocation differs by mode:
 *  - perf-API mode: all slots are general purpose.
 *  - MET-driver mode: event 0xff (cycle counter) always occupies the last
 *    slot; other events fill slots 0..n-2.
 */
static int cpupmu_process_argument(const char *arg, int len)
{
	char *arg1 = (char*)arg;
	int len1 = len;
	int cpu, cpu_list[MXNR_CPU];
	int nr_events, event_list[MXNR_PMU_EVENTS];
	int i;
	int nr_counters;
	struct met_pmu *pmu;
	int arg_nr;
	int event_no;

	/*
	 * split cpu_list and event_list by ':'
	 * arg, len: cpu_list when found (i < len)
	 * arg1, len1: event_list
	 */
	for (i = 0; i < len; i++) {
		if (arg[i] == ':') {
			arg1[i] = '\0';
			arg1 += i+1;
			len1 = len - i - 1;
			len = i;
			break;
		}
	}

	/*
	 * setup cpu_list array
	 * 1: selected
	 * 0: unselected
	 */
	if (arg1 != arg) { /* is cpu_id list specified? */
		int list[MXNR_CPU], cnt;
		int cpu_id;
		if ((cnt = met_parse_num_list((char*)arg, len, list, ARRAY_SIZE(list))) <= 0)
			goto arg_out;
		memset(cpu_list, 0, sizeof(cpu_list));
		for (i = 0; i < cnt; i++) {
			cpu_id = list[i];
			/* reject out-of-range cpu ids */
			if (cpu_id < 0 || cpu_id >= ARRAY_SIZE(cpu_list))
				goto arg_out;
			cpu_list[cpu_id] = 1;
		}
	}
	else
		memset(cpu_list, 1, sizeof(cpu_list));

	/* get event_list */
	if ((nr_events = met_parse_num_list(arg1, len1, event_list, ARRAY_SIZE(event_list))) <= 0)
		goto arg_out;

	/* for each cpu in cpu_list, add all the events in event_list */
	for_each_possible_cpu(cpu) {
		pmu = cpu_pmu->pmu[cpu];
		arg_nr = nr_arg[cpu];

		if (cpu_list[cpu] == 0)
			continue;

		if (met_cpu_pmu_method) {
			nr_counters = perf_num_counters();
		} else {
			nr_counters = cpu_pmu->event_count[cpu];
		}

		pr_debug("[MET_PMU] pmu slot count=%d\n", nr_counters);

		if (nr_counters == 0)
			goto arg_out;

		for (i = 0; i < nr_events; i++) {
			event_no = event_list[i];
			/*
			 * check if event is duplicate,
			 * but may not include 0xff when met_cpu_pmu_method == 0.
			 */
			if (cpu_pmu->check_event(pmu, arg_nr, event_no) < 0)
				goto arg_out;

			/*
			 * test if this event is available when in perf_APIs mode
			 */
			if (met_cpu_pmu_method) {
				struct perf_event *ev;
				/* trial allocation only; released immediately */
				ev = perf_event_create(cpu, event_no, arg_nr);
				if (ev == NULL) {
					pr_debug("!!!!!!!! [MET_PMU] failed pmu alloction test (event_no=%#04x)\n", event_no);
				} else {
					perf_event_release(cpu, ev);
				}
			}

			if (met_cpu_pmu_method) {
				/* perf-API mode: slots are fully general purpose */
				if (arg_nr >= nr_counters)
					goto arg_out;
				pmu[arg_nr].mode = MODE_POLLING;
				pmu[arg_nr].event = event_no;
				pmu[arg_nr].freq = 0;
				arg_nr++;
			} else {
				if (event_no == 0xff) {
					/* cycle counter lives in the dedicated last slot */
					if (pmu[nr_counters-1].mode == MODE_POLLING)
						goto arg_out;
					pmu[nr_counters-1].mode = MODE_POLLING;
					pmu[nr_counters-1].event = 0xff;
					pmu[nr_counters-1].freq = 0;
				} else {
					/* general events may not take the last slot */
					if (arg_nr >= (nr_counters - 1))
						goto arg_out;
					pmu[arg_nr].mode = MODE_POLLING;
					pmu[arg_nr].event = event_no;
					pmu[arg_nr].freq = 0;
					arg_nr++;
				}
			}
			counter_cnt[cpu]++;
		}
		nr_arg[cpu] = arg_nr;
	}

	met_cpupmu.mode = 1;
	return 0;

arg_out:
	reset_driver_stat();
	return -EINVAL;
}
781
782
/*
 * met device hook: record cpu hotplug state. In perf-API mode, when a cpu
 * goes offline, also disable the PMU's percpu interrupt so the dying cpu
 * does not take further PMU irqs; the irq is looked up through the first
 * registered perf event's arm_pmu platform device.
 */
static void cpupmu_cpu_state_notify(long cpu, unsigned long action)
{
	per_cpu(cpu_status, cpu) = action;

#if (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
	if (met_cpu_pmu_method && action == MET_CPU_OFFLINE) {
		struct perf_event *event = NULL;
		struct arm_pmu *armpmu = NULL;
		struct platform_device *pmu_device = NULL;
		int irq = 0;

		/* slot 0 is representative: all events share the same arm_pmu */
		event = per_cpu(pevent, cpu)[0];
		if (event)
			armpmu = to_arm_pmu(event->pmu);
		pr_debug("!!!!!!!! %s_%ld, event=%p\n", __FUNCTION__, cpu, event);

		if (armpmu)
			pmu_device = armpmu->plat_device;
		pr_debug("!!!!!!!! %s_%ld, armpmu=%p\n", __FUNCTION__, cpu, armpmu);

		if (pmu_device)
			irq = platform_get_irq(pmu_device, 0);
		pr_debug("!!!!!!!! %s_%ld, pmu_device=%p\n", __FUNCTION__, cpu, pmu_device);

		if (irq > 0)
			disable_percpu_irq(irq);
		pr_debug("!!!!!!!! %s_%ld, irq=%d\n", __FUNCTION__, cpu, irq);
	}
#endif
}
813
814
/* MET device descriptor for the "cpu" PMU provider; the hooks above are
 * dispatched by the MET core through this table.
 */
struct metdevice met_cpupmu = {
	.name = "cpu",
	.type = MET_TYPE_PMU,
	.cpu_related = 1,		/* start/stop run once per cpu */
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.uniq_start = cpupmu_unique_start,
	.stop = cpupmu_stop,
	.uniq_stop = cpupmu_unique_stop,
	.polling_interval = 1,
	.timed_polling = met_perf_cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument,
	.cpu_state_notify = cpupmu_cpu_state_notify
};