/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13#include <linux/cpu.h>
14#include <linux/cpu_pm.h>
15#include <linux/perf_event.h>
16
17#if (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
18#include <linux/platform_device.h>
19#include <linux/perf/arm_pmu.h>
20#endif
21
22#include "met_drv.h"
23#include "met_kernel_symbol.h"
24#include "interface.h"
25#include "trace.h"
26#include "cpu_pmu.h"
27
/* Active PMU backend; set once by cpupmu_create_subfs() via cpu_pmu_hw_init(). */
struct cpu_pmu_hw *cpu_pmu;
/* Number of events configured per CPU; used as a sanity check before emitting samples. */
static int counter_cnt[MXNR_CPU];
/* Next free counter slot per CPU while parsing --pmu-cpu-evt arguments. */
static int nr_arg[MXNR_CPU];

/* Nonzero while sampling is active (set in cpupmu_start, cleared in cpupmu_stop). */
int met_perf_cpupmu_status;
33
#ifdef CONFIG_CPU_PM
/* Set while cpu_pm_pmu_notifier is registered (see cpupmu_unique_start/stop). */
static int use_cpu_pm_pmu_notifier = 0;

/* helper notifier for maintaining pmu states before cpu state transition */
static int cpu_pm_pmu_notify(struct notifier_block *b,
			unsigned long cmd,
			void *p)
{
	int ii;
	int cpu, count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	/* Ignore PM transitions while MET sampling is not running. */
	if (!met_perf_cpupmu_status)
		return NOTIFY_OK;

	cpu = raw_smp_processor_id();

	switch (cmd) {
	case CPU_PM_ENTER:
		/*
		 * The core is about to lose PMU state: read out the current
		 * counters and accumulate them so the next poll does not lose
		 * the counts collected since the previous poll.
		 */
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);
		for (ii = 0; ii < count; ii ++)
			cpu_pmu->cpu_pm_unpolled_loss[cpu][ii] += pmu_value[ii];

		cpu_pmu->stop(cpu_pmu->event_count[cpu]);
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* Reprogram the counters after the (attempted) low-power exit. */
		cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

struct notifier_block cpu_pm_pmu_notifier = {
	.notifier_call = cpu_pm_pmu_notify,
};
#endif
73
/* Latest raw counter reads (per CPU, per event slot). */
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfCurr);
/* Previous raw counter reads; deltas are computed against these. */
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfPrev);
/* 1 until the first read of a slot has been discarded (first delta is meaningless). */
static DEFINE_PER_CPU(int[MXNR_PMU_EVENTS], perfCntFirst);
/* perf_event handles created through the perf kernel API (perf-method mode only). */
static DEFINE_PER_CPU(struct perf_event * [MXNR_PMU_EVENTS], pevent);
static DEFINE_PER_CPU(struct perf_event_attr [MXNR_PMU_EVENTS], pevent_attr);
/* 1 once the perf events of this CPU have been created. */
static DEFINE_PER_CPU(int, perfSet);
static DEFINE_PER_CPU(unsigned int, perf_task_init_done);
static DEFINE_PER_CPU(int, perf_cpuid);
/* Deferred setup/teardown work items, scheduled on the target CPU itself. */
static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork_setup);
static DEFINE_PER_CPU(struct delayed_work*, perf_delayed_work_setup);
static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork_down);
/* Last hotplug state seen for this CPU (CPU_ONLINE etc.). */
static DEFINE_PER_CPU(int, cpu_status);
86
87#ifdef CPUPMU_V8_2
88#include <linux/of.h>
89#include <linux/of_address.h>
90#include <mt-plat/sync_write.h>
91#include <mt-plat/mtk_io.h>
92
static char mcucfg_desc[] = "mediatek,mcucfg";
static void __iomem *mcucfg_base = NULL;
/* DBG_CONTROL registers for cpu6/cpu7 inside mcucfg; offsets presumed per SoC datasheet — verify. */
#define DBG_CONTROL_CPU6 ((unsigned long)mcucfg_base + 0x3000 + 0x308) /* DBG_CONTROL */
#define DBG_CONTROL_CPU7 ((unsigned long)mcucfg_base + 0x3800 + 0x308) /* DBG_CONTROL */
#define ENABLE_MTK_PMU_EVENTS_OFFSET 1
/* 1 if the enable bit was already set before we touched it (so uninit leaves it alone). */
static int restore_dbg_ctrl_cpu6;
static int restore_dbg_ctrl_cpu7;
100
/*
 * Map the mcucfg block (from DT) and set the "enable MTK PMU events" bit in
 * DBG_CONTROL of cpu6/cpu7 so vendor-internal PMU events become countable.
 * Remembers the prior bit state so cpu_pmu_debug_uninit() can restore it.
 * Returns 1 on success, 0 on failure (mapping released on failure).
 */
int cpu_pmu_debug_init(void)
{
	struct device_node *node = NULL;
	unsigned int value6,value7;

	/*for A75 MTK internal event*/
	if (mcucfg_base == NULL) {
		node = of_find_compatible_node(NULL, NULL, mcucfg_desc);
		if (node == NULL) {
			MET_TRACE("[MET_PMU_DB] of_find node == NULL\n");
			pr_debug("!!!!!!!! [MET_PMU_DB] of_find node == NULL\n");
			goto out;
		}
		mcucfg_base = of_iomap(node, 0);
		of_node_put(node);
		if (mcucfg_base == NULL) {
			MET_TRACE("[MET_PMU_DB] mcucfg_base == NULL\n");
			pr_debug("!!!!!!!! [MET_PMU_DB] mcucfg_base == NULL\n");
			goto out;
		}
		MET_TRACE("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
		pr_debug("!!!!!!!! [MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
	}

	/* Set the enable bit only if it is not already set; remember which case applied. */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	if (value6 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu6 = 1;
	} else {
		restore_dbg_ctrl_cpu6 = 0;
		mt_reg_sync_writel(value6 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU6);
	}

	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	if (value7 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu7 = 1;
	} else {
		restore_dbg_ctrl_cpu7 = 0;
		mt_reg_sync_writel(value7 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU7);
	}

	/* Read back for the trace log so the final register state is recorded. */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("!!!!!!!! [MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	return 1;

out:
	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL init error");
	pr_debug("!!!!!!!! [MET_PMU_DB]DBG_CONTROL init error");
	return 0;
}
156
/*
 * Undo cpu_pmu_debug_init(): clear the enable bit only if we were the ones
 * who set it (restore_dbg_ctrl_cpuX == 0), then unmap mcucfg and reset state.
 * Always returns 1.
 */
int cpu_pmu_debug_uninit(void)
{
	unsigned int value6,value7;

	if (restore_dbg_ctrl_cpu6 == 0) {
		value6 = readl(IOMEM(DBG_CONTROL_CPU6));
		mt_reg_sync_writel(value6 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU6);
	}
	if (restore_dbg_ctrl_cpu7 == 0) {
		value7 = readl(IOMEM(DBG_CONTROL_CPU7));
		mt_reg_sync_writel(value7 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU7);
	}

	/* Log the final register state before tearing the mapping down. */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("!!!!!!!! [MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);

	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	restore_dbg_ctrl_cpu6 = 0;
	restore_dbg_ctrl_cpu7 = 0;
	return 1;
}
183#endif
184
185
/*
 * Emit one PMU sample (cnt counter values) into the MET trace via
 * MET_GENERAL_PRINT.  noinline presumably keeps this a distinct symbol
 * for the trace post-processor — confirm against MET tooling.
 */
noinline void mp_cpu(unsigned char cnt, unsigned int *value)
{
	MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}
190
/* No-op overflow handler: events are created with sample_period == 0 and only polled. */
static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
		struct pt_regs *regs)
{
	/*
	 * Required as perf_event_create_kernel_counter() requires an overflow handler,
	 * even though all we do is poll.
	 */
}
199
/*
 * Poll all active perf events of @cpu, compute per-event deltas since the
 * previous poll, and emit them as one trace sample.  The very first read of
 * each event is swallowed (perfCntFirst) because its delta is meaningless.
 * The sample is only emitted when every configured counter produced a value,
 * so partial reads never reach the trace.
 */
static void perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int event_count = cpu_pmu->event_count[cpu];
	struct met_pmu *pmu = cpu_pmu->pmu[cpu];
	int i, count;
	unsigned long long delta;
	struct perf_event *ev;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	/* Events not created yet (or already torn down) for this CPU. */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	count = 0;
	for (i = 0; i < event_count; i++) {
		if (pmu[i].mode == 0)
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			per_cpu(perfCurr, cpu)[i] = met_perf_event_read_local_symbol(ev);
			delta = (per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i]);
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			if (per_cpu(perfCntFirst, cpu)[i] == 1) {
				/* we shall omit delta counter when we get first counter */
				per_cpu(perfCntFirst, cpu)[i] = 0;
				continue;
			}
			pmu_value[count] = (unsigned int)delta;
			count++;
		}
	}

	/* Emit only complete samples: one value per configured counter. */
	if (count == counter_cnt[cpu])
		mp_cpu(count, pmu_value);
}
235
/*
 * Create a pinned kernel counter for @event on @cpu, using the per-cpu attr
 * slot @count as backing storage.  event == 0xff is the cycle-counter alias
 * and maps to PERF_COUNT_HW_CPU_CYCLES; anything else is a raw event number.
 * Returns the event on success or NULL on failure (the event is released if
 * it could not be brought to the ACTIVE state).
 */
static struct perf_event* perf_event_create(int cpu, unsigned short event, int count)
{
	struct perf_event_attr *ev_attr;
	struct perf_event *ev;

	ev_attr = per_cpu(pevent_attr, cpu)+count;
	memset(ev_attr, 0, sizeof(*ev_attr));
	if (event == 0xff) {
		ev_attr->config = PERF_COUNT_HW_CPU_CYCLES;
		ev_attr->type = PERF_TYPE_HARDWARE;
	} else {
		ev_attr->config = event;
		ev_attr->type = PERF_TYPE_RAW;
	}
	ev_attr->size = sizeof(*ev_attr);
	ev_attr->sample_period = 0;	/* counting mode only; we poll, never sample */
	ev_attr->pinned = 1;

	ev = perf_event_create_kernel_counter(ev_attr, cpu, NULL, dummy_handler, NULL);
	if (IS_ERR(ev)) {
		pr_debug("!!!!!!!! cpu-%d: [MET_PMU] failed perf_event_create_kernel_counter ev is NULL\n", cpu);
		return NULL;
	}
	/* do {} while (0) used as a structured "break on success" block. */
	do {
		if (ev->state == PERF_EVENT_STATE_ACTIVE) {
			break;
		}
		if (ev->state == PERF_EVENT_STATE_ERROR) {
			/* One retry: try to enable a counter that came up in ERROR state. */
			pr_debug("!!!!!!!! cpu-%d: [MET_PMU] ev->state == PERF_EVENT_STATE_ERROR\n", cpu);
			perf_event_enable(ev);
			if (ev->state == PERF_EVENT_STATE_ACTIVE)
				break;
		}
		/* Still not active: give the counter back and report failure. */
		perf_event_release_kernel(ev);
		return NULL;
	} while (0);

	return ev;
}
275
276static void perf_event_release(int cpu, struct perf_event *ev)
277{
278 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
279 if (ev->state == PERF_EVENT_STATE_ACTIVE)
280 perf_event_disable(ev);
281 perf_event_release_kernel(ev);
282}
283
/*
 * Create one perf event per enabled counter slot of @cpu (idempotent: does
 * nothing if perfSet is already 1).  On a per-event failure it disables MET
 * CPU-PMU sampling globally (met_cpupmu.mode / met_perf_cpupmu_status) but
 * keeps creating the remaining events.  Always returns 0.
 * Runs on @cpu itself, scheduled through perf_thread_setup().
 */
static int perf_thread_set_perf_events(int cpu)
{
	int i, size;
	struct perf_event *ev;

	pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
	size = sizeof(struct perf_event_attr);
	if (per_cpu(perfSet, cpu) == 0) {
		int event_count = cpu_pmu->event_count[cpu];
		struct met_pmu *pmu = cpu_pmu->pmu[cpu];
		for (i = 0; i < event_count; i++) {
			if (!pmu[i].mode)
				continue;	/* Skip disabled counters */
			ev = perf_event_create(cpu, pmu[i].event, i);
			if (ev == NULL) {
				/* Creation failed: stop sampling but keep trying the rest. */
				met_cpupmu.mode = 0;
				met_perf_cpupmu_status = 0;

				MET_TRACE("[MET_PMU] failed to register pmu event %4x\n", pmu[i].event);
				pr_debug("!!!!!!!! [MET_PMU] failed to register pmu event %4x\n", pmu[i].event);
				continue;
			}

			MET_TRACE("[MET_PMU] registered pmu slot: [%d] evt=%#04x\n", ev->hw.idx, pmu[i].event);
			pr_debug("!!!!!!!! [MET_PMU] registered pmu slot: [%d] evt=%#04x\n", ev->hw.idx, pmu[i].event);

			per_cpu(pevent, cpu)[i] = ev;
			per_cpu(perfPrev, cpu)[i] = 0;
			per_cpu(perfCurr, cpu)[i] = 0;
			perf_event_enable(ev);
			/* First read after enable is discarded by the polling loop. */
			per_cpu(perfCntFirst, cpu)[i] = 1;
		} /* for all PMU counter */
		per_cpu(perfSet, cpu) = 1;
	} /* for perfSet */

	return 0;
}
321
322static void perf_thread_setup(struct work_struct *work)
323{
324 int cpu;
325 struct delayed_work *dwork = to_delayed_work(work);
326
327 cpu = dwork->cpu;
328 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
329 if (per_cpu(perf_task_init_done, cpu) == 0) {
330 per_cpu(perf_task_init_done, cpu) = 1;
331 perf_thread_set_perf_events(cpu);
332 }
333}
334
335static void met_perf_cpupmu_start(int cpu)
336{
337 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
338 if (met_cpupmu.mode == 0)
339 return;
340
341 per_cpu(perf_cpuid, cpu) = cpu;
342 if (per_cpu(perf_delayed_work_setup, cpu) == NULL) {
343 struct delayed_work *dwork = &per_cpu(cpu_pmu_dwork_setup, cpu);
344 INIT_DELAYED_WORK(dwork, perf_thread_setup);
345 dwork->cpu = cpu;
346 schedule_delayed_work_on(cpu, dwork, 0);
347 per_cpu(perf_delayed_work_setup, cpu) = dwork;
348 }
349}
350
/*
 * Delayed-work entry point that releases all perf events of this CPU.
 * perfSet is cleared FIRST so a concurrent poll bails out before touching
 * events that are about to be released.  Also rearms the setup path
 * (perf_task_init_done / perf_delayed_work_setup) for the next start.
 */
static void perf_thread_down(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	int cpu, i;
	struct perf_event *ev;
	int event_count;
	struct met_pmu *pmu;

	cpu = dwork->cpu;
	pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
	if (per_cpu(perfSet, cpu) == 0)
		return;

	/* Clear the flag before releasing so pollers see a consistent state. */
	per_cpu(perfSet, cpu) = 0;
	event_count = cpu_pmu->event_count[cpu];
	pmu = cpu_pmu->pmu[cpu];
	for (i = 0; i < event_count; i++) {
		ev = per_cpu(pevent, cpu)[i];
		if (ev != NULL) {
			perf_event_release(cpu, ev);
			per_cpu(pevent, cpu)[i] = NULL;
		}
	}
	per_cpu(perf_task_init_done, cpu) = 0;
	per_cpu(perf_delayed_work_setup, cpu) = NULL;
}
377
378static void met_perf_cpupmu_stop(int cpu)
379{
380 struct delayed_work *dwork;
381
382 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
383 per_cpu(perf_cpuid, cpu) = cpu;
384 dwork = &per_cpu(cpu_pmu_dwork_down, cpu);
385 INIT_DELAYED_WORK(dwork, perf_thread_down);
386 dwork->cpu = cpu;
387 schedule_delayed_work_on(cpu, dwork, 0);
388}
389
390static int cpupmu_create_subfs(struct kobject *parent)
391{
392 cpu_pmu = cpu_pmu_hw_init();
393 if (cpu_pmu == NULL) {
394 PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
395 return -ENODEV;
396 }
397
398 return 0;
399}
400
/* metdevice delete hook: nothing to tear down (create hook makes no sysfs nodes). */
static void cpupmu_delete_subfs(void)
{
}
404
/*
 * Periodic polling entry (metdevice.timed_polling).  Dispatches to the perf
 * API path or the direct MET pmu-driver path depending on met_cpu_pmu_method.
 * In the driver path, counts accumulated by the CPU-PM notifier while the
 * core was in a low-power state (cpu_pm_unpolled_loss) are added back in and
 * then cleared, so no events are lost across idle transitions.
 */
void met_perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	/* Skip CPUs that are not currently online. */
	if (per_cpu(cpu_status, cpu) != CPU_ONLINE)
		return;

	if (met_cpu_pmu_method) {
		perf_cpupmu_polling(stamp, cpu);
	} else {
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);

#ifdef CONFIG_CPU_PM
		if (met_cpu_pm_pmu_reconfig) {
			int ii;
			/* Fold in counts captured by cpu_pm_pmu_notify() during idle. */
			for (ii = 0; ii < count; ii ++)
				pmu_value[ii] += cpu_pmu->cpu_pm_unpolled_loss[cpu][ii];
		}
#endif

		mp_cpu(count, pmu_value);

#ifdef CONFIG_CPU_PM
		if (met_cpu_pm_pmu_reconfig) {
			memset(cpu_pmu->cpu_pm_unpolled_loss[cpu], 0, sizeof (cpu_pmu->cpu_pm_unpolled_loss[0]));
		}
#endif
	}
}
435
436static void cpupmu_start(void)
437{
438 int cpu = raw_smp_processor_id();
439
440 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
441 if (met_cpu_pmu_method)
442 met_perf_cpupmu_start(cpu);
443 else {
444 nr_arg[cpu] = 0;
445 cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
446 }
447 met_perf_cpupmu_status = 1;
448 per_cpu(cpu_status, cpu) = CPU_ONLINE;
449}
450
451
452static void cpupmu_unique_start(void)
453{
454 int cpu = raw_smp_processor_id();
455
456 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
457#ifdef CPUPMU_V8_2
458 int ret = 0;
459 ret = cpu_pmu_debug_init();
460 if (ret == 0)
461 PR_BOOTMSG("Failed to init CPU PMU debug!!\n");
462#endif
463
464#ifdef CONFIG_CPU_PM
465 use_cpu_pm_pmu_notifier = 0;
466 if (met_cpu_pm_pmu_reconfig) {
467 if (met_cpu_pmu_method) {
468 met_cpu_pm_pmu_reconfig = 0;
469 MET_TRACE("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
470 pr_debug("!!!!!!!! [MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
471 } else {
472 memset(cpu_pmu->cpu_pm_unpolled_loss, 0, sizeof (cpu_pmu->cpu_pm_unpolled_loss));
473 cpu_pm_register_notifier(&cpu_pm_pmu_notifier);
474 use_cpu_pm_pmu_notifier = 1;
475 }
476 }
477#else
478 if (met_cpu_pm_pmu_reconfig) {
479 met_cpu_pm_pmu_reconfig = 0;
480 MET_TRACE("[MET_PMU] CONFIG_CPU_PM=%d, met_cpu_pm_pmu_reconfig forced disabled\n", CONFIG_CPU_PM);
481 pr_debug("!!!!!!!! [MET_PMU] CONFIG_CPU_PM=%d, met_cpu_pm_pmu_reconfig forced disabled\n", CONFIG_CPU_PM);
482 }
483#endif
484 MET_TRACE("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
485 pr_debug("!!!!!!!! [MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
486
487 return;
488}
489
490static void cpupmu_stop(void)
491{
492 int cpu = raw_smp_processor_id();
493
494 pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
495 met_perf_cpupmu_status = 0;
496 if (met_cpu_pmu_method)
497 met_perf_cpupmu_stop(cpu);
498 else
499 cpu_pmu->stop(cpu_pmu->event_count[cpu]);
500}
501
/*
 * One-time (uniq_stop) hook: restore the MTK debug-control bits and
 * unregister the CPU-PM notifier if cpupmu_unique_start() registered it.
 */
static void cpupmu_unique_stop(void)
{
	int cpu = raw_smp_processor_id();

	pr_debug("!!!!!!!! %s cpu-%d\n", __FUNCTION__, cpu);
#ifdef CPUPMU_V8_2
	cpu_pmu_debug_uninit();
#endif

#ifdef CONFIG_CPU_PM
	if (use_cpu_pm_pmu_notifier) {
		cpu_pm_unregister_notifier(&cpu_pm_pmu_notifier);
	}
#endif
	return;
}
518
/* Format strings for the met-info trace header emitted by cpupmu_print_header(). */
static const char cache_line_header[] =
	"met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
static const char header[] =
	"met-info [000] 0.0: met_cpu_header_v2: %d";

/* Usage text for the --pmu-cpu-evt option (%s is the CPU/PMU name). */
static const char help[] =
	"  --pmu-cpu-evt=[cpu_list:]event_list   select CPU-PMU events in %s\n"
	"                                         cpu_list: specify the cpu_id list or apply to all the cores\n"
	"                                             example: 0,1,2\n"
	"                                         event_list: specify the event number\n"
	"                                             example: 0x8,0xff\n";
530
531static int cpupmu_print_help(char *buf, int len)
532{
533 return snprintf(buf, PAGE_SIZE, help, cpu_pmu->cpu_name);
534}
535
536static int reset_driver_stat(void)
537{
538 int cpu, i;
539 int event_count;
540 struct met_pmu *pmu;
541
542 met_cpupmu.mode = 0;
543 for_each_possible_cpu(cpu) {
544 event_count = cpu_pmu->event_count[cpu];
545 pmu = cpu_pmu->pmu[cpu];
546 counter_cnt[cpu] = 0;
547 nr_arg[cpu] = 0;
548 for (i = 0; i < event_count; i++) {
549 pmu[i].mode = MODE_DISABLED;
550 pmu[i].event = 0;
551 pmu[i].freq = 0;
552 }
553 }
554
555 return 0;
556}
557
558static int cpupmu_print_header(char *buf, int len)
559{
560 int cpu, i, ret, first;
561 int event_count;
562 struct met_pmu *pmu;
563
564 ret = 0;
565
566 /*append CPU PMU access method*/
567 if (met_cpu_pmu_method)
568 ret += snprintf(buf + ret, PAGE_SIZE,
569 "met-info [000] 0.0: CPU_PMU_method: perf APIs\n");
570 else
571 ret += snprintf(buf + ret, PAGE_SIZE,
572 "met-info [000] 0.0: CPU_PMU_method: MET pmu driver\n");
573
574 /*append cache line size*/
575 ret += snprintf(buf + ret, PAGE_SIZE - ret, cache_line_header, cache_line_size());
576 ret += snprintf(buf + ret, PAGE_SIZE - ret, "# mp_cpu: pmu_value1, ...\n");
577
578 for_each_possible_cpu(cpu) {
579 event_count = cpu_pmu->event_count[cpu];
580 pmu = cpu_pmu->pmu[cpu];
581 first = 1;
582 for (i = 0; i < event_count; i++) {
583 if (pmu[i].mode == 0)
584 continue;
585 if (first) {
586 ret += snprintf(buf + ret, PAGE_SIZE - ret, header, cpu);
587 first = 0;
588 }
589 ret += snprintf(buf + ret, PAGE_SIZE - ret, ",0x%x", pmu[i].event);
590 pmu[i].mode = 0;
591 }
592 if (!first)
593 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
594 }
595
596 reset_driver_stat();
597
598 return ret;
599}
600
/*
 * Split a ','-separated number list (in-place: commas are overwritten with
 * '\0') and parse each token with met_parse_num() into @list.
 * @arg/@len:   mutable input buffer and its length (need not be NUL-terminated)
 * @list/@list_cnt: output array and its capacity
 * Returns the number of parsed values, or -1 on overflow/parse error.
 */
static int met_parse_num_list(char *arg, int len, int *list, int list_cnt)
{
	int	nr_num = 0;
	char	*num;
	int	num_len;

	/* search ',' as the splitter */
	while (len) {
		num = arg;
		num_len = 0;
		if (list_cnt <= 0)
			return -1;
		/* Scan to the next ',' (or end); num/num_len delimit the token. */
		while (len) {
			len--;
			if (*arg == ',') {
				*(arg++) = '\0';
				break;
			}
			arg++;
			num_len++;
		}
		if (met_parse_num(num, list, num_len) < 0)
			return -1;
		list++;
		list_cnt--;
		nr_num++;
	}

	return nr_num;
}
631
/*
 * metdevice process_argument hook for "--pmu-cpu-evt=[cpu_list:]event_list".
 * Parses the optional CPU list and the event list, then assigns each event to
 * a counter slot on every selected CPU:
 *  - perf-API mode: events fill slots 0..nr_counters-1 in order; each event is
 *    additionally trial-allocated via perf_event_create() (failure is only
 *    logged, not fatal — presumably so hotplugged-off CPUs don't abort setup).
 *  - MET driver mode: event 0xff (cycle counter) always occupies the last
 *    slot; all other events fill slots 0..nr_counters-2.
 * Any parse/overflow error resets the whole driver state and returns -EINVAL.
 */
static int cpupmu_process_argument(const char *arg, int len)
{
	char		*arg1 = (char*)arg;
	int		len1 = len;
	int		cpu, cpu_list[MXNR_CPU];
	int		nr_events, event_list[MXNR_PMU_EVENTS];
	int		i;
	int		nr_counters;
	struct met_pmu	*pmu;
	int		arg_nr;
	int		event_no;

	/*
	 * split cpu_list and event_list by ':'
	 * arg, len: cpu_list when found (i < len)
	 * arg1, len1: event_list
	 */
	for (i = 0; i < len; i++) {
		if (arg[i] == ':') {
			arg1[i] = '\0';
			arg1 += i+1;
			len1 = len - i - 1;
			len = i;
			break;
		}
	}

	/*
	 * setup cpu_list array
	 *   1: selected
	 *   0: unselected
	 */
	if (arg1 != arg) {	/* is cpu_id list specified? */
		int	list[MXNR_CPU], cnt;
		int	cpu_id;
		if ((cnt = met_parse_num_list((char*)arg, len, list, ARRAY_SIZE(list))) <= 0)
			goto arg_out;
		memset(cpu_list, 0, sizeof(cpu_list));
		for (i = 0; i < cnt; i++) {
			cpu_id = list[i];
			if (cpu_id < 0 || cpu_id >= ARRAY_SIZE(cpu_list))
				goto arg_out;
			cpu_list[cpu_id] = 1;
		}
	}
	else
		/* No cpu list given: select every CPU (any nonzero byte counts as selected). */
		memset(cpu_list, 1, sizeof(cpu_list));

	/* get event_list */
	if ((nr_events = met_parse_num_list(arg1, len1, event_list, ARRAY_SIZE(event_list))) <= 0)
		goto arg_out;

	/* for each cpu in cpu_list, add all the events in event_list */
	for_each_possible_cpu(cpu) {
		pmu = cpu_pmu->pmu[cpu];
		arg_nr = nr_arg[cpu];

		if (cpu_list[cpu] == 0)
			continue;

		if (met_cpu_pmu_method) {
			nr_counters = perf_num_counters();
		} else {
			nr_counters = cpu_pmu->event_count[cpu];
		}

		pr_debug("!!!!!!!! [MET_PMU] pmu slot count=%d\n", nr_counters);

		if (nr_counters == 0)
			goto arg_out;

		for (i = 0; i < nr_events; i++) {
			event_no = event_list[i];
			/*
			 * check if event is duplicate,
			 * but may not include 0xff when met_cpu_pmu_method == 0.
			 */
			if (cpu_pmu->check_event(pmu, arg_nr, event_no) < 0)
				goto arg_out;

			/*
			 * test if this event is available when in perf_APIs mode
			 */
			if (met_cpu_pmu_method) {
				struct perf_event *ev;
				ev = perf_event_create(cpu, event_no, arg_nr);
				if (ev == NULL) {
					pr_debug("!!!!!!!! [MET_PMU] failed pmu alloction test (event_no=%#04x)\n", event_no);
				} else {
					perf_event_release(cpu, ev);
				}
			}

			if (met_cpu_pmu_method) {
				if (arg_nr >= nr_counters)
					goto arg_out;
				pmu[arg_nr].mode = MODE_POLLING;
				pmu[arg_nr].event = event_no;
				pmu[arg_nr].freq = 0;
				arg_nr++;
			} else {
				/* MET driver mode: 0xff (cycles) is pinned to the last slot. */
				if (event_no == 0xff) {
					if (pmu[nr_counters-1].mode == MODE_POLLING)
						goto arg_out;
					pmu[nr_counters-1].mode = MODE_POLLING;
					pmu[nr_counters-1].event = 0xff;
					pmu[nr_counters-1].freq = 0;
				} else {
					if (arg_nr >= (nr_counters - 1))
						goto arg_out;
					pmu[arg_nr].mode = MODE_POLLING;
					pmu[arg_nr].event = event_no;
					pmu[arg_nr].freq = 0;
					arg_nr++;
				}
			}
			counter_cnt[cpu]++;
		}
		nr_arg[cpu] = arg_nr;
	}

	met_cpupmu.mode = 1;
	return 0;

arg_out:
	reset_driver_stat();
	return -EINVAL;
}
760
761
/*
 * Hotplug notification hook: record the new state for @cpu.  In perf-API
 * mode, on CPU_DOWN_PREPARE, the PMU's per-cpu IRQ is disabled up front —
 * presumably to avoid spurious PMU interrupts during the offline transition;
 * confirm against the arm_pmu hotplug flow.  Each pointer in the chain
 * (event -> arm_pmu -> platform_device -> irq) is checked before use.
 */
static void cpupmu_cpu_state_notify(long cpu, unsigned long action)
{
	per_cpu(cpu_status, cpu) = action;

#if	(defined(CONFIG_ARM64) || defined(CONFIG_ARM))
	if (met_cpu_pmu_method && action == CPU_DOWN_PREPARE) {
		struct perf_event *event = NULL;
		struct arm_pmu *armpmu = NULL;
		struct platform_device *pmu_device = NULL;
		int irq = 0;

		/* Slot 0 is enough: all events of a CPU share the same arm_pmu/IRQ. */
		event = per_cpu(pevent, cpu)[0];
		if (event)
			armpmu = to_arm_pmu(event->pmu);
		pr_debug("!!!!!!!! %s_%ld, event=%p\n", __FUNCTION__, cpu, event);

		if (armpmu)
			pmu_device = armpmu->plat_device;
		pr_debug("!!!!!!!! %s_%ld, armpmu=%p\n", __FUNCTION__, cpu, armpmu);

		if (pmu_device)
			irq = platform_get_irq(pmu_device, 0);
		pr_debug("!!!!!!!! %s_%ld, pmu_device=%p\n", __FUNCTION__, cpu, pmu_device);

		if (irq > 0)
			disable_percpu_irq(irq);
		pr_debug("!!!!!!!! %s_%ld, irq=%d\n", __FUNCTION__, cpu, irq);
	}
#endif
}
792
793
/* MET device descriptor: registers the CPU-PMU sampler ("cpu") with the MET core. */
struct metdevice met_cpupmu = {
	.name = "cpu",
	.type = MET_TYPE_PMU,
	.cpu_related = 1,
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.uniq_start = cpupmu_unique_start,
	.stop = cpupmu_stop,
	.uniq_stop = cpupmu_unique_stop,
	.polling_interval = 1,	/* poll every tick of the MET sampling timer */
	.timed_polling = met_perf_cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument,
	.cpu_state_notify = cpupmu_cpu_state_notify
};