blob: e84018ad77f86cec313835f22c2e6a6a3274c510 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2019 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/cpu.h>
14#include <linux/cpu_pm.h>
15#include <linux/perf_event.h>
16
17#if (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
18#include <linux/platform_device.h>
19#include <linux/perf/arm_pmu.h>
20#endif
21
22#include <linux/kernel.h>
23#include <linux/mutex.h>
24#include <linux/perf/arm_pmu.h>
25#include <linux/irqreturn.h>
26#include <linux/irq_work.h>
27#include "met_drv.h"
28#include "met_kernel_symbol.h"
29#include "interface.h"
30#include "trace.h"
31#include "cpu_pmu.h"
32#include "mtk_typedefs.h"
33
34#if defined(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) && defined(ONDIEMET_SUPPORT)
35#include "sspm/ondiemet_sspm.h"
36#endif
37
/* PMU hardware abstraction, filled in by cpu_pmu_hw_init() at subfs creation. */
struct cpu_pmu_hw *cpu_pmu;
/* Number of events configured per CPU by --pmu-cpu-evt parsing. */
static int counter_cnt[MXNR_CPU];
/* Next free (non-cycle) PMU slot index per CPU while parsing arguments. */
static int nr_arg[MXNR_CPU];

/* Nonzero while a MET CPU-PMU sampling session is active. */
int met_perf_cpupmu_status;

/* sysfs knob backing "mtk_pmu_event_enable" (used by the CPUPMU_V8_2 path). */
static int mtk_pmu_event_enable = 0;
static struct kobject *kobj_cpu;
DECLARE_KOBJ_ATTR_INT(mtk_pmu_event_enable, mtk_pmu_event_enable);
#define KOBJ_ATTR_LIST \
	do { \
		KOBJ_ATTR_ITEM(mtk_pmu_event_enable); \
	} while (0)

/* Serializes swapping/restoring arm_pmu->handle_irq in ondiemet mode. */
DEFINE_MUTEX(handle_irq_lock);
/* Original arm_pmu irq handler saved while our ignore-overflow handler is installed. */
irqreturn_t (*handle_irq_orig)(int irq_num, void *dev);
54
55#ifdef CONFIG_CPU_PM
/* Set only when cpu_pm_pmu_notifier was actually registered (see cpupmu_unique_start). */
static int use_cpu_pm_pmu_notifier = 0;

/* helper notifier for maintaining pmu states before cpu state transition */
static int cpu_pm_pmu_notify(struct notifier_block *b,
		unsigned long cmd,
		void *p)
{
	int ii;
	int cpu, count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	/* Only interfere with the PMU while a capture session is running. */
	if (!met_perf_cpupmu_status)
		return NOTIFY_OK;

	cpu = raw_smp_processor_id();

	switch (cmd) {
	case CPU_PM_ENTER:
		/*
		 * Before the core powers down, read the counters once and
		 * accumulate the values into cpu_pm_unpolled_loss so the
		 * counts are not lost; they are added back at the next
		 * regular poll (see met_perf_cpupmu_polling).
		 */
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);
		for (ii = 0; ii < count; ii ++)
			cpu_pmu->cpu_pm_unpolled_loss[cpu][ii] += pmu_value[ii];

		cpu_pmu->stop(cpu_pmu->event_count[cpu]);
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* Re-arm the counters when the core comes back (or failed to sleep). */
		cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

struct notifier_block cpu_pm_pmu_notifier = {
	.notifier_call = cpu_pm_pmu_notify,
};
93#endif
94
/* Latest raw counter reading for each event slot. */
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfCurr);
/* Previous reading; the traced value is perfCurr - perfPrev. */
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfPrev);
/* 1 until a slot's first read; the first (partial) delta is discarded. */
static DEFINE_PER_CPU(int[MXNR_PMU_EVENTS], perfCntFirst);
/* perf_event handles created by perf_event_create(), NULL when unused. */
static DEFINE_PER_CPU(struct perf_event * [MXNR_PMU_EVENTS], pevent);
/* Attribute storage backing each created perf_event. */
static DEFINE_PER_CPU(struct perf_event_attr [MXNR_PMU_EVENTS], pevent_attr);
/* 1 once perf events were set up on this cpu (perf_thread_set_perf_events). */
static DEFINE_PER_CPU(int, perfSet);
/* Last state passed to cpupmu_cpu_state_notify (e.g. MET_CPU_ONLINE). */
static DEFINE_PER_CPU(int, cpu_status);
102
103#ifdef CPUPMU_V8_2
104#include <linux/of.h>
105#include <linux/of_address.h>
106#include <mt-plat/sync_write.h>
107#include <mt-plat/mtk_io.h>
108
/* Device-tree compatible string used to locate the MCUCFG register block. */
static char mcucfg_desc[] = "mediatek,mcucfg";
static void __iomem *mcucfg_base = NULL;
/* DBG_CONTROL registers for cpu6/cpu7 inside the mapped MCUCFG block. */
#define DBG_CONTROL_CPU6 ((unsigned long)mcucfg_base + 0x3000 + 0x308) /* DBG_CONTROL */
#define DBG_CONTROL_CPU7 ((unsigned long)mcucfg_base + 0x3800 + 0x308) /* DBG_CONTROL */
/* Bit in DBG_CONTROL that enables MTK-internal PMU events. */
#define ENABLE_MTK_PMU_EVENTS_OFFSET 1
/* 1 when the enable bit was already set before init (leave it set on uninit). */
static int restore_dbg_ctrl_cpu6;
static int restore_dbg_ctrl_cpu7;
116
/*
 * Map MCUCFG and set the MTK-internal-PMU-event enable bit in the
 * DBG_CONTROL register of cpu6 and cpu7 (A75 internal events).
 * Remembers the previous bit state so uninit can restore it.
 * Returns 1 on success, 0 on failure (mapping is torn down on error).
 */
int cpu_pmu_debug_init(void)
{
	struct device_node *node = NULL;
	unsigned int value6,value7;

	/*for A75 MTK internal event*/
	if (mcucfg_base == NULL) {
		node = of_find_compatible_node(NULL, NULL, mcucfg_desc);
		if (node == NULL) {
			MET_TRACE("[MET_PMU_DB] of_find node == NULL\n");
			pr_debug("[MET_PMU_DB] of_find node == NULL\n");
			goto out;
		}
		mcucfg_base = of_iomap(node, 0);
		of_node_put(node);
		if (mcucfg_base == NULL) {
			MET_TRACE("[MET_PMU_DB] mcucfg_base == NULL\n");
			pr_debug("[MET_PMU_DB] mcucfg_base == NULL\n");
			goto out;
		}
		MET_TRACE("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
		pr_debug("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
	}

	/* Set the enable bit only when it is not set yet; remember the old state. */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	if (value6 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu6 = 1;
	} else {
		restore_dbg_ctrl_cpu6 = 0;
		mt_reg_sync_writel(value6 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU6);
	}

	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	if (value7 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu7 = 1;
	} else {
		restore_dbg_ctrl_cpu7 = 0;
		mt_reg_sync_writel(value7 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU7);
	}

	/* Read back and log the final register values for verification. */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	return 1;

out:
	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL init error");
	pr_debug("[MET_PMU_DB]DBG_CONTROL init error");
	return 0;
}
172
/*
 * Undo cpu_pmu_debug_init(): clear the enable bit only where we set it
 * ourselves (restore_dbg_ctrl_cpu* == 0), then unmap MCUCFG.
 * Always returns 1.
 */
int cpu_pmu_debug_uninit(void)
{
	unsigned int value6,value7;

	if (restore_dbg_ctrl_cpu6 == 0) {
		value6 = readl(IOMEM(DBG_CONTROL_CPU6));
		mt_reg_sync_writel(value6 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU6);
	}
	if (restore_dbg_ctrl_cpu7 == 0) {
		value7 = readl(IOMEM(DBG_CONTROL_CPU7));
		mt_reg_sync_writel(value7 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU7);
	}

	/* Read back and log the restored register values. */
	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);

	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	restore_dbg_ctrl_cpu6 = 0;
	restore_dbg_ctrl_cpu7 = 0;
	return 1;
}
199#endif
200
201
202
203
/*
 * Emit one PMU sample (cnt values) into the MET trace.
 * Kept out-of-line (noinline) so it remains a distinct symbol.
 */
noinline void mp_cpu(unsigned char cnt, unsigned int *value)
{
	MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}
208
/* Intentionally empty overflow callback. */
static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
		struct pt_regs *regs)
{
	/*
	 * Required as perf_event_create_kernel_counter() requires an overflow handler,
	 * even though all we do is poll.
	 */
}
217
/*
 * Poll every enabled perf event on @cpu and emit the per-interval deltas
 * through mp_cpu() as one trace record.  Counters accumulate in perf; we
 * keep the previous snapshot in perfPrev and report the difference.  The
 * very first reading of each slot is discarded (perfCntFirst) so a
 * partial interval is never reported.  @stamp is currently unused here.
 */
static void perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int event_count = cpu_pmu->event_count[cpu];
	struct met_pmu *pmu = cpu_pmu->pmu[cpu];
	int i, count;
	unsigned long long delta;
	struct perf_event *ev;
	unsigned int pmu_value[MXNR_PMU_EVENTS];
	u64 value;

	/* nothing to report before perf_thread_set_perf_events() ran */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	count = 0;
	for (i = 0; i < event_count; i++) {
		if (pmu[i].mode == 0)
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			/* read the current count via the resolved kernel symbol */
			met_perf_event_read_local_symbol(ev, &value);
			per_cpu(perfCurr, cpu)[i] = value;
			delta = (per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i]);
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			if (per_cpu(perfCntFirst, cpu)[i] == 1) {
				/* we shall omit delta counter when we get first counter */
				per_cpu(perfCntFirst, cpu)[i] = 0;
				continue;
			}
			pmu_value[count] = (unsigned int)delta;
			count++;
		}
	}

	/* emit only complete samples where every configured counter was read */
	if (count == counter_cnt[cpu])
		mp_cpu(count, pmu_value);
}
255
/*
 * Create a pinned kernel counter for @event on @cpu, using the per-cpu
 * attr slot @count as backing storage.  Event number 0xff is mapped to
 * the generic CPU-cycles event; anything else is passed through as a
 * raw event code.  Returns the active event, or NULL on failure (the
 * event is released before returning NULL).
 */
static struct perf_event* perf_event_create(int cpu, unsigned short event, int count)
{
	struct perf_event_attr *ev_attr;
	struct perf_event *ev;

	ev_attr = per_cpu(pevent_attr, cpu)+count;
	memset(ev_attr, 0, sizeof(*ev_attr));
	if (event == 0xff) {
		ev_attr->config = PERF_COUNT_HW_CPU_CYCLES;
		ev_attr->type = PERF_TYPE_HARDWARE;
	} else {
		ev_attr->config = event;
		ev_attr->type = PERF_TYPE_RAW;
	}
	ev_attr->size = sizeof(*ev_attr);
	ev_attr->sample_period = 0;
	/* pinned: the event must stay on the hardware at all times */
	ev_attr->pinned = 1;

	ev = perf_event_create_kernel_counter(ev_attr, cpu, NULL, dummy_handler, NULL);
	if (IS_ERR(ev))
		return NULL;
	do {
		if (ev->state == PERF_EVENT_STATE_ACTIVE)
			break;
		/* one retry: try to kick an errored event back to active */
		if (ev->state == PERF_EVENT_STATE_ERROR) {
			perf_event_enable(ev);
			if (ev->state == PERF_EVENT_STATE_ACTIVE)
				break;
		}
		perf_event_release_kernel(ev);
		return NULL;
	} while (0);

	return ev;
}
291
/* Disable (if still active) and free a counter created by perf_event_create(). */
static void perf_event_release(int cpu, struct perf_event *ev)
{
	if (ev->state == PERF_EVENT_STATE_ACTIVE)
		perf_event_disable(ev);
	perf_event_release_kernel(ev);
}
298
299#if defined(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) && defined(ONDIEMET_SUPPORT)
/* All 32 overflow-status bits are considered. */
#define PMU_OVERFLOWED_MASK 0xffffffff

/* Nonzero when any counter overflow bit is set in @pmovsr. */
static inline int pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & PMU_OVERFLOWED_MASK;
}
306
307static irqreturn_t perf_event_handle_irq_ignore_overflow(int irq_num, void *dev)
308{
309 u32 pmovsr;
310
311 pmovsr = cpu_pmu->pmu_read_clear_overflow_flag();
312
313 if (!pmu_has_overflowed(pmovsr)) {
314 return IRQ_NONE;
315 }
316 else {
317 irq_work_run();
318 return IRQ_HANDLED;
319 }
320}
321#endif
322
/*
 * Create and enable a perf event for every configured PMU slot on @cpu.
 * Idempotent per session: does nothing once perfSet is set.  On a failed
 * event registration the whole cpupmu mode is switched off but the loop
 * continues with the remaining slots.  In ondiemet mode the arm_pmu irq
 * handler is swapped for perf_event_handle_irq_ignore_overflow (restored
 * in perf_thread_down).  Always returns 0.
 */
static int perf_thread_set_perf_events(int cpu)
{
	int i, size;
	struct perf_event *ev;

	size = sizeof(struct perf_event_attr);
	if (per_cpu(perfSet, cpu) == 0) {
		int event_count = cpu_pmu->event_count[cpu];
		struct met_pmu *pmu = cpu_pmu->pmu[cpu];
		for (i = 0; i < event_count; i++) {
			if (!pmu[i].mode)
				continue; /* Skip disabled counters */
			ev = perf_event_create(cpu, pmu[i].event, i);
			if (ev == NULL) {
				/* registration failed: disable the whole cpupmu mode */
				met_cpupmu.mode = 0;
				met_perf_cpupmu_status = 0;

				MET_TRACE("[MET_PMU] cpu %d failed to register pmu event %4x\n", cpu, pmu[i].event);
				pr_notice("[MET_PMU] cpu %d failed to register pmu event %4x\n", cpu, pmu[i].event);
				continue;
			}

			/*
			 * in perf-event implementation, hardware pmu slot and cycle counter
			 * was mapped to perf_event::hw::idx as follows:
			 *
			 * | idx | hardware slot |
			 * |-----+---------------|
			 * |   0 | pmccntr_el0   |
			 * |   1 | 0             |
			 * |   2 | 1             |
			 * |   3 | 2             |
			 * |   4 | 3             |
			 * |   5 | 4             |
			 * |   6 | 5             |
			 */
			if (ev->hw.idx != 0) {
				MET_TRACE("[MET_PMU] cpu %d registered in pmu slot: [%d] evt=%#04x\n",
					  cpu, ev->hw.idx-1, pmu[i].event);
				pr_debug("[MET_PMU] cpu %d registered in pmu slot: [%d] evt=%#04x\n",
					 cpu, ev->hw.idx-1, pmu[i].event);
			} else if (ev->hw.idx == 0) {
				MET_TRACE("[MET_PMU] cpu %d registered cycle count evt=%#04x\n",
					  cpu, pmu[i].event);
				pr_debug("[MET_PMU] cpu %d registered cycle count evt=%#04x\n",
					 cpu, pmu[i].event);
			}

			/* reset the polling state for this slot, then arm the event */
			per_cpu(pevent, cpu)[i] = ev;
			per_cpu(perfPrev, cpu)[i] = 0;
			per_cpu(perfCurr, cpu)[i] = 0;
			perf_event_enable(ev);
			per_cpu(perfCntFirst, cpu)[i] = 1;

#if defined(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) && defined(ONDIEMET_SUPPORT)
			if (met_cpupmu.ondiemet_mode) {
				struct arm_pmu *armpmu;
				armpmu = container_of(ev->pmu, struct arm_pmu, pmu);
				/* swap the irq handler exactly once (guarded by handle_irq_lock) */
				mutex_lock(&handle_irq_lock);
				if (armpmu && armpmu->handle_irq != perf_event_handle_irq_ignore_overflow) {
					pr_debug("[MET_PMU] replaced original handle_irq=%p with dummy function\n",
						 armpmu->handle_irq);
					handle_irq_orig = armpmu->handle_irq;
					armpmu->handle_irq = perf_event_handle_irq_ignore_overflow;
				}
				mutex_unlock(&handle_irq_lock);
			}
#endif
		} /* for all PMU counter */
		per_cpu(perfSet, cpu) = 1;
	} /* for perfSet */

	return 0;
}
397
398static void met_perf_cpupmu_start(int cpu)
399{
400 if (met_cpupmu.mode == 0)
401 return;
402
403 perf_thread_set_perf_events(cpu);
404}
405
/*
 * Tear down all perf events on @cpu created by perf_thread_set_perf_events.
 * Clears perfSet first, restores the original arm_pmu irq handler in
 * ondiemet mode, then releases every event handle.
 */
static void perf_thread_down(int cpu)
{
	int i;
	struct perf_event *ev;
	int event_count;
	struct met_pmu *pmu;

	/* nothing to tear down if setup never ran on this cpu */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	per_cpu(perfSet, cpu) = 0;
	event_count = cpu_pmu->event_count[cpu];
	pmu = cpu_pmu->pmu[cpu];
	for (i = 0; i < event_count; i++) {
		ev = per_cpu(pevent, cpu)[i];
		if (ev != NULL) {

#if defined(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) && defined(ONDIEMET_SUPPORT)
			if (met_cpupmu.ondiemet_mode) {
				struct arm_pmu *armpmu;
				armpmu = container_of(ev->pmu, struct arm_pmu, pmu);
				/* restore the handler exactly once (guarded by handle_irq_lock) */
				mutex_lock(&handle_irq_lock);
				if (armpmu && armpmu->handle_irq == perf_event_handle_irq_ignore_overflow) {
					pr_debug("[MET_PMU] restore original handle_irq=%p\n", handle_irq_orig);
					armpmu->handle_irq = handle_irq_orig;
					handle_irq_orig = NULL;
				}
				mutex_unlock(&handle_irq_lock);
			}
#endif

			perf_event_release(cpu, ev);
			per_cpu(pevent, cpu)[i] = NULL;
		}
	}
}
442
/* Release all perf events on @cpu (thin wrapper over perf_thread_down). */
static void met_perf_cpupmu_stop(int cpu)
{
	perf_thread_down(cpu);
}
447
/*
 * metdevice create_subfs hook: initialize the PMU hardware layer and
 * create the sysfs attributes listed in KOBJ_ATTR_LIST under @parent.
 * Returns 0 on success, -ENODEV when the PMU layer cannot init, or the
 * sysfs_create_file() error code.
 */
static int cpupmu_create_subfs(struct kobject *parent)
{
	int ret = 0;

	cpu_pmu = cpu_pmu_hw_init();
	if (cpu_pmu == NULL) {
		PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
		return -ENODEV;
	}

	kobj_cpu = parent;

/* Expand KOBJ_ATTR_LIST into one sysfs_create_file() per attribute. */
#define KOBJ_ATTR_ITEM(attr_name) \
	do { \
		ret = sysfs_create_file(kobj_cpu, &attr_name ## _attr.attr); \
		if (ret != 0) { \
			pr_notice("Failed to create " #attr_name " in sysfs\n"); \
			return ret; \
		} \
	} while (0)
	KOBJ_ATTR_LIST;
#undef KOBJ_ATTR_ITEM

	return 0;
}
473
/* metdevice delete_subfs hook: remove the sysfs attributes created above. */
static void cpupmu_delete_subfs(void)
{
/* Expand KOBJ_ATTR_LIST into one sysfs_remove_file() per attribute. */
#define KOBJ_ATTR_ITEM(attr_name) \
	sysfs_remove_file(kobj_cpu, &attr_name ## _attr.attr)

	if (kobj_cpu != NULL) {
		KOBJ_ATTR_LIST;
		kobj_cpu = NULL;
	}
#undef KOBJ_ATTR_ITEM
}
485
/*
 * Periodic polling entry (metdevice timed_polling).  Skips offline cpus.
 * In perf mode it defers to perf_cpupmu_polling(); otherwise it reads
 * the counters through the MET pmu driver directly, adding back any
 * counts saved by the CPU-PM notifier while the core was down
 * (cpu_pm_unpolled_loss), which are then cleared for the next interval.
 */
void met_perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	if (per_cpu(cpu_status, cpu) != MET_CPU_ONLINE)
		return;

	if (met_cpu_pmu_method) {
		perf_cpupmu_polling(stamp, cpu);
	} else {
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);

#ifdef CONFIG_CPU_PM
		/* compensate counts lost across low-power transitions */
		if (met_cpu_pm_pmu_reconfig) {
			int ii;
			for (ii = 0; ii < count; ii ++)
				pmu_value[ii] += cpu_pmu->cpu_pm_unpolled_loss[cpu][ii];
		}
#endif

		mp_cpu(count, pmu_value);

#ifdef CONFIG_CPU_PM
		if (met_cpu_pm_pmu_reconfig) {
			memset(cpu_pmu->cpu_pm_unpolled_loss[cpu], 0, sizeof (cpu_pmu->cpu_pm_unpolled_loss[0]));
		}
#endif
	}
}
516
517static void cpupmu_start(void)
518{
519 int cpu = raw_smp_processor_id();
520
521 if (!met_cpu_pmu_method) {
522 nr_arg[cpu] = 0;
523 cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
524
525 met_perf_cpupmu_status = 1;
526 per_cpu(cpu_status, cpu) = MET_CPU_ONLINE;
527 }
528}
529
530
531static void cpupmu_unique_start(void)
532{
533 int cpu;
534
535#ifdef CPUPMU_V8_2
536 int ret = 0;
537 if (mtk_pmu_event_enable == 1){
538 ret = cpu_pmu_debug_init();
539 if (ret == 0)
540 PR_BOOTMSG("Failed to init CPU PMU debug!!\n");
541 }
542#endif
543
544#ifdef CONFIG_CPU_PM
545 use_cpu_pm_pmu_notifier = 0;
546 if (met_cpu_pm_pmu_reconfig) {
547 if (met_cpu_pmu_method) {
548 met_cpu_pm_pmu_reconfig = 0;
549 MET_TRACE("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
550 pr_debug("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
551 } else {
552 memset(cpu_pmu->cpu_pm_unpolled_loss, 0, sizeof (cpu_pmu->cpu_pm_unpolled_loss));
553 cpu_pm_register_notifier(&cpu_pm_pmu_notifier);
554 use_cpu_pm_pmu_notifier = 1;
555 }
556 }
557#else
558 if (met_cpu_pm_pmu_reconfig) {
559 met_cpu_pm_pmu_reconfig = 0;
560 MET_TRACE("[MET_PMU] CONFIG_CPU_PM=%d, met_cpu_pm_pmu_reconfig forced disabled\n", CONFIG_CPU_PM);
561 pr_debug("[MET_PMU] CONFIG_CPU_PM=%d, met_cpu_pm_pmu_reconfig forced disabled\n", CONFIG_CPU_PM);
562 }
563#endif
564 MET_TRACE("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
565 pr_debug("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
566
567 if (met_cpu_pmu_method) {
568 for_each_possible_cpu(cpu) {
569 met_perf_cpupmu_start(cpu);
570
571 met_perf_cpupmu_status = 1;
572 per_cpu(cpu_status, cpu) = MET_CPU_ONLINE;
573 }
574 }
575
576 return;
577}
578
579static void cpupmu_stop(void)
580{
581 int cpu = raw_smp_processor_id();
582
583 met_perf_cpupmu_status = 0;
584
585 if (!met_cpu_pmu_method)
586 cpu_pmu->stop(cpu_pmu->event_count[cpu]);
587}
588
/*
 * Session-wide stop hook: releases perf events on every cpu (perf mode),
 * restores the V8.2 debug registers, and unregisters the CPU-PM notifier
 * if cpupmu_unique_start() registered it.
 */
static void cpupmu_unique_stop(void)
{
	int cpu;

	if (met_cpu_pmu_method) {
		for_each_possible_cpu(cpu) {
			met_perf_cpupmu_stop(cpu);
		}
	}

#ifdef CPUPMU_V8_2
	if (mtk_pmu_event_enable == 1)
		cpu_pmu_debug_uninit();
#endif

#ifdef CONFIG_CPU_PM
	if (use_cpu_pm_pmu_notifier) {
		cpu_pm_unregister_notifier(&cpu_pm_pmu_notifier);
	}
#endif
	return;
}
611
612static const char cache_line_header[] =
613 "met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
614static const char header[] =
615 "met-info [000] 0.0: met_cpu_header_v2: %d";
616
617static const char help[] =
618 " --pmu-cpu-evt=[cpu_list:]event_list select CPU-PMU events in %s\n"
619 " cpu_list: specify the cpu_id list or apply to all the cores\n"
620 " example: 0,1,2\n"
621 " event_list: specify the event number\n"
622 " example: 0x8,0xff\n";
623
624static int cpupmu_print_help(char *buf, int len)
625{
626 return snprintf(buf, PAGE_SIZE, help, cpu_pmu->cpu_name);
627}
628
629static int reset_driver_stat(void)
630{
631 int cpu, i;
632 int event_count;
633 struct met_pmu *pmu;
634
635 met_cpupmu.mode = 0;
636 for_each_possible_cpu(cpu) {
637 event_count = cpu_pmu->event_count[cpu];
638 pmu = cpu_pmu->pmu[cpu];
639 counter_cnt[cpu] = 0;
640 nr_arg[cpu] = 0;
641 for (i = 0; i < event_count; i++) {
642 pmu[i].mode = MODE_DISABLED;
643 pmu[i].event = 0;
644 pmu[i].freq = 0;
645 }
646 }
647
648 return 0;
649}
650
/*
 * metdevice print_header hook: write the session header lines (PMU access
 * method, cache-line size, per-cpu event lists) into @buf.
 * NOTE: has side effects — it clears each pmu[i].mode as it prints and
 * finally calls reset_driver_stat(), so the configuration is consumed.
 * Returns the number of bytes written.
 */
static int cpupmu_print_header(char *buf, int len)
{
	int cpu, i, ret, first;
	int event_count;
	struct met_pmu *pmu;

	ret = 0;

	/*append CPU PMU access method*/
	if (met_cpu_pmu_method)
		ret += snprintf(buf + ret, len,
				"met-info [000] 0.0: CPU_PMU_method: perf APIs\n");
	else
		ret += snprintf(buf + ret, len,
				"met-info [000] 0.0: CPU_PMU_method: MET pmu driver\n");

	/*append cache line size*/
	ret += snprintf(buf + ret, len - ret, cache_line_header, cache_line_size());
	ret += snprintf(buf + ret, len - ret, "# mp_cpu: pmu_value1, ...\n");

	for_each_possible_cpu(cpu) {
		event_count = cpu_pmu->event_count[cpu];
		pmu = cpu_pmu->pmu[cpu];
		first = 1;
		for (i = 0; i < event_count; i++) {
			if (pmu[i].mode == 0)
				continue;
			/* emit the per-cpu header prefix once, before the first event */
			if (first) {
				ret += snprintf(buf + ret, len - ret, header, cpu);
				first = 0;
			}
			ret += snprintf(buf + ret, len - ret, ",0x%x", pmu[i].event);
			pmu[i].mode = 0;
		}
		if (!first)
			ret += snprintf(buf + ret, len - ret, "\n");
	}

	reset_driver_stat();

	return ret;
}
693
/*
 * Parse a comma-separated number list from @arg (length @len, not
 * necessarily NUL-terminated) into @list (capacity @list_cnt).
 * NOTE: destructive — each ',' in @arg is overwritten with '\0'.
 * Returns the number of values parsed, or -1 on overflow of @list or on
 * a token met_parse_num() rejects.
 */
static int met_parse_num_list(char *arg, int len, int *list, int list_cnt)
{
	int nr_num = 0;
	char *num;
	int num_len;

	/* search ',' as the splitter */
	while (len) {
		num = arg;
		num_len = 0;
		if (list_cnt <= 0)
			return -1;
		while (len) {
			len--;
			if (*arg == ',') {
				*(arg++) = '\0';
				break;
			}
			arg++;
			num_len++;
		}
		if (met_parse_num(num, list, num_len) < 0)
			return -1;
		list++;
		list_cnt--;
		nr_num++;
	}

	return nr_num;
}
724
/*
 * Walk the events attribute group of the arm_pmu backing @ev and return
 * the perf_pmu_events_attr whose sysfs name equals @name (e.g.
 * "cpu_cycles"), or NULL when no attribute matches.
 */
static const struct perf_pmu_events_attr *
perf_event_get_evt_attr_by_name(const struct perf_event *ev,
				const char *name) {
	struct arm_pmu *arm_pmu;
	struct attribute **attrp;
	struct device_attribute *dev_attr_p;
	struct perf_pmu_events_attr *ev_attr_p;

	arm_pmu = container_of(ev->pmu, struct arm_pmu, pmu);

	for (attrp = arm_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS]->attrs;
	     *attrp != NULL;
	     attrp ++) {

		/* attribute -> device_attribute -> perf_pmu_events_attr */
		dev_attr_p = container_of(*attrp, struct device_attribute, attr);
		ev_attr_p = container_of(dev_attr_p, struct perf_pmu_events_attr, attr);

		if (0 == strcmp((*attrp)->name, name)) {
			return ev_attr_p;
		}
	}

	return NULL;
}
749
750static int cpupmu_process_argument(const char *arg, int len)
751{
752 char *arg1 = (char*)arg;
753 int len1 = len;
754 int cpu, cpu_list[MXNR_CPU];
755 int nr_events, event_list[MXNR_PMU_EVENTS];
756 int i;
757 int nr_counters;
758 struct met_pmu *pmu;
759 int arg_nr;
760 int event_no;
761 int is_cpu_cycle_evt;
762 const struct perf_pmu_events_attr *ev_attr_p;
763
764 /*
765 * split cpu_list and event_list by ':'
766 * arg, len: cpu_list when found (i < len)
767 * arg1, len1: event_list
768 */
769 for (i = 0; i < len; i++) {
770 if (arg[i] == ':') {
771 arg1[i] = '\0';
772 arg1 += i+1;
773 len1 = len - i - 1;
774 len = i;
775 break;
776 }
777 }
778
779 /*
780 * setup cpu_list array
781 * 1: selected
782 * 0: unselected
783 */
784 if (arg1 != arg) { /* is cpu_id list specified? */
785 int list[MXNR_CPU], cnt;
786 int cpu_id;
787 if ((cnt = met_parse_num_list((char*)arg, len, list, ARRAY_SIZE(list))) <= 0)
788 goto arg_out;
789 memset(cpu_list, 0, sizeof(cpu_list));
790 for (i = 0; i < cnt; i++) {
791 cpu_id = list[i];
792 if (cpu_id < 0 || cpu_id >= ARRAY_SIZE(cpu_list))
793 goto arg_out;
794 cpu_list[cpu_id] = 1;
795 }
796 }
797 else
798 memset(cpu_list, 1, sizeof(cpu_list));
799
800 /* get event_list */
801 if ((nr_events = met_parse_num_list(arg1, len1, event_list, ARRAY_SIZE(event_list))) <= 0)
802 goto arg_out;
803
804 /* for each cpu in cpu_list, add all the events in event_list */
805 for_each_possible_cpu(cpu) {
806 pmu = cpu_pmu->pmu[cpu];
807 arg_nr = nr_arg[cpu];
808
809 if (cpu_list[cpu] == 0)
810 continue;
811
812 if (met_cpu_pmu_method) {
813 nr_counters = perf_num_counters();
814 } else {
815 nr_counters = cpu_pmu->event_count[cpu];
816 }
817
818 pr_debug("[MET_PMU] pmu slot count=%d\n", nr_counters);
819
820 if (nr_counters == 0)
821 goto arg_out;
822
823 for (i = 0; i < nr_events; i++) {
824 event_no = event_list[i];
825 is_cpu_cycle_evt = 0;
826 /*
827 * check if event is duplicate, but does not include 0xff
828 */
829 if (cpu_pmu->check_event(pmu, arg_nr, event_no) < 0)
830 goto arg_out;
831
832 /*
833 * test if this event is available when in perf_APIs mode
834 */
835 if (met_cpu_pmu_method) {
836 struct perf_event *ev;
837
838 if (!cpu_pmu->perf_event_get_evttype) {
839 MET_TRACE("[MET_PMU] cpu_pmu->perf_event_get_evttype=NULL, "
840 "met pmu on perf-event was not supported on this platform\n");
841 pr_debug("[MET_PMU] cpu_pmu->perf_event_get_evttype=NULL, "
842 "met pmu on perf-event was not supported on this platform\n");
843 goto arg_out;
844 }
845
846 ev = perf_event_create(cpu, event_no, arg_nr);
847 if (ev == NULL) {
848 pr_debug("!!!!!!!! [MET_PMU] failed pmu alloction test (event_no=%#04x)\n", event_no);
849 goto arg_out;
850 } else {
851 perf_event_release(cpu, ev);
852 }
853
854 ev_attr_p = perf_event_get_evt_attr_by_name(ev, "cpu_cycles");
855 if (ev_attr_p && cpu_pmu->perf_event_get_evttype(ev) == ev_attr_p->id)
856 is_cpu_cycle_evt = 1;
857 }
858
859 if (met_cpu_pmu_method) {
860 if (is_cpu_cycle_evt) {
861 if (pmu[nr_counters-1].mode == MODE_POLLING)
862 goto arg_out;
863 pmu[nr_counters-1].mode = MODE_POLLING;
864 pmu[nr_counters-1].event = event_no;
865 pmu[nr_counters-1].freq = 0;
866 } else {
867 if (arg_nr >= (nr_counters - 1))
868 goto arg_out;
869 pmu[arg_nr].mode = MODE_POLLING;
870 pmu[arg_nr].event = event_no;
871 pmu[arg_nr].freq = 0;
872 arg_nr++;
873 }
874 } else {
875 if (event_no == 0xff) {
876 if (pmu[nr_counters-1].mode == MODE_POLLING)
877 goto arg_out;
878 pmu[nr_counters-1].mode = MODE_POLLING;
879 pmu[nr_counters-1].event = 0xff;
880 pmu[nr_counters-1].freq = 0;
881 } else {
882 if (arg_nr >= (nr_counters - 1))
883 goto arg_out;
884 pmu[arg_nr].mode = MODE_POLLING;
885 pmu[arg_nr].event = event_no;
886 pmu[arg_nr].freq = 0;
887 arg_nr++;
888 }
889 }
890 counter_cnt[cpu]++;
891 }
892 nr_arg[cpu] = arg_nr;
893 }
894
895 met_cpupmu.mode = 1;
896 return 0;
897
898arg_out:
899 reset_driver_stat();
900 return -EINVAL;
901}
902
/*
 * metdevice cpu_state_notify hook: record the hotplug state and, in perf
 * mode when a cpu goes offline, disable that cpu's PMU percpu irq so no
 * stale interrupts fire while the core is down.
 */
static void cpupmu_cpu_state_notify(long cpu, unsigned long action)
{
	per_cpu(cpu_status, cpu) = action;

#if (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
	if (met_cpu_pmu_method && action == MET_CPU_OFFLINE) {
		struct perf_event *event = NULL;
		struct arm_pmu *armpmu = NULL;
		struct platform_device *pmu_device = NULL;
		int irq = 0;

		/* derive the irq via: event[0] -> arm_pmu -> platform device */
		event = per_cpu(pevent, cpu)[0];
		if (event)
			armpmu = to_arm_pmu(event->pmu);
		pr_debug("!!!!!!!! %s_%ld, event=%p\n", __FUNCTION__, cpu, event);

		if (armpmu)
			pmu_device = armpmu->plat_device;
		pr_debug("!!!!!!!! %s_%ld, armpmu=%p\n", __FUNCTION__, cpu, armpmu);

		if (pmu_device)
			irq = platform_get_irq(pmu_device, 0);
		pr_debug("!!!!!!!! %s_%ld, pmu_device=%p\n", __FUNCTION__, cpu, pmu_device);

		if (irq > 0)
			disable_percpu_irq(irq);
		pr_debug("!!!!!!!! %s_%ld, irq=%d\n", __FUNCTION__, cpu, irq);
	}
#endif
}
933
934#if defined(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) && defined(ONDIEMET_SUPPORT)
935static void sspm_pmu_start(void)
936{
937 ondiemet_module[ONDIEMET_SSPM] |= ID_PMU;
938
939 if (met_cpupmu.ondiemet_mode == 1)
940 cpupmu_start();
941}
942
943static int cycle_count_mode_enabled(int cpu) {
944
945 int event_cnt;
946 struct met_pmu *pmu;
947
948 pmu = cpu_pmu->pmu[cpu];
949
950 if (met_cpu_pmu_method) {
951 event_cnt = perf_num_counters();
952 } else {
953 event_cnt = cpu_pmu->event_count[cpu];
954 }
955
956 return pmu[event_cnt-1].mode == MODE_POLLING;
957}
958
959static void ipi_config_pmu_counter_cnt(void) {
960
961 int ret, cpu, ii, cnt_num;
962 unsigned int rdata;
963 unsigned int ipi_buf[4];
964 struct hw_perf_event *hwc;
965 unsigned int base_offset;
966
967 for_each_possible_cpu(cpu) {
968 for (ii = 0; ii < 4; ii++)
969 ipi_buf[ii] = 0;
970
971 ipi_buf[0] = MET_MAIN_ID | (MID_PMU << MID_BIT_SHIFT) | MET_ARGU | SET_PMU_EVT_CNT;
972 /*
973 * XXX: on sspm side, cycle counter was not counted in
974 * total event number `counter_cnt', but controlled by
975 * an addtional argument `SET_PMU_CYCCNT_ENABLE' instead
976 */
977 cnt_num = (cycle_count_mode_enabled(cpu) ?
978 (counter_cnt[cpu]-1) : counter_cnt[cpu]);
979 ipi_buf[1] = (cpu << 16) | (cnt_num & 0xffff);
980
981 MET_TRACE("[MET_PMU][IPI_CONFIG] core=%d, pmu_counter_cnt=%d\n", cpu, cnt_num);
982 pr_debug("[MET_PMU][IPI_CONFIG] core=%d, pmu_counter_cnt=%d\n", cpu, cnt_num);
983
984 MET_TRACE("[MET_PMU][IPI_CONFIG] sspm_buf_available=%d, in_interrupt()=%lu\n", sspm_buf_available, in_interrupt());
985 pr_debug("[MET_PMU][IPI_CONFIG] sspm_buf_available=%d, in_interrupt()=%lu\n", sspm_buf_available, in_interrupt());
986
987 if (sspm_buf_available == 1) {
988 ret = met_ipi_to_sspm_command((void *) ipi_buf, 0, &rdata, 1);
989 }
990
991 for (ii = 0; ii < 4; ii++)
992 ipi_buf[ii] = 0;
993
994 if (per_cpu(pevent, cpu)[0]) {
995 hwc = &(per_cpu(pevent, cpu)[0]->hw);
996 base_offset = hwc->idx-1;
997 } else {
998 base_offset = 0;
999 }
1000
1001 ipi_buf[0] = MET_MAIN_ID | (MID_PMU << MID_BIT_SHIFT) | MET_ARGU | SET_PMU_BASE_OFFSET;
1002 ipi_buf[1] = (cpu << 16) | (base_offset & 0xffff);
1003
1004 MET_TRACE("[MET_PMU][IPI_CONFIG] core=%d, base offset set to %lu\n", cpu, base_offset);
1005 pr_debug("[MET_PMU][IPI_CONFIG] core=%d, base offset set to %lu\n", cpu, base_offset);
1006
1007 if (sspm_buf_available == 1) {
1008 ret = met_ipi_to_sspm_command((void *) ipi_buf, 0, &rdata, 1);
1009 }
1010
1011 if (cycle_count_mode_enabled(cpu)) {
1012
1013 for (ii = 0; ii < 4; ii++)
1014 ipi_buf[ii] = 0;
1015
1016 ipi_buf[0] = MET_MAIN_ID | (MID_PMU << MID_BIT_SHIFT) | MET_ARGU | SET_PMU_CYCCNT_ENABLE;
1017 ipi_buf[1] = cpu & 0xffff;
1018
1019 MET_TRACE("[MET_PMU][IPI_CONFIG] core=%d, pmu cycle cnt enable\n", cpu);
1020 pr_debug("[MET_PMU][IPI_CONFIG] core=%d, pmu cycle cnt enable\n", cpu);
1021
1022 if (sspm_buf_available == 1) {
1023 ret = met_ipi_to_sspm_command((void *) ipi_buf, 0, &rdata, 1);
1024 }
1025 }
1026 }
1027}
1028
/*
 * Return 1 when the perf events allocated on @cpu occupy consecutive
 * hardware slots (hw.idx strictly increasing by one), 0 otherwise.
 * SSPM-side polling assumes sequential slots, so a non-sequential layout
 * disqualifies the cpu (see __validate_sspm_compatibility).
 */
static int __is_perf_event_hw_slot_seq_order(int cpu) {

	struct hw_perf_event *hwc, *hwc_prev;
	int event_count = cpu_pmu->event_count[cpu];
	int ii;

	/*
	 * perf-event descriptor list would not have any hole
	 * (excepts special 0xff, which will always be the last element)
	 */
	if (per_cpu(pevent, cpu)[0] == NULL)
		return 1;

	/*
	 * XXX: no need to check the last slot,
	 * which is reserved for 0xff
	 */
	for (ii = 1; ii < event_count - 1; ii++) {

		if (per_cpu(pevent, cpu)[ii] == NULL)
			return 1;

		hwc = &(per_cpu(pevent, cpu)[ii]->hw);
		hwc_prev = &(per_cpu(pevent, cpu)[ii-1]->hw);

		if (hwc->idx != hwc_prev->idx + 1)
			return 0;
	}

	return 1;
}
1060
1061static int __validate_sspm_compatibility(void) {
1062
1063 int cpu;
1064
1065 for_each_possible_cpu(cpu) {
1066
1067 if (!__is_perf_event_hw_slot_seq_order(cpu)) {
1068 MET_TRACE("[MET_PMU] pmu not sequentially allocated on cpu %d\n"
1069 ,cpu);
1070 pr_debug("[MET_PMU] pmu not sequentially allocated on cpu %d\n"
1071 ,cpu);
1072 return -1;
1073 }
1074 }
1075
1076 return 0;
1077}
1078
1079static void sspm_pmu_unique_start(void) {
1080
1081 if (met_cpupmu.ondiemet_mode == 1)
1082 cpupmu_unique_start();
1083
1084 if (met_cpupmu.ondiemet_mode == 1) {
1085 if (__validate_sspm_compatibility() == -1) {
1086 MET_TRACE("[MET_PMU] turned off sspm side polling\n");
1087 pr_debug("[MET_PMU] turned off sspm side polling\n");
1088 /* return without sending init IPIs, leaving sspm side to poll nothing */
1089 return;
1090 }
1091 }
1092
1093 ipi_config_pmu_counter_cnt();
1094}
1095
1096static void sspm_pmu_unique_stop(void)
1097{
1098 if (met_cpupmu.ondiemet_mode == 1)
1099 cpupmu_unique_stop();
1100 return;
1101}
1102
1103static void sspm_pmu_stop(void)
1104{
1105 if (met_cpupmu.ondiemet_mode == 1)
1106 cpupmu_stop();
1107}
1108
/* Header line identifying the SSPM-side sampler in the trace. */
static const char sspm_pmu_header[] = "met-info [000] 0.0: pmu_sampler: sspm\n";

/*
 * ondiemet print_header hook: emit the sspm sampler line, then the
 * regular cpupmu header.  Returns the number of bytes written.
 */
static int sspm_pmu_print_header(char *buf, int len)
{
	int ret;

	ret = snprintf(buf, len, sspm_pmu_header);

	if (met_cpupmu.ondiemet_mode == 1)
		ret += cpupmu_print_header(buf + ret, len - ret);

	return ret;
}
1122
1123static int sspm_pmu_process_argument(const char *arg, int len)
1124{
1125 if (met_cpupmu.ondiemet_mode == 1) {
1126
1127 if (!cpu_pmu->pmu_read_clear_overflow_flag) {
1128 MET_TRACE("[MET_PMU] cpu_pmu->pmu_read_clear_overflow_flag=NULL, "
1129 "pmu on sspm was not supported on this platform\n");
1130 pr_debug("[MET_PMU] cpu_pmu->pmu_read_clear_overflow_flag=NULL, "
1131 "pmu on sspm was not supported on this platform\n");
1132 return -EINVAL;
1133 }
1134
1135 return cpupmu_process_argument(arg, len);
1136 }
1137 return 0;
1138}
1139#endif
1140
/* metdevice registration for the CPU PMU sampler ("cpu"). */
struct metdevice met_cpupmu = {
	.name = "cpu",
	.type = MET_TYPE_PMU,
	.cpu_related = 1,
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.uniq_start = cpupmu_unique_start,
	.stop = cpupmu_stop,
	.uniq_stop = cpupmu_unique_stop,
	/* poll every tick via met_perf_cpupmu_polling */
	.polling_interval = 1,
	.timed_polling = met_perf_cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument,
	.cpu_state_notify = cpupmu_cpu_state_notify,

#if defined(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) && defined(ONDIEMET_SUPPORT)
	/* SSPM (on-die-met) hooks used when sampling runs on the tinysys core */
	.ondiemet_mode = 1,
	.ondiemet_start = sspm_pmu_start,
	.uniq_ondiemet_start = sspm_pmu_unique_start,
	.uniq_ondiemet_stop = sspm_pmu_unique_stop,
	.ondiemet_stop = sspm_pmu_stop,
	.ondiemet_print_header = sspm_pmu_print_header,
	.ondiemet_process_argument = sspm_pmu_process_argument
#endif
};
1167};