// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 MediaTek Inc.
 */

#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/perf_event.h>

#if (IS_ENABLED(CONFIG_ARM64) || IS_ENABLED(CONFIG_ARM))
#include <linux/platform_device.h>
#include <linux/perf/arm_pmu.h>
#endif

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/perf/arm_pmu.h>
#include <linux/irqreturn.h>
#include <linux/irq_work.h>
#include "met_drv.h"
#include "met_kernel_symbol.h"
#include "interface.h"
#include "trace.h"
#include "cpu_pmu.h"
#include "mtk_typedefs.h"

#if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT)
#if defined(ONDIEMET_SUPPORT)
#include "sspm/ondiemet_sspm.h"
#elif defined(TINYSYS_SSPM_SUPPORT)
#include "tinysys_sspm.h"
#include "tinysys_mgr.h" /* for ondiemet_module */
#include "sspm_met_ipi_handle.h"
#endif
#endif

struct cpu_pmu_hw *cpu_pmu;
static int counter_cnt[MXNR_CPU];
static int nr_arg[MXNR_CPU];

int met_perf_cpupmu_status;

static int mtk_pmu_event_enable = 0;
static struct kobject *kobj_cpu;
DECLARE_KOBJ_ATTR_INT(mtk_pmu_event_enable, mtk_pmu_event_enable);
#define KOBJ_ATTR_LIST \
	do { \
		KOBJ_ATTR_ITEM(mtk_pmu_event_enable); \
	} while (0)

DEFINE_MUTEX(handle_irq_lock);
irqreturn_t (*handle_irq_orig)(struct arm_pmu *pmu);

#if IS_ENABLED(CONFIG_CPU_PM)
static int use_cpu_pm_pmu_notifier = 0;

/* helper notifier for maintaining pmu states before cpu state transition */
static int cpu_pm_pmu_notify(struct notifier_block *b,
			     unsigned long cmd,
			     void *p)
{
	int ii;
	int cpu, count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	if (!met_perf_cpupmu_status)
		return NOTIFY_OK;

	cpu = raw_smp_processor_id();

	switch (cmd) {
	case CPU_PM_ENTER:
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);
		for (ii = 0; ii < count; ii++)
			cpu_pmu->cpu_pm_unpolled_loss[cpu][ii] += pmu_value[ii];

		cpu_pmu->stop(cpu_pmu->event_count[cpu]);
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

struct notifier_block cpu_pm_pmu_notifier = {
	.notifier_call = cpu_pm_pmu_notify,
};
#endif

static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfCurr);
static DEFINE_PER_CPU(unsigned long long[MXNR_PMU_EVENTS], perfPrev);
static DEFINE_PER_CPU(int[MXNR_PMU_EVENTS], perfCntFirst);
static DEFINE_PER_CPU(struct perf_event * [MXNR_PMU_EVENTS], pevent);
static DEFINE_PER_CPU(struct perf_event_attr [MXNR_PMU_EVENTS], pevent_attr);
static DEFINE_PER_CPU(int, perfSet);
static DEFINE_PER_CPU(int, cpu_status);

#ifdef CPUPMU_V8_2
#include <linux/of.h>
#include <linux/of_address.h>

#ifdef USE_KERNEL_SYNC_WRITE_H
#include <mt-plat/sync_write.h>
#else
#include "sync_write.h"
#endif

#ifdef USE_KERNEL_MTK_IO_H
#include <mt-plat/mtk_io.h>
#else
#include "mtk_io.h"
#endif

static char mcucfg_desc[] = "mediatek,mcucfg";
static void __iomem *mcucfg_base = NULL;
#define DBG_CONTROL_CPU6 ((unsigned long)mcucfg_base + 0x3000 + 0x308) /* DBG_CONTROL */
#define DBG_CONTROL_CPU7 ((unsigned long)mcucfg_base + 0x3800 + 0x308) /* DBG_CONTROL */
#define ENABLE_MTK_PMU_EVENTS_OFFSET 1
static int restore_dbg_ctrl_cpu6;
static int restore_dbg_ctrl_cpu7;

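/*
 * Map mcucfg and set the ENABLE_MTK_PMU_EVENTS bit in the DBG_CONTROL
 * registers of CPU6/CPU7 so the MTK-internal (A75) PMU events can be used.
 * The previous bit state is remembered in restore_dbg_ctrl_cpu6/7 so that
 * cpu_pmu_debug_uninit() can restore it. Returns 1 on success, 0 on failure.
 */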
int cpu_pmu_debug_init(void)
{
	struct device_node *node = NULL;
	unsigned int value6, value7;

	/* for A75 MTK internal events */
	if (mcucfg_base == NULL) {
		node = of_find_compatible_node(NULL, NULL, mcucfg_desc);
		if (node == NULL) {
			MET_TRACE("[MET_PMU_DB] of_find node == NULL\n");
			pr_debug("[MET_PMU_DB] of_find node == NULL\n");
			goto out;
		}
		mcucfg_base = of_iomap(node, 0);
		of_node_put(node);
		if (mcucfg_base == NULL) {
			MET_TRACE("[MET_PMU_DB] mcucfg_base == NULL\n");
			pr_debug("[MET_PMU_DB] mcucfg_base == NULL\n");
			goto out;
		}
		MET_TRACE("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
		pr_debug("[MET_PMU_DB] regbase %08lx\n", DBG_CONTROL_CPU7);
	}

	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	if (value6 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu6 = 1;
	} else {
		restore_dbg_ctrl_cpu6 = 0;
		mt_reg_sync_writel(value6 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU6);
	}

	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	if (value7 & (1 << ENABLE_MTK_PMU_EVENTS_OFFSET)) {
		restore_dbg_ctrl_cpu7 = 1;
	} else {
		restore_dbg_ctrl_cpu7 = 0;
		mt_reg_sync_writel(value7 | (1 << ENABLE_MTK_PMU_EVENTS_OFFSET), DBG_CONTROL_CPU7);
	}

	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	return 1;

out:
	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL init error\n");
	pr_debug("[MET_PMU_DB]DBG_CONTROL init error\n");
	return 0;
}

int cpu_pmu_debug_uninit(void)
{
	unsigned int value6, value7;

	if (restore_dbg_ctrl_cpu6 == 0) {
		value6 = readl(IOMEM(DBG_CONTROL_CPU6));
		mt_reg_sync_writel(value6 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU6);
	}
	if (restore_dbg_ctrl_cpu7 == 0) {
		value7 = readl(IOMEM(DBG_CONTROL_CPU7));
		mt_reg_sync_writel(value7 & (~(1 << ENABLE_MTK_PMU_EVENTS_OFFSET)), DBG_CONTROL_CPU7);
	}

	value6 = readl(IOMEM(DBG_CONTROL_CPU6));
	value7 = readl(IOMEM(DBG_CONTROL_CPU7));
	MET_TRACE("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);
	pr_debug("[MET_PMU_DB]DBG_CONTROL_CPU6 = %08x, DBG_CONTROL_CPU7 = %08x\n", value6, value7);

	if (mcucfg_base != NULL) {
		iounmap(mcucfg_base);
		mcucfg_base = NULL;
	}
	restore_dbg_ctrl_cpu6 = 0;
	restore_dbg_ctrl_cpu7 = 0;
	return 1;
}
#endif

noinline void mp_cpu(unsigned char cnt, unsigned int *value)
{
	MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}

static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	/*
	 * Required because perf_event_create_kernel_counter() requires an
	 * overflow handler, even though all we do is poll the counters.
	 */
}

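/*
 * Read every active perf event on this CPU through met_perf_event_read_local(),
 * convert the raw counts into per-interval deltas, and emit them with mp_cpu()
 * once a complete sample (counter_cnt[cpu] values) has been collected.
 */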
static void perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int event_count = cpu_pmu->event_count[cpu];
	struct met_pmu *pmu = cpu_pmu->pmu[cpu];
	int i, count;
	unsigned long long delta;
	struct perf_event *ev;
	unsigned int pmu_value[MXNR_PMU_EVENTS];
	u64 value;
	int ret;

	if (per_cpu(perfSet, cpu) == 0)
		return;

	count = 0;
	for (i = 0; i < event_count; i++) {
		if (pmu[i].mode == 0)
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			if (!met_export_api_symbol->met_perf_event_read_local)
				continue;

			ret = met_export_api_symbol->met_perf_event_read_local(ev, &value);
			if (ret < 0) {
				PR_BOOTMSG_ONCE("[MET_PMU] perf_event_read_local fail (ret=%d)\n", ret);
				pr_debug("[MET_PMU] perf_event_read_local fail (ret=%d)\n", ret);
				continue;
			}

			per_cpu(perfCurr, cpu)[i] = value;
			delta = (per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i]);
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			if (per_cpu(perfCntFirst, cpu)[i] == 1) {
				/* omit the delta of the first read; there is no previous sample yet */
				per_cpu(perfCntFirst, cpu)[i] = 0;
				continue;
			}
			pmu_value[count] = (unsigned int)delta;
			count++;
		}
	}

	if (count == counter_cnt[cpu])
		mp_cpu(count, pmu_value);
}

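/*
 * Create a pinned, free-running kernel counter for one PMU event on the given
 * CPU. Event number 0xff is mapped to the generic PERF_COUNT_HW_CPU_CYCLES
 * event; any other value is passed through as a raw event code. Returns NULL
 * if the event cannot be created or cannot be brought into the ACTIVE state.
 */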
static struct perf_event *perf_event_create(int cpu, unsigned short event, int count)
{
	struct perf_event_attr *ev_attr;
	struct perf_event *ev;

	ev_attr = per_cpu(pevent_attr, cpu) + count;
	memset(ev_attr, 0, sizeof(*ev_attr));
	if (event == 0xff) {
		ev_attr->config = PERF_COUNT_HW_CPU_CYCLES;
		ev_attr->type = PERF_TYPE_HARDWARE;
	} else {
		ev_attr->config = event;
		ev_attr->type = PERF_TYPE_RAW;
	}
	ev_attr->size = sizeof(*ev_attr);
	ev_attr->sample_period = 0;
	ev_attr->pinned = 1;

	ev = perf_event_create_kernel_counter(ev_attr, cpu, NULL, dummy_handler, NULL);
	if (IS_ERR(ev))
		return NULL;
	do {
		if (ev->state == PERF_EVENT_STATE_ACTIVE)
			break;
		if (ev->state == PERF_EVENT_STATE_ERROR) {
			perf_event_enable(ev);
			if (ev->state == PERF_EVENT_STATE_ACTIVE)
				break;
		}
		perf_event_release_kernel(ev);
		return NULL;
	} while (0);

	return ev;
}

static void perf_event_release(int cpu, struct perf_event *ev)
{
	if (ev->state == PERF_EVENT_STATE_ACTIVE)
		perf_event_disable(ev);
	perf_event_release_kernel(ev);
}

#if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT)
#if defined(ONDIEMET_SUPPORT) || defined(TINYSYS_SSPM_SUPPORT)
#define PMU_OVERFLOWED_MASK 0xffffffff

static inline int pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & PMU_OVERFLOWED_MASK;
}

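/*
 * Replacement arm_pmu::handle_irq used while ondiemet (SSPM) mode is active:
 * it read-clears the overflow flags and runs pending irq_work, but skips the
 * normal per-event overflow handling (hence "ignore_overflow").
 */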
static irqreturn_t perf_event_handle_irq_ignore_overflow(struct arm_pmu *pmu)
{
	u32 pmovsr;

	pmovsr = cpu_pmu->pmu_read_clear_overflow_flag();

	if (!pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	irq_work_run();
	return IRQ_HANDLED;
}
#endif
#endif

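/*
 * First-time setup for one CPU: create a perf event for every configured
 * counter slot, enable it, and remember it in the per-CPU pevent array. In
 * ondiemet (SSPM) mode the arm_pmu interrupt handler is additionally swapped
 * for perf_event_handle_irq_ignore_overflow (see above).
 */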
static int perf_thread_set_perf_events(int cpu)
{
	int i, size;
	struct perf_event *ev;

	size = sizeof(struct perf_event_attr);
	if (per_cpu(perfSet, cpu) == 0) {
		int event_count = cpu_pmu->event_count[cpu];
		struct met_pmu *pmu = cpu_pmu->pmu[cpu];

		for (i = 0; i < event_count; i++) {
			if (!pmu[i].mode)
				continue; /* skip disabled counters */
			ev = perf_event_create(cpu, pmu[i].event, i);
			if (ev == NULL) {
				met_cpupmu.mode = 0;
				met_perf_cpupmu_status = 0;

				MET_TRACE("[MET_PMU] cpu %d failed to register pmu event %4x\n", cpu, pmu[i].event);
				pr_notice("[MET_PMU] cpu %d failed to register pmu event %4x\n", cpu, pmu[i].event);
				continue;
			}

			/*
			 * In the perf-event implementation, the hardware PMU slots and
			 * the cycle counter are mapped to perf_event::hw::idx as follows:
			 *
			 * | idx | hardware slot |
			 * |-----+---------------|
			 * |  0  | pmccntr_el0   |
			 * |  1  | 0             |
			 * |  2  | 1             |
			 * |  3  | 2             |
			 * |  4  | 3             |
			 * |  5  | 4             |
			 * |  6  | 5             |
			 */
			if (ev->hw.idx != 0) {
				MET_TRACE("[MET_PMU] cpu %d registered in pmu slot: [%d] evt=%#04x\n",
					  cpu, ev->hw.idx - 1, pmu[i].event);
				pr_debug("[MET_PMU] cpu %d registered in pmu slot: [%d] evt=%#04x\n",
					 cpu, ev->hw.idx - 1, pmu[i].event);
			} else if (ev->hw.idx == 0) {
				MET_TRACE("[MET_PMU] cpu %d registered cycle count evt=%#04x\n",
					  cpu, pmu[i].event);
				pr_debug("[MET_PMU] cpu %d registered cycle count evt=%#04x\n",
					 cpu, pmu[i].event);
			}

			per_cpu(pevent, cpu)[i] = ev;
			per_cpu(perfPrev, cpu)[i] = 0;
			per_cpu(perfCurr, cpu)[i] = 0;
			perf_event_enable(ev);
			per_cpu(perfCntFirst, cpu)[i] = 1;

#if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT)
#if defined(ONDIEMET_SUPPORT) || defined(TINYSYS_SSPM_SUPPORT)
			if (met_cpupmu.ondiemet_mode) {
				struct arm_pmu *armpmu;

				armpmu = container_of(ev->pmu, struct arm_pmu, pmu);
				mutex_lock(&handle_irq_lock);
				if (armpmu && armpmu->handle_irq != perf_event_handle_irq_ignore_overflow) {
					pr_debug("[MET_PMU] replaced original handle_irq=%p with dummy function\n",
						 armpmu->handle_irq);
					handle_irq_orig = armpmu->handle_irq;
					armpmu->handle_irq = perf_event_handle_irq_ignore_overflow;
				}
				mutex_unlock(&handle_irq_lock);
			}
#endif
#endif
		} /* for all PMU counters */
		per_cpu(perfSet, cpu) = 1;
	} /* for perfSet */

	return 0;
}

static void met_perf_cpupmu_start(int cpu)
{
	if (met_cpupmu.mode == 0)
		return;

	perf_thread_set_perf_events(cpu);
}

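/*
 * Tear-down counterpart of perf_thread_set_perf_events(): restore the original
 * arm_pmu interrupt handler (ondiemet mode only) and release every perf event
 * that was created for this CPU.
 */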
static void perf_thread_down(int cpu)
{
	int i;
	struct perf_event *ev;
	int event_count;
	struct met_pmu *pmu;

	if (per_cpu(perfSet, cpu) == 0)
		return;

	per_cpu(perfSet, cpu) = 0;
	event_count = cpu_pmu->event_count[cpu];
	pmu = cpu_pmu->pmu[cpu];
	for (i = 0; i < event_count; i++) {
		ev = per_cpu(pevent, cpu)[i];
		if (ev != NULL) {

#if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT)
#if defined(ONDIEMET_SUPPORT) || defined(TINYSYS_SSPM_SUPPORT)
			if (met_cpupmu.ondiemet_mode) {
				struct arm_pmu *armpmu;

				armpmu = container_of(ev->pmu, struct arm_pmu, pmu);
				mutex_lock(&handle_irq_lock);
				if (armpmu && armpmu->handle_irq == perf_event_handle_irq_ignore_overflow) {
					pr_debug("[MET_PMU] restore original handle_irq=%p\n", handle_irq_orig);
					armpmu->handle_irq = handle_irq_orig;
					handle_irq_orig = NULL;
				}
				mutex_unlock(&handle_irq_lock);
			}
#endif
#endif

			perf_event_release(cpu, ev);
			per_cpu(pevent, cpu)[i] = NULL;
		}
	}
}

static void met_perf_cpupmu_stop(int cpu)
{
	perf_thread_down(cpu);
}

static int cpupmu_create_subfs(struct kobject *parent)
{
	int ret = 0;

	cpu_pmu = cpu_pmu_hw_init();
	if (cpu_pmu == NULL) {
		PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
		return -ENODEV;
	}

	kobj_cpu = parent;

#define KOBJ_ATTR_ITEM(attr_name) \
	do { \
		ret = sysfs_create_file(kobj_cpu, &attr_name ## _attr.attr); \
		if (ret != 0) { \
			pr_notice("Failed to create " #attr_name " in sysfs\n"); \
			return ret; \
		} \
	} while (0)
	KOBJ_ATTR_LIST;
#undef KOBJ_ATTR_ITEM

	return 0;
}

static void cpupmu_delete_subfs(void)
{
#define KOBJ_ATTR_ITEM(attr_name) \
	sysfs_remove_file(kobj_cpu, &attr_name ## _attr.attr)

	if (kobj_cpu != NULL) {
		KOBJ_ATTR_LIST;
		kobj_cpu = NULL;
	}
#undef KOBJ_ATTR_ITEM
}

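/*
 * Per-interval polling entry point (metdevice::timed_polling). Depending on
 * met_cpu_pmu_method it samples either through the perf-event path or by
 * reading the PMU directly via the MET pmu driver; in the latter case it also
 * adds back any counts accumulated across CPU low-power transitions.
 */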
void met_perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int count;
	unsigned int pmu_value[MXNR_PMU_EVENTS];

	if (per_cpu(cpu_status, cpu) != MET_CPU_ONLINE)
		return;

	if (met_cpu_pmu_method) {
		perf_cpupmu_polling(stamp, cpu);
	} else {
		count = cpu_pmu->polling(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu], pmu_value);

#if IS_ENABLED(CONFIG_CPU_PM)
		if (met_cpu_pm_pmu_reconfig) {
			int ii;

			for (ii = 0; ii < count; ii++)
				pmu_value[ii] += cpu_pmu->cpu_pm_unpolled_loss[cpu][ii];
		}
#endif

		mp_cpu(count, pmu_value);

#if IS_ENABLED(CONFIG_CPU_PM)
		if (met_cpu_pm_pmu_reconfig)
			memset(cpu_pmu->cpu_pm_unpolled_loss[cpu], 0, sizeof(cpu_pmu->cpu_pm_unpolled_loss[0]));
#endif
	}
}

static void cpupmu_start(void)
{
	int cpu = raw_smp_processor_id();

	if (!met_cpu_pmu_method) {
		nr_arg[cpu] = 0;
		cpu_pmu->start(cpu_pmu->pmu[cpu], cpu_pmu->event_count[cpu]);

		met_perf_cpupmu_status = 1;
		per_cpu(cpu_status, cpu) = MET_CPU_ONLINE;
	}
}

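/*
 * Runs once per session (not per CPU): optionally enables the MTK-internal
 * PMU events, registers the CPU-PM notifier when met_cpu_pm_pmu_reconfig is
 * set so counter values survive low-power transitions, and, in perf-event
 * mode, starts the counters on every possible CPU.
 */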
static void cpupmu_unique_start(void)
{
	int cpu;

#ifdef CPUPMU_V8_2
	int ret = 0;

	if (mtk_pmu_event_enable == 1) {
		ret = cpu_pmu_debug_init();
		if (ret == 0)
			PR_BOOTMSG("Failed to init CPU PMU debug!!\n");
	}
#endif

#if IS_ENABLED(CONFIG_CPU_PM)
	use_cpu_pm_pmu_notifier = 0;
	if (met_cpu_pm_pmu_reconfig) {
		if (met_cpu_pmu_method) {
			met_cpu_pm_pmu_reconfig = 0;
			MET_TRACE("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
			pr_debug("[MET_PMU] met_cpu_pmu_method=%d, met_cpu_pm_pmu_reconfig forced disabled\n", met_cpu_pmu_method);
		} else {
			memset(cpu_pmu->cpu_pm_unpolled_loss, 0, sizeof(cpu_pmu->cpu_pm_unpolled_loss));
			cpu_pm_register_notifier(&cpu_pm_pmu_notifier);
			use_cpu_pm_pmu_notifier = 1;
		}
	}
#else
	if (met_cpu_pm_pmu_reconfig) {
		met_cpu_pm_pmu_reconfig = 0;
		MET_TRACE("[MET_PMU] CONFIG_CPU_PM disabled, met_cpu_pm_pmu_reconfig forced disabled\n");
		pr_debug("[MET_PMU] CONFIG_CPU_PM disabled, met_cpu_pm_pmu_reconfig forced disabled\n");
	}
#endif
	MET_TRACE("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);
	pr_debug("[MET_PMU] met_cpu_pm_pmu_reconfig=%u\n", met_cpu_pm_pmu_reconfig);

	if (met_cpu_pmu_method) {
		for_each_possible_cpu(cpu) {
			met_perf_cpupmu_start(cpu);

			met_perf_cpupmu_status = 1;
			per_cpu(cpu_status, cpu) = MET_CPU_ONLINE;
		}
	}
}

static void cpupmu_stop(void)
{
	int cpu = raw_smp_processor_id();

	met_perf_cpupmu_status = 0;

	if (!met_cpu_pmu_method)
		cpu_pmu->stop(cpu_pmu->event_count[cpu]);
}

static void cpupmu_unique_stop(void)
{
	int cpu;

	if (met_cpu_pmu_method) {
		for_each_possible_cpu(cpu) {
			met_perf_cpupmu_stop(cpu);
		}
	}

#ifdef CPUPMU_V8_2
	if (mtk_pmu_event_enable == 1)
		cpu_pmu_debug_uninit();
#endif

#if IS_ENABLED(CONFIG_CPU_PM)
	if (use_cpu_pm_pmu_notifier)
		cpu_pm_unregister_notifier(&cpu_pm_pmu_notifier);
#endif
}

static const char cache_line_header[] =
	"met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
static const char header[] =
	"met-info [000] 0.0: met_cpu_header_v2: %d";

static const char help[] =
	"  --pmu-cpu-evt=[cpu_list:]event_list   select CPU-PMU events in %s\n"
	"      cpu_list: specify the cpu_id list or apply to all the cores\n"
	"                example: 0,1,2\n"
	"      event_list: specify the event number\n"
	"                  example: 0x8,0xff\n";

static int cpupmu_print_help(char *buf, int len)
{
	return snprintf(buf, PAGE_SIZE, help, cpu_pmu->cpu_name);
}

static int reset_driver_stat(void)
{
	int cpu, i;
	int event_count;
	struct met_pmu *pmu;

	met_cpupmu.mode = 0;
	for_each_possible_cpu(cpu) {
		event_count = cpu_pmu->event_count[cpu];
		pmu = cpu_pmu->pmu[cpu];
		counter_cnt[cpu] = 0;
		nr_arg[cpu] = 0;
		for (i = 0; i < event_count; i++) {
			pmu[i].mode = MODE_DISABLED;
			pmu[i].event = 0;
			pmu[i].freq = 0;
		}
	}

	return 0;
}

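/*
 * Emit the trace header lines: the sampling method, the cache line size, and
 * one met_cpu_header_v2 line per CPU listing the selected event numbers.
 * The per-CPU configuration is then reset so the next session starts clean.
 */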
static int cpupmu_print_header(char *buf, int len)
{
	int cpu, i, ret, first;
	int event_count;
	struct met_pmu *pmu;

	ret = 0;

	/* append the CPU PMU access method */
	if (met_cpu_pmu_method)
		ret += snprintf(buf + ret, len,
				"met-info [000] 0.0: CPU_PMU_method: perf APIs\n");
	else
		ret += snprintf(buf + ret, len,
				"met-info [000] 0.0: CPU_PMU_method: MET pmu driver\n");

	/* append the cache line size */
	ret += snprintf(buf + ret, len - ret, cache_line_header, cache_line_size());
	ret += snprintf(buf + ret, len - ret, "# mp_cpu: pmu_value1, ...\n");

	for_each_possible_cpu(cpu) {
		event_count = cpu_pmu->event_count[cpu];
		pmu = cpu_pmu->pmu[cpu];
		first = 1;
		for (i = 0; i < event_count; i++) {
			if (pmu[i].mode == 0)
				continue;
			if (first) {
				ret += snprintf(buf + ret, len - ret, header, cpu);
				first = 0;
			}
			ret += snprintf(buf + ret, len - ret, ",0x%x", pmu[i].event);
			pmu[i].mode = 0;
		}
		if (!first)
			ret += snprintf(buf + ret, len - ret, "\n");
	}

	reset_driver_stat();

	return ret;
}

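/*
 * Split a comma-separated list of numbers out of `arg' (length `len') into
 * `list', holding at most `list_cnt' entries. Returns the number of values
 * parsed, or -1 on overflow or a malformed number.
 */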
static int met_parse_num_list(char *arg, int len, int *list, int list_cnt)
{
	int nr_num = 0;
	char *num;
	int num_len;

	/* search ',' as the splitter */
	while (len) {
		num = arg;
		num_len = 0;
		if (list_cnt <= 0)
			return -1;
		while (len) {
			len--;
			if (*arg == ',') {
				*(arg++) = '\0';
				break;
			}
			arg++;
			num_len++;
		}
		if (met_parse_num(num, list, num_len) < 0)
			return -1;
		list++;
		list_cnt--;
		nr_num++;
	}

	return nr_num;
}

static const struct perf_pmu_events_attr *
perf_event_get_evt_attr_by_name(const struct perf_event *ev, const char *name)
{
	struct arm_pmu *arm_pmu;
	struct attribute **attrp;
	struct device_attribute *dev_attr_p;
	struct perf_pmu_events_attr *ev_attr_p;

	arm_pmu = container_of(ev->pmu, struct arm_pmu, pmu);

	for (attrp = arm_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS]->attrs;
	     *attrp != NULL;
	     attrp++) {

		dev_attr_p = container_of(*attrp, struct device_attribute, attr);
		ev_attr_p = container_of(dev_attr_p, struct perf_pmu_events_attr, attr);

		if (strcmp((*attrp)->name, name) == 0)
			return ev_attr_p;
	}

	return NULL;
}

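/*
 * Parse one "--pmu-cpu-evt=[cpu_list:]event_list" argument: determine which
 * CPUs it applies to, validate each event (including a trial perf-event
 * allocation when the perf-API method is used), and record the events in the
 * per-CPU met_pmu tables. The last hardware slot is reserved for the cycle
 * counter (event 0xff / "cpu_cycles").
 */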
static int cpupmu_process_argument(const char *arg, int len)
{
	char *arg1 = (char *)arg;
	int len1 = len;
	int cpu, cpu_list[MXNR_CPU];
	int nr_events, event_list[MXNR_PMU_EVENTS] = {0};
	int i;
	int nr_counters;
	struct met_pmu *pmu;
	int arg_nr;
	int event_no;
	int is_cpu_cycle_evt;
	const struct perf_pmu_events_attr *ev_attr_p;

	/*
	 * split cpu_list and event_list by ':'
	 * arg, len: cpu_list when found (i < len)
	 * arg1, len1: event_list
	 */
	for (i = 0; i < len; i++) {
		if (arg[i] == ':') {
			arg1[i] = '\0';
			arg1 += i + 1;
			len1 = len - i - 1;
			len = i;
			break;
		}
	}

	/*
	 * set up the cpu_list array
	 * 1: selected
	 * 0: unselected
	 */
	if (arg1 != arg) { /* is a cpu_id list specified? */
		int list[MXNR_CPU] = {0}, cnt;
		int cpu_id;

		cnt = met_parse_num_list((char *)arg, len, list, ARRAY_SIZE(list));
		if (cnt <= 0)
			goto arg_out;
		memset(cpu_list, 0, sizeof(cpu_list));
		for (i = 0; i < cnt; i++) {
			cpu_id = list[i];
			if (cpu_id < 0 || cpu_id >= ARRAY_SIZE(cpu_list))
				goto arg_out;
			cpu_list[cpu_id] = 1;
		}
	} else {
		memset(cpu_list, 1, sizeof(cpu_list));
	}

	/* get event_list */
	nr_events = met_parse_num_list(arg1, len1, event_list, ARRAY_SIZE(event_list));
	if (nr_events <= 0)
		goto arg_out;

	/* for each cpu in cpu_list, add all the events in event_list */
	for_each_possible_cpu(cpu) {
		pmu = cpu_pmu->pmu[cpu];
		arg_nr = nr_arg[cpu];

		if (cpu_list[cpu] == 0)
			continue;

		if (met_cpu_pmu_method)
			nr_counters = perf_num_counters();
		else
			nr_counters = cpu_pmu->event_count[cpu];

		pr_debug("[MET_PMU] pmu slot count=%d\n", nr_counters);

		if (nr_counters == 0)
			goto arg_out;

		for (i = 0; i < nr_events; i++) {
			event_no = event_list[i];
			is_cpu_cycle_evt = 0;
			/*
			 * check whether the event is a duplicate; 0xff is not
			 * included in this check
			 */
			if (cpu_pmu->check_event(pmu, arg_nr, event_no) < 0)
				goto arg_out;

			/*
			 * test whether this event is available when using the perf-API method
			 */
			if (met_cpu_pmu_method) {
				struct perf_event *ev;

				if (!cpu_pmu->perf_event_get_evttype) {
					MET_TRACE("[MET_PMU] cpu_pmu->perf_event_get_evttype=NULL, "
						  "met pmu on perf-event is not supported on this platform\n");
					pr_debug("[MET_PMU] cpu_pmu->perf_event_get_evttype=NULL, "
						 "met pmu on perf-event is not supported on this platform\n");
					goto arg_out;
				}

				ev = perf_event_create(cpu, event_no, arg_nr);
				if (ev == NULL) {
					pr_debug("!!!!!!!! [MET_PMU] failed pmu allocation test (event_no=%#04x)\n", event_no);
					goto arg_out;
				}

				ev_attr_p = perf_event_get_evt_attr_by_name(ev, "cpu_cycles");
				if (ev_attr_p && cpu_pmu->perf_event_get_evttype(ev) == ev_attr_p->id)
					is_cpu_cycle_evt = 1;

				perf_event_release(cpu, ev);
			}

			if (met_cpu_pmu_method) {
				if (is_cpu_cycle_evt) {
					if (pmu[nr_counters - 1].mode == MODE_POLLING)
						goto arg_out;
					pmu[nr_counters - 1].mode = MODE_POLLING;
					pmu[nr_counters - 1].event = event_no;
					pmu[nr_counters - 1].freq = 0;
				} else {
					if (arg_nr >= (nr_counters - 1))
						goto arg_out;
					pmu[arg_nr].mode = MODE_POLLING;
					pmu[arg_nr].event = event_no;
					pmu[arg_nr].freq = 0;
					arg_nr++;
				}
			} else {
				if (event_no == 0xff) {
					if (pmu[nr_counters - 1].mode == MODE_POLLING)
						goto arg_out;
					pmu[nr_counters - 1].mode = MODE_POLLING;
					pmu[nr_counters - 1].event = 0xff;
					pmu[nr_counters - 1].freq = 0;
				} else {
					if (arg_nr >= (nr_counters - 1))
						goto arg_out;
					pmu[arg_nr].mode = MODE_POLLING;
					pmu[arg_nr].event = event_no;
					pmu[arg_nr].freq = 0;
					arg_nr++;
				}
			}
			counter_cnt[cpu]++;
		}
		nr_arg[cpu] = arg_nr;
	}

	met_cpupmu.mode = 1;
	return 0;

arg_out:
	reset_driver_stat();
	return -EINVAL;
}

static void cpupmu_cpu_state_notify(long cpu, unsigned long action)
{
	per_cpu(cpu_status, cpu) = action;

#if (IS_ENABLED(CONFIG_ARM64) || IS_ENABLED(CONFIG_ARM))
	if (met_cpu_pmu_method && action == MET_CPU_OFFLINE) {
		struct perf_event *event = NULL;
		struct arm_pmu *armpmu = NULL;
		struct platform_device *pmu_device = NULL;
		int irq = 0;

		event = per_cpu(pevent, cpu)[0];
		if (event)
			armpmu = to_arm_pmu(event->pmu);
		pr_debug("!!!!!!!! %s_%ld, event=%p\n", __FUNCTION__, cpu, event);

		if (armpmu)
			pmu_device = armpmu->plat_device;
		pr_debug("!!!!!!!! %s_%ld, armpmu=%p\n", __FUNCTION__, cpu, armpmu);

		if (pmu_device)
			irq = platform_get_irq(pmu_device, 0);
		pr_debug("!!!!!!!! %s_%ld, pmu_device=%p\n", __FUNCTION__, cpu, pmu_device);

		if (irq > 0)
			disable_percpu_irq(irq);
		pr_debug("!!!!!!!! %s_%ld, irq=%d\n", __FUNCTION__, cpu, irq);
	}
#endif
}

#if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT)
#if defined(ONDIEMET_SUPPORT) || defined(TINYSYS_SSPM_SUPPORT)
static void sspm_pmu_start(void)
{
	ondiemet_module[ONDIEMET_SSPM] |= ID_PMU;

	if (met_cpupmu.ondiemet_mode == 1)
		cpupmu_start();
}

static int cycle_count_mode_enabled(int cpu)
{
	int event_cnt;
	struct met_pmu *pmu;

	pmu = cpu_pmu->pmu[cpu];

	if (met_cpu_pmu_method)
		event_cnt = perf_num_counters();
	else
		event_cnt = cpu_pmu->event_count[cpu];

	return pmu[event_cnt - 1].mode == MODE_POLLING;
}

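/*
 * Push the per-CPU PMU layout to the SSPM over IPI: the number of events to
 * poll (SET_PMU_EVT_CNT), the base hardware slot the first perf event landed
 * on (SET_PMU_BASE_OFFSET), and, when selected, the cycle counter enable
 * (SET_PMU_CYCCNT_ENABLE).
 */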
static void ipi_config_pmu_counter_cnt(void)
{
	int ret, cpu, ii, cnt_num;
	unsigned int rdata;
	unsigned int ipi_buf[4];
	struct hw_perf_event *hwc;
	unsigned int base_offset;

	for_each_possible_cpu(cpu) {
		for (ii = 0; ii < 4; ii++)
			ipi_buf[ii] = 0;

		ipi_buf[0] = MET_MAIN_ID | (MID_PMU << MID_BIT_SHIFT) | MET_ARGU | SET_PMU_EVT_CNT;
		/*
		 * XXX: on the sspm side, the cycle counter is not counted in the
		 * total event number `counter_cnt', but is controlled by an
		 * additional argument `SET_PMU_CYCCNT_ENABLE' instead
		 */
		cnt_num = (cycle_count_mode_enabled(cpu) ?
			   (counter_cnt[cpu] - 1) : counter_cnt[cpu]);
		ipi_buf[1] = (cpu << 16) | (cnt_num & 0xffff);

		MET_TRACE("[MET_PMU][IPI_CONFIG] core=%d, pmu_counter_cnt=%d\n", cpu, cnt_num);
		pr_debug("[MET_PMU][IPI_CONFIG] core=%d, pmu_counter_cnt=%d\n", cpu, cnt_num);

		MET_TRACE("[MET_PMU][IPI_CONFIG] sspm_buf_available=%d, in_interrupt()=%lu\n", sspm_buf_available, in_interrupt());
		pr_debug("[MET_PMU][IPI_CONFIG] sspm_buf_available=%d, in_interrupt()=%lu\n", sspm_buf_available, in_interrupt());

		if (sspm_buf_available == 1)
			ret = met_ipi_to_sspm_command((void *)ipi_buf, 0, &rdata, 1);

		for (ii = 0; ii < 4; ii++)
			ipi_buf[ii] = 0;

		if (per_cpu(pevent, cpu)[0]) {
			hwc = &(per_cpu(pevent, cpu)[0]->hw);
			base_offset = hwc->idx - 1;
		} else {
			base_offset = 0;
		}

		ipi_buf[0] = MET_MAIN_ID | (MID_PMU << MID_BIT_SHIFT) | MET_ARGU | SET_PMU_BASE_OFFSET;
		ipi_buf[1] = (cpu << 16) | (base_offset & 0xffff);

		MET_TRACE("[MET_PMU][IPI_CONFIG] core=%d, base offset set to %u\n", cpu, base_offset);
		pr_debug("[MET_PMU][IPI_CONFIG] core=%d, base offset set to %u\n", cpu, base_offset);

		if (sspm_buf_available == 1)
			ret = met_ipi_to_sspm_command((void *)ipi_buf, 0, &rdata, 1);

		if (cycle_count_mode_enabled(cpu)) {
			for (ii = 0; ii < 4; ii++)
				ipi_buf[ii] = 0;

			ipi_buf[0] = MET_MAIN_ID | (MID_PMU << MID_BIT_SHIFT) | MET_ARGU | SET_PMU_CYCCNT_ENABLE;
			ipi_buf[1] = cpu & 0xffff;

			MET_TRACE("[MET_PMU][IPI_CONFIG] core=%d, pmu cycle cnt enable\n", cpu);
			pr_debug("[MET_PMU][IPI_CONFIG] core=%d, pmu cycle cnt enable\n", cpu);

			if (sspm_buf_available == 1)
				ret = met_ipi_to_sspm_command((void *)ipi_buf, 0, &rdata, 1);
		}
	}
}

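/*
 * The SSPM-side sampler assumes that the perf events of a CPU occupy
 * consecutive hardware counter slots starting at the reported base offset.
 * Verify that assumption here; if it does not hold, the init IPIs are skipped
 * and SSPM-side polling stays disabled.
 */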
static int __is_perf_event_hw_slot_seq_order(int cpu)
{
	struct hw_perf_event *hwc, *hwc_prev;
	int event_count = cpu_pmu->event_count[cpu];
	int ii;

	/*
	 * the perf-event descriptor list should not contain any hole
	 * (except the special 0xff event, which is always the last element)
	 */
	if (per_cpu(pevent, cpu)[0] == NULL)
		return 1;

	/*
	 * XXX: no need to check the last slot,
	 * which is reserved for 0xff
	 */
	for (ii = 1; ii < event_count - 1; ii++) {
		if (per_cpu(pevent, cpu)[ii] == NULL)
			return 1;

		hwc = &(per_cpu(pevent, cpu)[ii]->hw);
		hwc_prev = &(per_cpu(pevent, cpu)[ii - 1]->hw);

		if (hwc->idx != hwc_prev->idx + 1)
			return 0;
	}

	return 1;
}

static int __validate_sspm_compatibility(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!__is_perf_event_hw_slot_seq_order(cpu)) {
			MET_TRACE("[MET_PMU] pmu not sequentially allocated on cpu %d\n", cpu);
			pr_debug("[MET_PMU] pmu not sequentially allocated on cpu %d\n", cpu);
			return -1;
		}
	}

	return 0;
}

static void sspm_pmu_unique_start(void)
{
	if (met_cpupmu.ondiemet_mode == 1)
		cpupmu_unique_start();

	if (met_cpupmu.ondiemet_mode == 1) {
		if (__validate_sspm_compatibility() == -1) {
			MET_TRACE("[MET_PMU] turned off sspm side polling\n");
			pr_debug("[MET_PMU] turned off sspm side polling\n");
			/* return without sending init IPIs, leaving the sspm side to poll nothing */
			return;
		}
	}

	ipi_config_pmu_counter_cnt();
}

static void sspm_pmu_unique_stop(void)
{
	if (met_cpupmu.ondiemet_mode == 1)
		cpupmu_unique_stop();
}

static void sspm_pmu_stop(void)
{
	if (met_cpupmu.ondiemet_mode == 1)
		cpupmu_stop();
}

static const char sspm_pmu_header[] = "met-info [000] 0.0: pmu_sampler: sspm\n";

static int sspm_pmu_print_header(char *buf, int len)
{
	int ret;

	ret = snprintf(buf, len, sspm_pmu_header);

	if (met_cpupmu.ondiemet_mode == 1)
		ret += cpupmu_print_header(buf + ret, len - ret);

	return ret;
}

static int sspm_pmu_process_argument(const char *arg, int len)
{
	if (met_cpupmu.ondiemet_mode == 1) {
		if (!cpu_pmu->pmu_read_clear_overflow_flag) {
			MET_TRACE("[MET_PMU] cpu_pmu->pmu_read_clear_overflow_flag=NULL, "
				  "pmu on sspm is not supported on this platform\n");
			pr_debug("[MET_PMU] cpu_pmu->pmu_read_clear_overflow_flag=NULL, "
				 "pmu on sspm is not supported on this platform\n");
			return -EINVAL;
		}

		return cpupmu_process_argument(arg, len);
	}
	return 0;
}
#endif /* end of #if defined(ONDIEMET_SUPPORT) || defined(TINYSYS_SSPM_SUPPORT) */
#endif /* end of #if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT) */

struct metdevice met_cpupmu = {
	.name = "cpu",
	.type = MET_TYPE_PMU,
	.cpu_related = 1,
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.uniq_start = cpupmu_unique_start,
	.stop = cpupmu_stop,
	.uniq_stop = cpupmu_unique_stop,
	.polling_interval = 1,
	.timed_polling = met_perf_cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument,
	.cpu_state_notify = cpupmu_cpu_state_notify,
#if IS_ENABLED(CONFIG_MTK_TINYSYS_SSPM_SUPPORT)
#if defined(ONDIEMET_SUPPORT) || defined(TINYSYS_SSPM_SUPPORT)
	.ondiemet_mode = 1,
	.ondiemet_start = sspm_pmu_start,
	.uniq_ondiemet_start = sspm_pmu_unique_start,
	.uniq_ondiemet_stop = sspm_pmu_unique_stop,
	.ondiemet_stop = sspm_pmu_stop,
	.ondiemet_print_header = sspm_pmu_print_header,
	.ondiemet_process_argument = sspm_pmu_process_argument
#endif
#endif
};