blob: e2568b8926c9548d2c0dbd468771b91dcef9ed38 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14/* include <asm/page.h> */
15#include <linux/slab.h>
16#include <linux/version.h>
17
18#include "interface.h"
19#include "trace.h"
20#include "cpu_pmu.h"
21#include "met_drv.h"
22
23#define MET_USER_EVENT_SUPPORT
24
25#include <linux/kthread.h>
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/wait.h>
29#include <linux/signal.h>
30#include <linux/workqueue.h>
31#include <linux/perf_event.h>
32
33#include "met_kernel_symbol.h"
34#include "interface.h"
35
/* Number of events selected for this session via --pmu-cpu-evt */
static int counter_cnt;

struct metdevice met_cpupmu;
struct cpu_pmu_hw *cpu_pmu;
/* Total PMU counter slots; the last slot is the cycle counter */
static int nr_counters;

/* Parent sysfs kobject ("cpu") under which per-counter dirs are made */
static struct kobject *kobj_cpu;
/* Per-counter bookkeeping array, allocated in cpupmu_create_subfs() */
static struct met_pmu *pmu;
/* Next free counter slot while parsing arguments */
static int nr_arg;


#define CNTMAX 8
/* Per-CPU snapshots of each counter's current/previous raw value */
static DEFINE_PER_CPU(unsigned long long[CNTMAX], perfCurr);
static DEFINE_PER_CPU(unsigned long long[CNTMAX], perfPrev);
/* 1 until the first sample of a counter has been taken (delta omitted) */
static DEFINE_PER_CPU(int[CNTMAX], perfCntFirst);
/* Kernel perf events backing each counter, and their attributes */
static DEFINE_PER_CPU(struct perf_event * [CNTMAX], pevent);
static DEFINE_PER_CPU(struct perf_event_attr [CNTMAX], pevent_attr);
/* 1 once perf events are created and enabled on this CPU */
static DEFINE_PER_CPU(int, perfSet);
static DEFINE_PER_CPU(unsigned int, perf_task_init_done);
/* Per-CPU copy of the CPU id, passed by address to met_perf_cpupmu_down() */
static DEFINE_PER_CPU(unsigned int, perf_cpuid);

static DEFINE_PER_CPU(struct delayed_work, cpu_pmu_dwork);
static DEFINE_PER_CPU(struct delayed_work *, perf_delayed_work_setup);
59
60static inline int reset_driver_stat(int counters)
61{
62 int i;
63
64 nr_arg = 0;
65 counter_cnt = 0;
66 met_cpupmu.mode = 0;
67 for (i = 0; i < counters; i++) {
68 pmu[i].mode = MODE_DISABLED;
69 pmu[i].event = 0;
70 pmu[i].freq = 0;
71 }
72
73 return 0;
74}
75
76static inline struct met_pmu *lookup_pmu(struct kobject *kobj)
77{
78 int i;
79
80 for (i = 0; i < nr_counters; i++) {
81 if (pmu[i].kobj_cpu_pmu == kobj)
82 return &pmu[i];
83 }
84 return NULL;
85}
86
87static ssize_t count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
88{
89 return snprintf(buf, PAGE_SIZE, "%d\n", nr_counters - 1);
90}
91
/* sysfs "count" is read-only; reject every write attempt. */
static ssize_t count_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n)
{
	return -EINVAL;
}
96
97static ssize_t event_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
98{
99 struct met_pmu *p = lookup_pmu(kobj);
100
101 if (p != NULL)
102 return snprintf(buf, PAGE_SIZE, "0x%hx\n", p->event);
103
104 return -EINVAL;
105}
106
107static ssize_t event_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n)
108{
109 struct met_pmu *p = lookup_pmu(kobj);
110 unsigned short event;
111
112 if (p != NULL) {
113 if (sscanf(buf, "0x%hx", &event) != 1)
114 return -EINVAL;
115
116 if (p == &(pmu[nr_counters - 1])) { /* cycle counter */
117 if (event != 0xff)
118 return -EINVAL;
119 } else {
120 if (cpu_pmu->check_event(pmu, nr_arg, event) < 0)
121 return -EINVAL;
122 }
123
124 p->event = event;
125 return n;
126 }
127 return -EINVAL;
128}
129
130static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
131{
132 struct met_pmu *p = lookup_pmu(kobj);
133
134 if (p != NULL) {
135 switch (p->mode) {
136 case 0:
137 return snprintf(buf, PAGE_SIZE, "%hhd (disabled)\n", p->mode);
138 case 1:
139 return snprintf(buf, PAGE_SIZE, "%hhd (interrupt)\n", p->mode);
140 case 2:
141 return snprintf(buf, PAGE_SIZE, "%hhd (polling)\n", p->mode);
142 }
143 }
144 return -EINVAL;
145}
146
147static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n)
148{
149 unsigned int mode;
150 struct met_pmu *p = lookup_pmu(kobj);
151
152 if (p != NULL) {
153 if (kstrtouint(buf, 0, &mode) != 0)
154 return -EINVAL;
155
156 if (mode <= 2) {
157 p->mode = (unsigned char)mode;
158 if (mode > 0)
159 met_cpupmu.mode = 1;
160 return n;
161 }
162 }
163 return -EINVAL;
164}
165
166static ssize_t freq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
167{
168 struct met_pmu *p = lookup_pmu(kobj);
169
170 if (p != NULL)
171 return snprintf(buf, PAGE_SIZE, "%ld\n", p->freq);
172
173 return -EINVAL;
174}
175
176static ssize_t freq_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n)
177{
178 struct met_pmu *p = lookup_pmu(kobj);
179
180 if (p != NULL) {
181 if (kstrtoul(buf, 0, &(p->freq)) != 0)
182 return -EINVAL;
183
184 return n;
185 }
186 return -EINVAL;
187}
188
/* sysfs attribute descriptors (mode 0664: world-readable, group-writable) */
static struct kobj_attribute count_attr = __ATTR(count, 0664, count_show, count_store);
static struct kobj_attribute event_attr = __ATTR(event, 0664, event_show, event_store);
static struct kobj_attribute mode_attr = __ATTR(mode, 0664, mode_show, mode_store);
static struct kobj_attribute freq_attr = __ATTR(freq, 0664, freq_show, freq_store);
193
194static int cpupmu_create_subfs(struct kobject *parent)
195{
196 int ret = 0;
197 int i;
198 char buf[16];
199
200 cpu_pmu = cpu_pmu_hw_init();
201 if (cpu_pmu == NULL) {
202 PR_BOOTMSG("Failed to init CPU PMU HW!!\n");
203 return -ENODEV;
204 }
205 nr_counters = cpu_pmu->nr_cnt;
206
207 pmu = kmalloc_array(nr_counters, sizeof(struct met_pmu), GFP_KERNEL);
208 if (pmu == NULL)
209 return -ENOMEM;
210
211 memset(pmu, 0, sizeof(struct met_pmu) * nr_counters);
212 cpu_pmu->pmu = pmu;
213 kobj_cpu = parent;
214
215 ret = sysfs_create_file(kobj_cpu, &count_attr.attr);
216 if (ret != 0) {
217 PR_BOOTMSG("Failed to create count in sysfs\n");
218 goto out;
219 }
220
221 for (i = 0; i < nr_counters; i++) {
222 snprintf(buf, sizeof(buf), "%d", i);
223 pmu[i].kobj_cpu_pmu = kobject_create_and_add(buf, kobj_cpu);
224
225 ret = sysfs_create_file(pmu[i].kobj_cpu_pmu, &event_attr.attr);
226 if (ret != 0) {
227 PR_BOOTMSG("Failed to create event in sysfs\n");
228 goto out;
229 }
230
231 ret = sysfs_create_file(pmu[i].kobj_cpu_pmu, &mode_attr.attr);
232 if (ret != 0) {
233 PR_BOOTMSG("Failed to create mode in sysfs\n");
234 goto out;
235 }
236
237 ret = sysfs_create_file(pmu[i].kobj_cpu_pmu, &freq_attr.attr);
238 if (ret != 0) {
239 PR_BOOTMSG("Failed to create freq in sysfs\n");
240 goto out;
241 }
242 }
243
244 out:
245 if (ret != 0) {
246 if (pmu != NULL) {
247 kfree(pmu);
248 pmu = NULL;
249 }
250 }
251 return ret;
252}
253
254static void cpupmu_delete_subfs(void)
255{
256 int i;
257
258 if (kobj_cpu != NULL) {
259 for (i = 0; i < nr_counters; i++) {
260 sysfs_remove_file(pmu[i].kobj_cpu_pmu, &event_attr.attr);
261 sysfs_remove_file(pmu[i].kobj_cpu_pmu, &mode_attr.attr);
262 sysfs_remove_file(pmu[i].kobj_cpu_pmu, &freq_attr.attr);
263 kobject_del(pmu[i].kobj_cpu_pmu);
264 kobject_put(pmu[i].kobj_cpu_pmu);
265 pmu[i].kobj_cpu_pmu = NULL;
266 }
267 sysfs_remove_file(kobj_cpu, &count_attr.attr);
268 kobj_cpu = NULL;
269 }
270
271 if (pmu != NULL) {
272 kfree(pmu);
273 pmu = NULL;
274 }
275
276 cpu_pmu = NULL;
277}
278
/*
 * Trace emission point: writes one met trace record containing @cnt
 * PMU values from @value.
 * NOTE(review): marked noinline, presumably so the symbol stays
 * visible to MET's trace post-processing — confirm with the tooling.
 */
noinline void mp_cpu(unsigned char cnt, unsigned int *value)
{
	MET_GENERAL_PRINT(MET_TRACE, cnt, value);
}
283
static void dummy_handler(struct perf_event *event, struct perf_sample_data *data,
				struct pt_regs *regs)
{
	/*
	 * Intentionally empty: perf_event_create_kernel_counter() requires
	 * an overflow handler, even though all we do is poll.
	 */
}
289
/*
 * Sample the perf-backed PMU counters of @cpu and emit one mp_cpu
 * trace record with the per-counter deltas since the previous poll.
 *
 * @stamp: polling timestamp (unused in this path)
 * @cpu:   CPU whose counters are read
 *
 * A record is emitted only when every enabled counter yielded a value
 * (count == counter_cnt); first samples after setup are swallowed so
 * no bogus initial delta is reported.
 */
void perf_cpupmu_polling(unsigned long long stamp, int cpu)
{
	int i, count;
	long long int delta;
	struct perf_event *ev;
	/* NOTE(review): sized by MXNR_CPU but indexed by counter id i
	 * (0..nr_counters-1) — confirm MXNR_CPU >= the counter count. */
	unsigned int pmu_value[MXNR_CPU];

	MET_TRACE("counter_cnt = %d\n", counter_cnt);
	/* nothing to read until perf events were set up on this CPU */
	if (per_cpu(perfSet, cpu) == 0)
		return;

	memset(pmu_value, 0, sizeof(pmu_value));
	count = 0;
	for (i = 0; i < nr_counters; i++) {
		if (pmu[i].mode == 0)
			continue;

		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			per_cpu(perfCurr, cpu)[i] = met_perf_event_read_local_symbol(ev);
			delta = (long long int)(per_cpu(perfCurr, cpu)[i] - per_cpu(perfPrev, cpu)[i]);
			/* report the magnitude if the counter moved backwards */
			if (delta < 0)
				delta *= (-1);
			per_cpu(perfPrev, cpu)[i] = per_cpu(perfCurr, cpu)[i];
			if (per_cpu(perfCntFirst, cpu)[i] == 1) {
				/* we shall omit delta counter when we get first counter */
				per_cpu(perfCntFirst, cpu)[i] = 0;
				continue;
			}
			pmu_value[i] = (unsigned int) delta;
			count++;
		}
	}

	MET_TRACE("count = %d, counter_cnt = %d\n", count, counter_cnt);

	if (count == counter_cnt)
		mp_cpu(count, pmu_value);
}
329
/*
 * Create, validate and enable one pinned kernel perf event per enabled
 * PMU counter on @cpu.  Idempotent: skipped entirely when perfSet is
 * already set for this CPU, and perfSet is raised once setup is done.
 *
 * Counters whose event cannot be created or does not become ACTIVE are
 * left NULL and silently skipped; the function always returns 0.
 */
static int perf_thread_set_perf_events(unsigned int cpu)
{
	int i, size;
	struct perf_event *ev;

	size = sizeof(struct perf_event_attr);
	if (per_cpu(perfSet, cpu) == 0) {
		for (i = 0; i < nr_counters; i++) {
			per_cpu(pevent, cpu)[i] = NULL;
			if (!pmu[i].mode) /* Skip disabled counters */
				continue;

			per_cpu(perfPrev, cpu)[i] = 0;
			per_cpu(perfCurr, cpu)[i] = 0;
			memset(&per_cpu(pevent_attr, cpu)[i], 0, size);
			per_cpu(pevent_attr, cpu)[i].config = pmu[i].event;
			per_cpu(pevent_attr, cpu)[i].type = PERF_TYPE_RAW;
			per_cpu(pevent_attr, cpu)[i].size = size;
			per_cpu(pevent_attr, cpu)[i].sample_period = 0;
			per_cpu(pevent_attr, cpu)[i].pinned = 1;
			/* 0xff is MET's pseudo event id for the CPU cycle counter */
			if (pmu[i].event == 0xff) {
				per_cpu(pevent_attr, cpu)[i].type = PERF_TYPE_HARDWARE;
				per_cpu(pevent_attr, cpu)[i].config = PERF_COUNT_HW_CPU_CYCLES;
			}

			per_cpu(pevent, cpu)[i] =
				perf_event_create_kernel_counter(&per_cpu(pevent_attr, cpu)[i], cpu, NULL,
								 dummy_handler, NULL);
			if (IS_ERR(per_cpu(pevent, cpu)[i])) {
				per_cpu(pevent, cpu)[i] = NULL;
				PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
				continue;
			}

			/* a pinned event that lost its PMU slot stays inactive;
			 * release it rather than polling a dead counter */
			if (per_cpu(pevent, cpu)[i]->state != PERF_EVENT_STATE_ACTIVE) {
				perf_event_release_kernel(per_cpu(pevent, cpu)[i]);
				per_cpu(pevent, cpu)[i] = NULL;
				PR_BOOTMSG("CPU=%d, %s:%d\n", cpu, __FUNCTION__, __LINE__);
				continue;
			}

			ev = per_cpu(pevent, cpu)[i];
			if (ev != NULL)
				perf_event_enable(ev);
			/* first delta after setup is bogus; flag it to be skipped */
			per_cpu(perfCntFirst, cpu)[i] = 1;
		} /* for all PMU counter */
		per_cpu(perfSet, cpu) = 1;
	} /* for perfSet */
	return 0;
}
380
381
/*
 * Deferred-work handler scheduled by met_perf_cpupmu_online():
 * performs the one-time perf event setup for the CPU recorded in the
 * delayed_work.
 */
static void perf_thread_setup(struct work_struct *work)
{
	unsigned int cpu;
	struct delayed_work *dwork = to_delayed_work(work);

	/* NOTE(review): delayed_work.cpu is not a mainline field — this
	 * relies on a vendor kernel patch; the value is written in
	 * met_perf_cpupmu_online(). */
	cpu = dwork->cpu;
	if (per_cpu(perf_task_init_done, cpu) == 0) {
		/* guard so a re-queued work does not set events up twice */
		per_cpu(perf_task_init_done, cpu) = 1;
		perf_thread_set_perf_events(cpu);
	}

	return;
}
395
/*
 * CPU-online hook (also called per-CPU from met_perf_cpupmu_start):
 * schedules a one-shot delayed work that creates the perf events for
 * @cpu, unless one is already installed for it.  No-op while the
 * metdevice is disabled.
 */
void met_perf_cpupmu_online(unsigned int cpu)
{
	if (met_cpupmu.mode == 0)
		return;

	per_cpu(perf_cpuid, cpu) = cpu;
	if (per_cpu(perf_delayed_work_setup, cpu) == NULL) {
		struct delayed_work *dwork;

		dwork = &per_cpu(cpu_pmu_dwork, cpu);
		/* NOTE(review): delayed_work.cpu is a vendor-patched field,
		 * read back in perf_thread_setup() — verify against the tree. */
		dwork->cpu = cpu;
		INIT_DELAYED_WORK(dwork, perf_thread_setup);
		schedule_delayed_work(dwork, 0);
		/* remember the pending work so we don't queue it twice */
		per_cpu(perf_delayed_work_setup, cpu) = dwork;
	}
}
412
413
/*
 * Tear down the perf events of one CPU.
 *
 * @data: pointer to the CPU id (the per-CPU perf_cpuid slot)
 *
 * Disables and releases every still-active event, then clears the
 * per-CPU setup flags so a later start can recreate them.  No-op when
 * the metdevice is disabled or the CPU was never set up.
 */
void met_perf_cpupmu_down(void *data)
{
	unsigned int cpu;
	unsigned int i;
	struct perf_event *ev;

	cpu = *((unsigned int *)data);
	if (met_cpupmu.mode == 0)
		return;
	if (per_cpu(perfSet, cpu) == 0)
		return;
	/* mark torn-down first so pollers stop touching the events */
	per_cpu(perfSet, cpu) = 0;
	for (i = 0; i < nr_counters; i++) {
		if (!pmu[i].mode)
			continue;
		ev = per_cpu(pevent, cpu)[i];
		if ((ev != NULL) && (ev->state == PERF_EVENT_STATE_ACTIVE)) {
			perf_event_disable(ev);
			perf_event_release_kernel(ev);
		}
	}
	per_cpu(perf_task_init_done, cpu) = 0;
	per_cpu(perf_delayed_work_setup, cpu) = NULL;
}
438
439void met_perf_cpupmu_stop(void)
440{
441 unsigned int cpu;
442
443 for_each_online_cpu(cpu) {
444 per_cpu(perf_cpuid, cpu) = cpu;
445 met_perf_cpupmu_down((void *)&per_cpu(perf_cpuid, cpu));
446 }
447}
448
449void met_perf_cpupmu_start(void)
450{
451 unsigned int cpu;
452
453 for_each_online_cpu(cpu) {
454 met_perf_cpupmu_online(cpu);
455 }
456}
457
458void cpupmu_polling(unsigned long long stamp, int cpu)
459{
460 int count;
461 unsigned int pmu_value[MXNR_CPU];
462
463 if (met_cpu_pmu_method == 0) {
464 count = cpu_pmu->polling(pmu, nr_counters, pmu_value);
465 mp_cpu(count, pmu_value);
466 } else {
467 perf_cpupmu_polling(stamp, cpu);
468 }
469}
470
471static void cpupmu_start(void)
472{
473 if (met_cpu_pmu_method == 0) {
474 nr_arg = 0;
475 cpu_pmu->start(pmu, nr_counters);
476 }
477}
478
479static void cpupmu_stop(void)
480{
481 if (met_cpu_pmu_method == 0)
482 cpu_pmu->stop(nr_counters);
483}
484
/* Trace-header template: reports the CPU cache line size. */
static const char cache_line_header[] =
	"met-info [000] 0.0: met_cpu_cache_line_size: %d\n";
/* Trace-header template used when an event has a symbolic name. */
static const char header_n[] =
	"# mp_cpu: pmu_value1, ...\n"
	"met-info [000] 0.0: met_cpu_header: 0x%x:%s";
/* Trace-header template used when only the raw event id is known. */
static const char header[] =
	"# mp_cpu: pmu_value1, ...\n"
	"met-info [000] 0.0: met_cpu_header: 0x%x";

/* Usage text emitted by cpupmu_print_help(). */
static const char help[] =
	"  --pmu-cpu-evt=EVENT                   select CPU-PMU events. in %s,\n"
	"                                          you can enable at most \"%d general purpose events\"\n"
	"                                          plus \"one special 0xff (CPU_CYCLE) event\"\n";
498
499static int cpupmu_print_help(char *buf, int len)
500{
501 return snprintf(buf, PAGE_SIZE, help, cpu_pmu->cpu_name, nr_counters - 1);
502}
503
/*
 * Emit the session trace header into @buf: the PMU access method, the
 * cache line size, and one "met_cpu_header" entry listing every
 * enabled event (with its symbolic name when the backend knows one).
 * As a side effect this resets the per-counter modes and the whole
 * driver configuration for the next session.
 *
 * NOTE(review): all bounds use PAGE_SIZE rather than the @len
 * parameter, and the first two snprintf() calls pass PAGE_SIZE
 * instead of PAGE_SIZE - ret (harmless only because ret == 0 there)
 * — consider tightening for consistency.
 */
static int cpupmu_print_header(char *buf, int len)
{
	int i, ret, first;
	char name[32];

	first = 1;
	ret = 0;

	/*append CPU PMU access method*/
	if (met_cpu_pmu_method == 0)
		ret += snprintf(buf + ret, PAGE_SIZE,
				"met-info [000] 0.0: CPU_PMU_method: PMU registers\n");
	else
		ret += snprintf(buf + ret, PAGE_SIZE,
				"met-info [000] 0.0: CPU_PMU_method: perf APIs\n");

	/*append cache line size*/
	ret += snprintf(buf + ret, PAGE_SIZE - ret, cache_line_header, cache_line_size());

	for (i = 0; i < nr_counters; i++) {
		if (pmu[i].mode == 0)
			continue;
		/* prefer the symbolic event name when the backend provides one */
		if (cpu_pmu->get_event_desc && 0 == cpu_pmu->get_event_desc(i, pmu[i].event, name)) {
			if (first) {
				ret += snprintf(buf + ret, PAGE_SIZE - ret, header_n, pmu[i].event, name);
				first = 0;
			} else {
				ret += snprintf(buf + ret, PAGE_SIZE - ret, ",0x%x:%s", pmu[i].event, name);
			}
		} else {
			if (first) {
				ret += snprintf(buf + ret, PAGE_SIZE - ret, header, pmu[i].event);
				first = 0;
			} else {
				ret += snprintf(buf + ret, PAGE_SIZE - ret, ",0x%x", pmu[i].event);
			}
		}
		/* counter consumed for this session's header; disable it */
		pmu[i].mode = 0;

	}

	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
	met_cpupmu.mode = 0;
	reset_driver_stat(nr_counters);
	nr_arg = 0;
	return ret;
}
551
/*
 * "met-cmd --start --pmu-cpu-evt=0x3"
 */
/*
 * Parse one --pmu-cpu-evt argument: a numeric event id to assign to
 * the next free counter slot.
 *
 * @arg: argument text
 * @len: length of @arg
 *
 * The pseudo event 0xff (CPU cycles) is pinned to the last slot when
 * counters are read from PMU registers; with the perf method it takes
 * a normal slot.  Any parse/validation failure resets the whole driver
 * configuration and returns -EINVAL; success returns 0.
 */
static int cpupmu_process_argument(const char *arg, int len)
{
	unsigned int value;

	if (met_cpu_pmu_method == 0)
		nr_counters = cpu_pmu->nr_cnt;
	else
		nr_counters = perf_num_counters();

	if (nr_counters == 0)
		goto arg_out;

	if (met_parse_num(arg, &value, len) < 0)
		goto arg_out;

	if (cpu_pmu->check_event(pmu, nr_arg, value) < 0)
		goto arg_out;

	if (value == 0xff) {
		if (met_cpu_pmu_method == 0) {
			/* register method: cycle counter lives in the last slot */
			pmu[nr_counters - 1].mode = MODE_POLLING;
			pmu[nr_counters - 1].event = 0xff;
			pmu[nr_counters - 1].freq = 0;
		} else {
			if (nr_arg > (nr_counters - 1))
				goto arg_out;

			pmu[nr_arg].mode = MODE_POLLING;
			pmu[nr_arg].event = value;
			pmu[nr_arg].freq = 0;
			nr_arg++;
		}
	} else {

		/* general-purpose events may not occupy the cycle-counter slot */
		if (nr_arg >= (nr_counters - 1))
			goto arg_out;

		pmu[nr_arg].mode = MODE_POLLING;
		pmu[nr_arg].event = value;
		pmu[nr_arg].freq = 0;
		nr_arg++;
	}
	counter_cnt++;

	met_cpupmu.mode = 1;
	return 0;

arg_out:
	reset_driver_stat(nr_counters);
	return -EINVAL;
}
606
/*
 * MET framework device descriptor for the CPU PMU sampler; registers
 * the sysfs setup/teardown, start/stop, polling and header callbacks
 * defined above.
 */
struct metdevice met_cpupmu = {
	.name = "cpu",
	.type = MET_TYPE_PMU,
	.cpu_related = 1,
	.create_subfs = cpupmu_create_subfs,
	.delete_subfs = cpupmu_delete_subfs,
	.start = cpupmu_start,
	.stop = cpupmu_stop,
	.polling_interval = 1,	/* poll every tick of the MET sampler */
	.timed_polling = cpupmu_polling,
	.print_help = cpupmu_print_help,
	.print_header = cpupmu_print_header,
	.process_argument = cpupmu_process_argument
};