/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/sched/clock.h>
15#include <linux/kernel.h>
16#include <linux/cpuhotplug.h>
17#include <linux/cpu.h>
18#include <linux/sched.h>
19#include <linux/notifier.h>
20#include <linux/module.h>
21#include <linux/irq.h>
22#if 0 /* fix me later, no such file on current tree */
23#include <mach/mt_cpuxgpt.h>
24#endif
25#include <asm/arch_timer.h>
26
27#define MET_USER_EVENT_SUPPORT
28#include "interface.h"
29#include "sampler.h"
30#include "met_struct.h"
31#include "util.h"
32#include "switch.h"
33#include "trace.h"
34#include "met_drv.h"
35#include "met_tag.h" /* for tracing_mark_write */
36
37#include "cpu_pmu.h" /* for using kernel perf PMU driver */
38
39#include "met_kernel_symbol.h"
40
41#undef DEBUG_CPU_NOTIFY
42/* #define DEBUG_CPU_NOTIFY */
43#if defined(DEBUG_CPU_NOTIFY)
44#ifdef CONFIG_MET_MODULE
45#define dbg_met_tag_oneshot met_tag_oneshot_real
46#else
47#define dbg_met_tag_oneshot met_tag_oneshot
48#endif /* CONFIG_MET_MODULE */
49#else
50#define dbg_met_tag_oneshot(class_id, name, value) ({ 0; })
51#endif
52
/* Sampler-wide state. Written on the start/stop and CPU-hotplug paths,
 * read by the per-cpu hrtimer callbacks.
 */
static int start;			/* 1 between sampler_start() and sampler_stop() */
static unsigned int online_cpu_map;	/* bitmask of online CPUs (bit n == cpu n) */
static int curr_polling_cpu;		/* CPU elected to run non-cpu_related polling */
static int cpu_related_cnt;		/* 1 if any enabled metdevice is cpu_related */

/* Preference order when electing curr_polling_cpu; earlier entry wins. */
static int preferred_cpu_list[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
59
60static int calc_preferred_polling_cpu(unsigned int cpu_map)
61{
62 int i;
63
64 for (i = 0; i < ARRAY_SIZE(preferred_cpu_list); i++) {
65 if (cpu_map & (1 << preferred_cpu_list[i]))
66 return preferred_cpu_list[i];
67 }
68
69 return -1;
70}
71
72static void wq_sync_buffer(struct work_struct *work)
73{
74 int cpu;
75 struct delayed_work *dw = container_of(work, struct delayed_work, work);
76 struct met_cpu_struct *met_cpu_ptr = container_of(dw, struct met_cpu_struct, dwork);
77
78 cpu = smp_processor_id();
79 if (met_cpu_ptr->cpu != cpu) {
80 /* panic("ERROR"); */
81 return;
82 }
83
84 /* sync_samples(cpu); */
85 /* don't re-add the work if we're shutting down */
86 if (met_cpu_ptr->work_enabled)
87 schedule_delayed_work(dw, DEFAULT_TIMER_EXPIRE);
88}
89
90static enum hrtimer_restart met_hrtimer_notify(struct hrtimer *hrtimer)
91{
92 int cpu;
93 int *count;
94 unsigned long long stamp;
95 struct met_cpu_struct *met_cpu_ptr = container_of(hrtimer, struct met_cpu_struct, hrtimer);
96 struct metdevice *c;
97#if defined(DEBUG_CPU_NOTIFY)
98 char msg[32];
99#endif
100
101 cpu = smp_processor_id();
102#if defined(DEBUG_CPU_NOTIFY)
103 {
104 char msg[32];
105
106 snprintf(msg, sizeof(msg), "met_hrtimer notify_%d", cpu);
107 dbg_met_tag_oneshot(0, msg, 1);
108 }
109#endif
110
111 if (met_cpu_ptr->cpu != cpu) {
112 /* panic("ERROR2"); */
113 dbg_met_tag_oneshot(0, msg, -3);
114 return HRTIMER_NORESTART;
115 }
116
117 list_for_each_entry(c, &met_list, list) {
118 if (c->ondiemet_mode == 0) {
119 if ((c->mode == 0) || (c->timed_polling == NULL))
120 continue;
121 } else if (c->ondiemet_mode == 1) {
122 if ((c->mode == 0) || (c->ondiemet_timed_polling == NULL))
123 continue;
124 } else if (c->ondiemet_mode == 2) {
125 if ((c->mode == 0) || ((c->timed_polling == NULL)
126 && (c->ondiemet_timed_polling == NULL)))
127 continue;
128 }
129
130 count = per_cpu_ptr(c->polling_count, cpu);
131 if ((*count) > 0) {
132 (*count)--;
133 continue;
134 }
135
136 *(count) = c->polling_count_reload;
137
138 stamp = cpu_clock(cpu);
139
140 if (c->cpu_related == 0) {
141 if (cpu == curr_polling_cpu) {
142 if (c->ondiemet_mode == 0) {
143 c->timed_polling(stamp, 0);
144 } else if (c->ondiemet_mode == 1) {
145 c->ondiemet_timed_polling(stamp, 0);
146 } else if (c->ondiemet_mode == 2) {
147 if (c->timed_polling)
148 c->timed_polling(stamp, 0);
149 if (c->ondiemet_timed_polling)
150 c->ondiemet_timed_polling(stamp, 0);
151 }
152 }
153 } else {
154 if (c->ondiemet_mode == 0) {
155 c->timed_polling(stamp, cpu);
156 } else if (c->ondiemet_mode == 1) {
157 c->ondiemet_timed_polling(stamp, cpu);
158 } else if (c->ondiemet_mode == 2) {
159 if (c->timed_polling)
160 c->timed_polling(stamp, 0);
161 if (c->ondiemet_timed_polling)
162 c->ondiemet_timed_polling(stamp, 0);
163 }
164 }
165 }
166
167 if (met_cpu_ptr->hrtimer_online_check) {
168 online_cpu_map |= (1 << cpu);
169 met_cpu_ptr->hrtimer_online_check = 0;
170 dbg_met_tag_oneshot(0, "met_online check done", cpu);
171 if (calc_preferred_polling_cpu(online_cpu_map) == cpu) {
172 curr_polling_cpu = cpu;
173 dbg_met_tag_oneshot(0, "met_curr polling cpu", cpu);
174 }
175 }
176
177 if (met_cpu_ptr->work_enabled) {
178 hrtimer_forward_now(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE));
179 dbg_met_tag_oneshot(0, msg, 0);
180 return HRTIMER_RESTART;
181 }
182 dbg_met_tag_oneshot(0, msg, 0);
183 return HRTIMER_NORESTART;
184}
185
/*
 * Runs on the target CPU (via smp_call_function_single) to start sampling
 * there: initializes and arms the per-cpu polling hrtimer (unless the
 * EVENT-timer mode is active) and calls the per-cpu start hooks of every
 * enabled cpu_related metdevice.
 */
static void __met_hrtimer_start(void *unused)
{
	struct met_cpu_struct *met_cpu_ptr = NULL;
	struct hrtimer *hrtimer = NULL;
	/* struct delayed_work *dw; */
	struct metdevice *c;

	met_cpu_ptr = this_cpu_ptr(&met_cpu);
#if defined(DEBUG_CPU_NOTIFY)
	{
		char msg[32];

		snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
		dbg_met_tag_oneshot(0, msg, 1);
	}
#endif
	/*
	 * do not open HRtimer when EVENT timer enable
	 */
	if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
		hrtimer = &met_cpu_ptr->hrtimer;
		/* dw = &met_cpu_ptr->dwork; */

		hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer->function = met_hrtimer_notify;
	}

	/* Reset each device's polling divider, then run the start hooks
	 * matching its ondiemet mode (2 == both normal and ondiemet).
	 */
	list_for_each_entry(c, &met_list, list) {
		*(this_cpu_ptr(c->polling_count)) = 0;
		if (c->ondiemet_mode == 0) {
			if ((c->cpu_related) && (c->mode) && (c->start))
				c->start();
		} else if (c->ondiemet_mode == 1) {
			if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
				c->ondiemet_start();
		} else if (c->ondiemet_mode == 2) {
			if ((c->cpu_related) && (c->mode) && (c->start))
				c->start();
			if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
				c->ondiemet_start();
		}
	}
	/*
	 * do not open HRtimer when EVENT timer enable
	 */
	if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
		if (DEFAULT_HRTIMER_EXPIRE) {
			/* Set work_enabled before arming so the first expiry re-arms. */
			met_cpu_ptr->work_enabled = 1;
			/* schedule_delayed_work_on(smp_processor_id(), dw, DEFAULT_TIMER_EXPIRE); */
			hrtimer_start(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE),
				      HRTIMER_MODE_REL_PINNED);
		}
	}
}
240
/*
 * Runs on the target CPU to stop sampling there: cancels the per-cpu
 * polling hrtimer (unless the EVENT-timer mode is active) and calls the
 * per-cpu stop hooks of every enabled cpu_related metdevice.
 */
static void __met_hrtimer_stop(void *unused)
{
	struct met_cpu_struct *met_cpu_ptr;
	struct hrtimer *hrtimer;
	/* struct delayed_work *dw; */
	struct metdevice *c;

	met_cpu_ptr = this_cpu_ptr(&met_cpu);
#if defined(DEBUG_CPU_NOTIFY)
	{
		char msg[32];

		snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
		dbg_met_tag_oneshot(0, msg, 0);
	}
#endif
	/*
	 * do not open HRtimer when EVENT timer enable
	 */
	if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
		hrtimer = &met_cpu_ptr->hrtimer;
		/* dw = &met_cpu_ptr->dwork; */

		/* Clear work_enabled before cancel so the callback won't re-arm. */
		met_cpu_ptr->work_enabled = 0;
		hrtimer_cancel(hrtimer);

		/* cancel_delayed_work_sync(dw); */
	}
	/* Per-cpu stop hooks, mirroring the start hooks in __met_hrtimer_start(). */
	list_for_each_entry(c, &met_list, list) {
		if (c->ondiemet_mode == 0) {
			if ((c->cpu_related) && (c->mode) && (c->stop))
				c->stop();
		} else if (c->ondiemet_mode == 1) {
			if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
				c->ondiemet_stop();
		} else if (c->ondiemet_mode == 2) {
			if ((c->cpu_related) && (c->mode) && (c->stop))
				c->stop();
			if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
				c->ondiemet_stop();
		}
		*(this_cpu_ptr(c->polling_count)) = 0;
	}
}
285
286static int met_pmu_cpu_notify(enum met_action action, unsigned int cpu)
287{
288 struct met_cpu_struct *met_cpu_ptr;
289 struct delayed_work *dw;
290 int preferred_polling_cpu;
291 struct metdevice *c;
292
293 if (start == 0)
294 return NOTIFY_OK;
295
296#if defined(DEBUG_CPU_NOTIFY)
297 {
298 char msg[32];
299
300 snprintf(msg, sizeof(msg), "met_cpu notify_%d", cpu);
301 dbg_met_tag_oneshot(0, msg, action);
302 }
303#elif defined(PR_CPU_NOTIFY)
304 {
305 char msg[32];
306
307 if (met_cpu_notify) {
308 snprintf(msg, sizeof(msg), "met_cpu notify_%d", cpu);
309 dbg_met_tag_oneshot(0, msg, action);
310 }
311 }
312#endif
313
314 if (cpu < 0 || cpu >= NR_CPUS)
315 return NOTIFY_OK;
316
317 switch (action) {
318 case MET_CPU_ONLINE:
319 met_cpu_ptr = &per_cpu(met_cpu, cpu);
320 met_cpu_ptr->hrtimer_online_check = 1;
321 dbg_met_tag_oneshot(0, "met_online check", cpu);
322
323 if (cpu_related_cnt == 0) {
324 /*pr_info("%s, %d: curr_polling_cpu is alive = %d\n",
325 * __func__, __LINE__, online_cpu_map & (1 << curr_polling_cpu));
326 */
327
328 online_cpu_map |= (1 << cpu);
329
330 /* check curr_polling_cpu is alive, if it is down,
331 * start current cpu hrtimer, and change it to be currr_pollling_cpu
332 */
333 if ((online_cpu_map & (1 << curr_polling_cpu)) == 0) {
334 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
335 curr_polling_cpu = cpu;
336 }
337 } else
338 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
339
340#ifdef CONFIG_CPU_FREQ
341 force_power_log(cpu);
342#endif
343 list_for_each_entry(c, &met_list, list) {
344 if (c->cpu_state_notify)
345 c->cpu_state_notify(cpu, action);
346 }
347 break;
348
349 case MET_CPU_OFFLINE:
350 list_for_each_entry(c, &met_list, list) {
351 if (c->cpu_state_notify)
352 c->cpu_state_notify(cpu, action);
353 }
354
355 online_cpu_map &= ~(1 << cpu);
356 dbg_met_tag_oneshot(0, "met_offline cpu", cpu);
357 if (cpu == curr_polling_cpu) {
358 /* pr_info("%s, %d: curr_polling_cpu %d is down\n",
359 * __func__, __LINE__, curr_polling_cpu);
360 */
361 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
362 /* pr_info("%s, %d: preferred_polling_cpu = %d\n",
363 * __func__, __LINE__, preferred_polling_cpu);
364 */
365 if (preferred_polling_cpu != -1) {
366 curr_polling_cpu = preferred_polling_cpu;
367 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
368
369 if (cpu_related_cnt == 0)
370 /* pr_info("%s, %d: start cpu %d hrtimer start\n",
371 * __func__, __LINE__, curr_polling_cpu);
372 */
373 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
374 }
375 }
376
377 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
378
379 met_cpu_ptr = &per_cpu(met_cpu, cpu);
380 dw = &met_cpu_ptr->dwork;
381 cancel_delayed_work_sync(dw);
382
383 /* sync_samples(cpu); */
384 break;
385 default:
386 list_for_each_entry(c, &met_list, list) {
387 if (c->cpu_state_notify)
388 c->cpu_state_notify(cpu, action);
389 }
390 }
391
392 return NOTIFY_OK;
393}
394
395static int _met_pmu_cpu_notify_online(unsigned int cpu)
396{
397 met_pmu_cpu_notify(MET_CPU_ONLINE, cpu);
398
399 return 0;
400}
401
402static int _met_pmu_cpu_notify_offline(unsigned int cpu)
403{
404 met_pmu_cpu_notify(MET_CPU_OFFLINE, cpu);
405
406 return 0;
407}
408
409int sampler_start(void)
410{
411 int ret, cpu;
412 struct met_cpu_struct *met_cpu_ptr;
413 struct metdevice *c;
414 int preferred_polling_cpu;
415
416 met_set_suspend_notify(0);
417
418#ifdef CONFIG_CPU_FREQ
419 force_power_log(POWER_LOG_ALL);
420#endif
421
422 for_each_possible_cpu(cpu) {
423 met_cpu_ptr = &per_cpu(met_cpu, cpu);
424 met_cpu_ptr->work_enabled = 0;
425 met_cpu_ptr->hrtimer_online_check = 0;
426 hrtimer_init(&met_cpu_ptr->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
427 met_cpu_ptr->hrtimer.function = met_hrtimer_notify;
428 INIT_DELAYED_WORK(&met_cpu_ptr->dwork, wq_sync_buffer);
429 }
430
431 start = 0;
432 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
433 "met:online",
434 _met_pmu_cpu_notify_online,
435 _met_pmu_cpu_notify_offline);
436
437 list_for_each_entry(c, &met_list, list) {
438
439 if (try_module_get(c->owner) == 0)
440 continue;
441
442 if ((c->mode) && (c->cpu_related == 1))
443 cpu_related_cnt = 1;
444
445 if (c->ondiemet_mode == 0) {
446 if ((!(c->cpu_related)) && (c->mode) && (c->start))
447 c->start();
448 else if ((c->cpu_related) && (c->mode) && (c->uniq_start))
449 c->uniq_start();
450 } else if (c->ondiemet_mode == 1) {
451 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
452 c->ondiemet_start();
453 } else if (c->ondiemet_mode == 2) {
454 if ((!(c->cpu_related)) && (c->mode) && (c->start))
455 c->start();
456 else if ((c->cpu_related) && (c->mode) && (c->uniq_start))
457 c->uniq_start();
458
459 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
460 c->ondiemet_start();
461 }
462 }
463
464 get_online_cpus();
465 online_cpu_map = 0;
466 for_each_online_cpu(cpu) {
467 online_cpu_map |= (1 << cpu);
468 }
469 dbg_met_tag_oneshot(0, "met_online cpu map", online_cpu_map);
470
471 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
472 if (preferred_polling_cpu != -1)
473 curr_polling_cpu = preferred_polling_cpu;
474 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
475 start = 1;
476
477 if (cpu_related_cnt == 0)
478 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
479 else {
480 //on_each_cpu(__met_hrtimer_start, NULL, 1);
481 for_each_online_cpu(cpu) {
482 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
483 }
484 }
485 put_online_cpus();
486
487 return ret;
488}
489
490void sampler_stop(void)
491{
492 int cpu;
493 struct met_cpu_struct *met_cpu_ptr;
494 struct metdevice *c;
495 struct delayed_work *dw;
496
497
498 get_online_cpus();
499 //on_each_cpu(__met_hrtimer_stop, NULL, 1);
500 online_cpu_map = 0;
501 for_each_online_cpu(cpu) {
502 online_cpu_map |= (1 << cpu);
503 }
504
505 for_each_online_cpu(cpu) {
506 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
507 }
508
509 /* for_each_online_cpu(cpu) { */
510 for_each_possible_cpu(cpu) { /* Just for case */
511 met_cpu_ptr = &per_cpu(met_cpu, cpu);
512 dw = &met_cpu_ptr->dwork;
513 cancel_delayed_work_sync(dw);
514 /* sync_samples(cpu); */
515 }
516 start = 0;
517 put_online_cpus();
518
519 cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
520
521 list_for_each_entry(c, &met_list, list) {
522 if (c->ondiemet_mode == 0) {
523 if ((!(c->cpu_related)) && (c->mode) && (c->stop))
524 c->stop();
525 else if ((c->cpu_related) && (c->mode) && (c->uniq_stop))
526 c->uniq_stop();
527 } else if (c->ondiemet_mode == 1) {
528 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
529 c->ondiemet_stop();
530 } else if (c->ondiemet_mode == 2) {
531 if ((!(c->cpu_related)) && (c->mode) && (c->stop))
532 c->stop();
533 else if ((c->cpu_related) && (c->mode) && (c->uniq_stop))
534 c->uniq_stop();
535
536 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
537 c->ondiemet_stop();
538 }
539 module_put(c->owner);
540 }
541
542 cpu_related_cnt = 0;
543}
544
#if 0 /* can't use static now */
/* NOTE(review): dead code kept for reference -- a counter-style trace of
 * suspend/resume, superseded by the tracing_mark_write(TYPE_MET_SUSPEND/
 * TYPE_MET_RESUME, ...) calls used below.
 */
enum {
	MET_SUSPEND = 1,
	MET_RESUME = 2,
};

static noinline void tracing_mark_write(int op)
{
	switch (op) {
	case MET_SUSPEND:
		MET_TRACE("C|0|MET_SUSPEND|1");
		break;
	case MET_RESUME:
		MET_TRACE("C|0|MET_SUSPEND|0");
		break;
	}
}
#endif
563
564int met_hrtimer_suspend(void)
565{
566 struct metdevice *c;
567
568 met_set_suspend_notify(1);
569 /* tracing_mark_write(MET_SUSPEND); */
570 tracing_mark_write(TYPE_MET_SUSPEND, 0, 0, 0, 0, 0);
571 if (start == 0)
572 return 0;
573
574 list_for_each_entry(c, &met_list, list) {
575 if (c->suspend)
576 c->suspend();
577 }
578
579 /* get current COUNT */
580 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
581 return 0;
582}
583
584void met_hrtimer_resume(void)
585{
586 struct metdevice *c;
587
588 /* get current COUNT */
589 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
590
591 /* tracing_mark_write(MET_RESUME); */
592 tracing_mark_write(TYPE_MET_RESUME, 0, 0, 0, 0, 0);
593 if (start == 0)
594 return;
595
596 list_for_each_entry(c, &met_list, list) {
597 if (c->resume)
598 c->resume();
599 }
600}
601
602/*
603 * event timer:
604 * register IRQ, sched_switch event to monitor Polling count
605 * count can be printed at any live cpu.
606 */
/*
 * Event-driven polling entry point (IRQ / sched_switch paths): polls each
 * enabled metdevice at most once per DEFAULT_HRTIMER_EXPIRE interval,
 * using a per-device trylock to rate-limit across CPUs.
 */
void met_event_timer_notify(void)
{
	unsigned long long stamp;
	struct metdevice *c;
	int cpu = -1;

	/* Sampling not active. */
	if (start == 0)
		return;

	cpu = smp_processor_id();
	list_for_each_entry(c, &met_list, list) {
		stamp = local_clock();

		/* First event seen for this device: seed the interval timestamp. */
		if (c->prev_stamp == 0)
			c->prev_stamp = stamp;

		/* Critical Section Start */
		/* try spinlock to prevent a event print twice between config time interval */
		if (!spin_trylock(&(c->my_lock)))
			continue;

		/*
		 * DEFAULT_HRTIMER_EXPIRE (met_hrtimer_expire):
		 * sample_rate == 0 --> always print
		 * sample_rate == 1000 --> print interval larger than 1 ms
		 *
		 * NOTE(review): when DEFAULT_HRTIMER_EXPIRE == 0 this branch
		 * skips the poll, which contradicts the "always print" note
		 * above -- confirm intended behavior.
		 */
		if (DEFAULT_HRTIMER_EXPIRE == 0 || (stamp - c->prev_stamp) < DEFAULT_HRTIMER_EXPIRE) {
			spin_unlock(&(c->my_lock));
			continue;
		}

		c->prev_stamp = stamp;
		spin_unlock(&(c->my_lock));
		/* Critical Section End */

		/* Only devices that are enabled and have a polling hook. */
		if ((c->mode == 0) || (c->timed_polling == NULL))
			continue;

		/* Re-read the clock so the reported stamp excludes lock wait. */
		stamp = local_clock();
		c->timed_polling(stamp, cpu);
	}
}
649