/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13
14#include <linux/cpu.h>
15#include <linux/sched.h>
16#include <linux/notifier.h>
17#include <linux/module.h>
18#include <linux/irq.h>
19#if 0 /* fix me later, no such file on current tree */
20#include <mach/mt_cpuxgpt.h>
21#endif
22#include <asm/arch_timer.h>
23
24#define MET_USER_EVENT_SUPPORT
25#include "interface.h"
26#include "sampler.h"
27#include "met_struct.h"
28#include "util.h"
29#include "switch.h"
30#include "trace.h"
31#include "met_drv.h"
32#include "met_tag.h" /* for tracing_mark_write */
33
34#include "cpu_pmu.h" /* for using kernel perf PMU driver */
35#include "cpu_pmu_v2.h" /* for using kernel perf PMU v2 driver */
36#include "met_kernel_symbol.h"
37
38#undef DEBUG_CPU_NOTIFY
39/* #define DEBUG_CPU_NOTIFY */
40#if defined(DEBUG_CPU_NOTIFY)
41#ifdef CONFIG_MET_MODULE
42#define dbg_met_tag_oneshot met_tag_oneshot_real
43#else
44#define dbg_met_tag_oneshot met_tag_oneshot
45#endif /* CONFIG_MET_MODULE */
46#else
47#define dbg_met_tag_oneshot(class_id, name, value) ({ 0; })
48#endif
49
/* Nonzero while a sampling session is active (set in sampler_start()). */
static int start;
/* Bitmask of CPUs believed online; bit n == CPU n. */
static unsigned int online_cpu_map;
/* CPU that polls the non-cpu_related (system-wide) metdevices. */
static int curr_polling_cpu;
/* Nonzero when at least one enabled metdevice is cpu_related. */
static int cpu_related_cnt;

/* 0 = PMU profiling off, 1 = v1 perf PMU driver, 2 = v2 driver. */
static int pmu_profiling_version = 0;

/* Scratch per-CPU cpu-id passed to the PMU down callbacks. */
static DEFINE_PER_CPU(unsigned int, perf_cpuid);

/* Polling-CPU priority order; first online entry wins.
 * NOTE(review): assumes at most 8 CPUs — confirm against target SoCs.
 */
static int preferred_cpu_list[] = { 0, 4, 1, 2, 3, 5, 6, 7 };
60
61int get_pmu_profiling_version()
62{
63 return pmu_profiling_version;
64}
65
66static int calc_preferred_polling_cpu(unsigned int cpu_map)
67{
68 int i;
69
70 for (i = 0; i < ARRAY_SIZE(preferred_cpu_list); i++) {
71 if (cpu_map & (1 << preferred_cpu_list[i]))
72 return preferred_cpu_list[i];
73 }
74
75 return -1;
76}
77
78static void wq_sync_buffer(struct work_struct *work)
79{
80 int cpu;
81 struct delayed_work *dw = container_of(work, struct delayed_work, work);
82 struct met_cpu_struct *met_cpu_ptr = container_of(dw, struct met_cpu_struct, dwork);
83
84 cpu = smp_processor_id();
85 if (met_cpu_ptr->cpu != cpu) {
86 /* panic("ERROR"); */
87 return;
88 }
89
90 /* sync_samples(cpu); */
91 /* don't re-add the work if we're shutting down */
92 if (met_cpu_ptr->work_enabled)
93 schedule_delayed_work(dw, DEFAULT_TIMER_EXPIRE);
94}
95
96static enum hrtimer_restart met_hrtimer_notify(struct hrtimer *hrtimer)
97{
98 int cpu;
99 int *count;
100 unsigned long long stamp;
101 struct met_cpu_struct *met_cpu_ptr = container_of(hrtimer, struct met_cpu_struct, hrtimer);
102 struct metdevice *c;
103#if defined(DEBUG_CPU_NOTIFY)
104 char msg[32];
105#endif
106
107 cpu = smp_processor_id();
108#if defined(DEBUG_CPU_NOTIFY)
109 {
110 char msg[32];
111
112 snprintf(msg, sizeof(msg), "met_hrtimer notify_%d", cpu);
113 dbg_met_tag_oneshot(0, msg, 1);
114 }
115#endif
116
117 if (met_cpu_ptr->cpu != cpu) {
118 /* panic("ERROR2"); */
119 dbg_met_tag_oneshot(0, msg, -3);
120 return HRTIMER_NORESTART;
121 }
122
123 list_for_each_entry(c, &met_list, list) {
124 if (c->ondiemet_mode == 0) {
125 if ((c->mode == 0) || (c->timed_polling == NULL))
126 continue;
127 } else if (c->ondiemet_mode == 1) {
128 if ((c->mode == 0) || (c->ondiemet_timed_polling == NULL))
129 continue;
130 } else if (c->ondiemet_mode == 2) {
131 if ((c->mode == 0) || ((c->timed_polling == NULL)
132 && (c->ondiemet_timed_polling == NULL)))
133 continue;
134 }
135
136 count = per_cpu_ptr(c->polling_count, cpu);
137 if ((*count) > 0) {
138 (*count)--;
139 continue;
140 }
141
142 *(count) = c->polling_count_reload;
143
144 stamp = cpu_clock(cpu);
145
146 if (c->cpu_related == 0) {
147 if (cpu == curr_polling_cpu) {
148 if (c->ondiemet_mode == 0) {
149 c->timed_polling(stamp, 0);
150 } else if (c->ondiemet_mode == 1) {
151 c->ondiemet_timed_polling(stamp, 0);
152 } else if (c->ondiemet_mode == 2) {
153 if (c->timed_polling)
154 c->timed_polling(stamp, 0);
155 if (c->ondiemet_timed_polling)
156 c->ondiemet_timed_polling(stamp, 0);
157 }
158 }
159 } else {
160 if (c->ondiemet_mode == 0) {
161 c->timed_polling(stamp, cpu);
162 } else if (c->ondiemet_mode == 1) {
163 c->ondiemet_timed_polling(stamp, cpu);
164 } else if (c->ondiemet_mode == 2) {
165 if (c->timed_polling)
166 c->timed_polling(stamp, 0);
167 if (c->ondiemet_timed_polling)
168 c->ondiemet_timed_polling(stamp, 0);
169 }
170 }
171 }
172
173 if (met_cpu_ptr->hrtimer_online_check) {
174 online_cpu_map |= (1 << cpu);
175 met_cpu_ptr->hrtimer_online_check = 0;
176 dbg_met_tag_oneshot(0, "met_online check done", cpu);
177 if (calc_preferred_polling_cpu(online_cpu_map) == cpu) {
178 curr_polling_cpu = cpu;
179 dbg_met_tag_oneshot(0, "met_curr polling cpu", cpu);
180 }
181 }
182
183 if (met_cpu_ptr->work_enabled) {
184 hrtimer_forward_now(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE));
185 dbg_met_tag_oneshot(0, msg, 0);
186 return HRTIMER_RESTART;
187 }
188 dbg_met_tag_oneshot(0, msg, 0);
189 return HRTIMER_NORESTART;
190}
191
192static void __met_hrtimer_start(void *unused)
193{
194 struct met_cpu_struct *met_cpu_ptr = NULL;
195 struct hrtimer *hrtimer = NULL;
196 /* struct delayed_work *dw; */
197 struct metdevice *c;
198
199 met_cpu_ptr = this_cpu_ptr(&met_cpu);
200#if defined(DEBUG_CPU_NOTIFY)
201 {
202 char msg[32];
203
204 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
205 dbg_met_tag_oneshot(0, msg, 1);
206 }
207#endif
208 /*
209 * do not open HRtimer when EVENT timer enable
210 */
211// if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
212 hrtimer = &met_cpu_ptr->hrtimer;
213 /* dw = &met_cpu_ptr->dwork; */
214
215 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
216 hrtimer->function = met_hrtimer_notify;
217// }
218
219 list_for_each_entry(c, &met_list, list) {
220 if (c->ondiemet_mode == 0) {
221 if ((c->cpu_related) && (c->mode) && (c->start))
222 c->start();
223 } else if (c->ondiemet_mode == 1) {
224 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
225 c->ondiemet_start();
226 } else if (c->ondiemet_mode == 2) {
227 if ((c->cpu_related) && (c->mode) && (c->start))
228 c->start();
229 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
230 c->ondiemet_start();
231 }
232 }
233 /*
234 * do not open HRtimer when EVENT timer enable
235 */
236// if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
237 if (DEFAULT_HRTIMER_EXPIRE) {
238 met_cpu_ptr->work_enabled = 1;
239 /* schedule_delayed_work_on(smp_processor_id(), dw, DEFAULT_TIMER_EXPIRE); */
240 hrtimer_start(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE),
241 HRTIMER_MODE_REL_PINNED);
242 }
243// }
244}
245
246static void __met_hrtimer_stop(void *unused)
247{
248 struct met_cpu_struct *met_cpu_ptr;
249 struct hrtimer *hrtimer;
250 /* struct delayed_work *dw; */
251 struct metdevice *c;
252
253 met_cpu_ptr = this_cpu_ptr(&met_cpu);
254#if defined(DEBUG_CPU_NOTIFY)
255 {
256 char msg[32];
257
258 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
259 dbg_met_tag_oneshot(0, msg, 0);
260 }
261#endif
262 /*
263 * do not open HRtimer when EVENT timer enable
264 */
265// if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
266 hrtimer = &met_cpu_ptr->hrtimer;
267 /* dw = &met_cpu_ptr->dwork; */
268
269 met_cpu_ptr->work_enabled = 0;
270 hrtimer_cancel(hrtimer);
271 /* cancel_delayed_work_sync(dw); */
272// }
273 list_for_each_entry(c, &met_list, list) {
274 if (c->ondiemet_mode == 0) {
275 if ((c->cpu_related) && (c->mode) && (c->stop))
276 c->stop();
277 } else if (c->ondiemet_mode == 1) {
278 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
279 c->ondiemet_stop();
280 } else if (c->ondiemet_mode == 2) {
281 if ((c->cpu_related) && (c->mode) && (c->stop))
282 c->stop();
283 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
284 c->ondiemet_stop();
285 }
286 }
287}
288
/*
 * CPU-hotplug notifier: keeps the polling-CPU assignment, per-CPU
 * hrtimers and PMU driver state consistent as CPUs come and go.
 *
 * ONLINE / ONLINE_FROZEN:  mark the CPU for the hrtimer online check,
 *   (re)start its hrtimer, and bring the PMU driver online for it.
 * DOWN_PREPARE / _FROZEN:  drop the CPU from the map, migrate the
 *   polling role if needed, stop its hrtimer/PMU and flush its work.
 * DOWN_FAILED / _FROZEN:   the CPU stayed up; restart its hrtimer.
 * DEAD / DEAD_FROZEN:      log a final power value (CPU_FREQ only).
 *
 * Always returns NOTIFY_OK; does nothing while sampling is stopped.
 */
static int met_pmu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	struct met_cpu_struct *met_cpu_ptr;
	struct delayed_work *dw;
	long cpu = (long)hcpu;
	int preferred_polling_cpu;

	/* Ignore hotplug traffic outside an active sampling session. */
	if (start == 0)
		return NOTIFY_OK;

#if defined(DEBUG_CPU_NOTIFY)
	{
		char msg[32];

		snprintf(msg, sizeof(msg), "met_cpu notify_%ld", cpu);
		dbg_met_tag_oneshot(0, msg, action);
	}
#elif defined(PR_CPU_NOTIFY)
	{
		char msg[32];

		if (met_cpu_notify) {
			snprintf(msg, sizeof(msg), "met_cpu notify_%ld", cpu);
			dbg_met_tag_oneshot(0, msg, action);
		}
	}
#endif

	/* Only CPUs covered by preferred_cpu_list are managed here. */
	if (cpu < 0 || cpu >= ARRAY_SIZE(preferred_cpu_list))
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* Defer the map/polling-CPU update to the CPU's first
		 * hrtimer tick (see met_hrtimer_notify()).
		 */
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		met_cpu_ptr->hrtimer_online_check = 1;
		dbg_met_tag_oneshot(0, "met_online check", cpu);

		if (cpu_related_cnt == 0) {
			online_cpu_map |= (1 << cpu);

			/* Only one CPU polls in this mode: if the current
			 * polling CPU went down, this CPU takes over and
			 * starts its hrtimer.
			 */
			if ((online_cpu_map & (1 << curr_polling_cpu)) == 0) {
				met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
				curr_polling_cpu = cpu;
			}
		} else
			met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);

		/* Bring the active PMU driver generation online for this CPU. */
		if (met_cpu_pmu_method != 0) {
			if (pmu_profiling_version == 1)
				met_perf_cpupmu_online(cpu);
#ifdef MET_SUPPORT_CPUPMU_V2
			else if (pmu_profiling_version == 2)
				met_perf_cpupmu_online_v2(cpu);
#endif
		}

#ifdef CONFIG_CPU_FREQ
		force_power_log(cpu);
#endif
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		online_cpu_map &= ~(1 << cpu);
		dbg_met_tag_oneshot(0, "met_offline cpu", cpu);
		if (cpu == curr_polling_cpu) {
			/* The polling CPU is going down: hand the role to the
			 * best remaining online CPU, if any.
			 */
			preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
			if (preferred_polling_cpu != -1) {
				curr_polling_cpu = preferred_polling_cpu;
				dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);

				/* Single-polling-CPU mode: the new polling CPU
				 * must have a running hrtimer.
				 */
				if (cpu_related_cnt == 0)
					met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
			}
		}

		met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
		/* Run the PMU teardown on the departing CPU itself, handing it
		 * its own cpu id via the per-CPU scratch variable.
		 */
		if (met_cpu_pmu_method != 0) {
			if (pmu_profiling_version == 1) {
				per_cpu(perf_cpuid, cpu) = cpu;
				met_smp_call_function_single_symbol(cpu, met_perf_cpupmu_down, (void *)&per_cpu(perf_cpuid, cpu), 1);
			}
#ifdef MET_SUPPORT_CPUPMU_V2
			else if (pmu_profiling_version == 2) {
				per_cpu(perf_cpuid, cpu) = cpu;
				met_smp_call_function_single_symbol(cpu, met_perf_cpupmu_down_v2, (void *)&per_cpu(perf_cpuid, cpu), 1);
			}
#endif
		}

		/* Flush any pending sync work for this CPU. */
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		dw = &met_cpu_ptr->dwork;
		cancel_delayed_work_sync(dw);

		/* sync_samples(cpu); */
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		/* Offlining aborted: undo DOWN_PREPARE by restarting the
		 * hrtimer and re-running the online check.
		 */
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		met_cpu_ptr->hrtimer_online_check = 1;
		dbg_met_tag_oneshot(0, "met_online check", cpu);

		met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
		break;

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
#ifdef CONFIG_CPU_FREQ
		force_power_log_val(0, cpu);
#endif
		break;
	}
	return NOTIFY_OK;
}
421
/* Hotplug notifier handle; registered in sampler_start(), unregistered
 * in sampler_stop().
 */
static struct notifier_block __refdata met_pmu_cpu_notifier = {
	.notifier_call = met_pmu_cpu_notify,
};
425
/*
 * Begin a sampling session.
 *
 * Sequence: reset per-CPU timer/work state, register the hotplug
 * notifier, start every enabled metdevice (with special-cased PMU
 * devices selecting pmu_profiling_version), compute the online-CPU map
 * and polling CPU under get_online_cpus(), then arm the hrtimers.
 *
 * Returns the register_hotcpu_notifier() result.
 */
int sampler_start(void)
{
	int ret, cpu;
	struct met_cpu_struct *met_cpu_ptr;
	struct metdevice *c;
	int preferred_polling_cpu;

	met_set_suspend_notify(0);

#ifdef CONFIG_CPU_FREQ
	force_power_log(POWER_LOG_ALL);
#endif

	/* Reset timer/work state on every possible CPU (not just online ones)
	 * so later hotplug events find a clean slate.
	 */
	for_each_possible_cpu(cpu) {
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		met_cpu_ptr->work_enabled = 0;
		met_cpu_ptr->hrtimer_online_check = 0;
		hrtimer_init(&met_cpu_ptr->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		met_cpu_ptr->hrtimer.function = met_hrtimer_notify;
		INIT_DELAYED_WORK(&met_cpu_ptr->dwork, wq_sync_buffer);
	}

	/* Keep the notifier inert (start == 0) until setup completes. */
	start = 0;
	ret = register_hotcpu_notifier(&met_pmu_cpu_notifier);

	list_for_each_entry(c, &met_list, list) {

		/* Pin the owning module for the duration of the session;
		 * released in sampler_stop().
		 */
		if (try_module_get(c->owner) == 0)
			continue;
#ifdef CONFIG_MET_ARM_32BIT
		/* 32-bit ARM: only the v1 "cpu" PMU device exists. */
		if (strcmp(c->name, "cpu") == 0) {
			if ((c->mode) && (c->start)) {
				pmu_profiling_version = 1;
				cpu_related_cnt = 1;
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_start();
				else
					c->start();
			}
			continue;
		}
#endif

#ifdef MET_SUPPORT_CPUPMU_V2
		/* "cpu-pmu" selects the v2 driver, "cpu" the v1 driver. */
		if (strcmp(c->name, "cpu-pmu") == 0) {
			if ((c->mode) && (c->start)) {
				pmu_profiling_version = 2;
				cpu_related_cnt = 1;
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_start_v2();
				else
					c->start();
			}
			continue;
		} else if (strcmp(c->name, "cpu") == 0) {
			if ((c->mode) && (c->start)) {
				pmu_profiling_version = 1;
				cpu_related_cnt = 1;
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_start();
				else
					c->start();
			}
			continue;
		}
#endif
		if ((c->mode) && (c->cpu_related == 1))
			cpu_related_cnt = 1;

		/* cpu_related devices are started later, per CPU, by
		 * __met_hrtimer_start(); only global devices start here.
		 */
		if (c->ondiemet_mode == 0) {
			if ((!(c->cpu_related)) && (c->mode) && (c->start))
				c->start();
		} else if (c->ondiemet_mode == 1) {
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
				c->ondiemet_start();
		} else if (c->ondiemet_mode == 2) {
			if ((!(c->cpu_related)) && (c->mode) && (c->start))
				c->start();
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
				c->ondiemet_start();
		}
	}

	/* Block hotplug while snapshotting the online map and arming timers. */
	get_online_cpus();
	online_cpu_map = 0;
	for_each_online_cpu(cpu) {
		online_cpu_map |= (1 << cpu);
	}
	dbg_met_tag_oneshot(0, "met_online cpu map", online_cpu_map);
	preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
	if (preferred_polling_cpu != -1)
		curr_polling_cpu = preferred_polling_cpu;
	dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
	start = 1;

	/* No per-CPU devices: one hrtimer on the polling CPU suffices.
	 * Otherwise every CPU needs its own.
	 */
	if (cpu_related_cnt == 0)
		met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
	else
		on_each_cpu(__met_hrtimer_start, NULL, 1);
	put_online_cpus();

	return ret;
}
529
/*
 * End the sampling session started by sampler_start().
 *
 * Stops all hrtimers and pending work, unregisters the hotplug
 * notifier, stops every metdevice (special-casing the PMU devices and
 * clearing pmu_profiling_version), and drops the module references
 * taken at start.
 */
void sampler_stop(void)
{
	int cpu;
	struct met_cpu_struct *met_cpu_ptr;
	struct metdevice *c;
	struct delayed_work *dw;

	get_online_cpus();

	on_each_cpu(__met_hrtimer_stop, NULL, 1);
	/* Flush work on every possible CPU (not only online) to be safe. */
	for_each_possible_cpu(cpu) { /* Just for case */
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		dw = &met_cpu_ptr->dwork;
		cancel_delayed_work_sync(dw);
		/* sync_samples(cpu); */
	}

	start = 0;
	put_online_cpus();

	unregister_hotcpu_notifier(&met_pmu_cpu_notifier);

	list_for_each_entry(c, &met_list, list) {
#ifdef CONFIG_MET_ARM_32BIT
		/* 32-bit ARM: v1 "cpu" PMU device teardown. */
		if (strcmp(c->name, "cpu") == 0) {
			pmu_profiling_version = 0;
			if ((c->mode) && (c->stop)) {
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_stop();
				else
					c->stop();
			}
			module_put(c->owner);
			continue;
		}
#endif

#ifdef MET_SUPPORT_CPUPMU_V2
		/* "cpu-pmu" = v2 driver teardown, "cpu" = v1 teardown. */
		if (strcmp(c->name, "cpu-pmu") == 0) {
			pmu_profiling_version = 0;
			if ((c->mode) && (c->stop)) {
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_stop_v2();
				else
					c->stop();
			}
			module_put(c->owner);
			continue;
		}
		else if (strcmp(c->name, "cpu") == 0) {
			pmu_profiling_version = 0;
			if ((c->mode) && (c->stop)) {
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_stop();
				else
					c->stop();
			}
			module_put(c->owner);
			continue;
		}
#endif
		/* cpu_related devices were already stopped per-CPU by
		 * __met_hrtimer_stop(); only global devices stop here.
		 */
		if (c->ondiemet_mode == 0) {
			if ((!(c->cpu_related)) && (c->mode) && (c->stop))
				c->stop();
		} else if (c->ondiemet_mode == 1) {
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
				c->ondiemet_stop();
		} else if (c->ondiemet_mode == 2) {
			if ((!(c->cpu_related)) && (c->mode) && (c->stop))
				c->stop();
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
				c->ondiemet_stop();
		}
		/* Balance the try_module_get() from sampler_start(). */
		module_put(c->owner);
	}

	cpu_related_cnt = 0;
}
609
/* Disabled suspend/resume trace markers, kept for reference. */
#if 0 /* can't use static now */
enum {
	MET_SUSPEND = 1,
	MET_RESUME = 2,
};

static noinline void tracing_mark_write(int op)
{
	switch (op) {
	case MET_SUSPEND:
		MET_TRACE("C|0|MET_SUSPEND|1");
		break;
	case MET_RESUME:
		MET_TRACE("C|0|MET_SUSPEND|0");
		break;
	}
}
#endif
628
629int met_hrtimer_suspend(void)
630{
631 struct metdevice *c;
632
633 met_set_suspend_notify(1);
634 /* tracing_mark_write(MET_SUSPEND); */
635// tracing_mark_write(TYPE_MET_SUSPEND, 0, 0, 0, 0, 0);
636 if (start == 0)
637 return 0;
638
639 list_for_each_entry(c, &met_list, list) {
640 if (c->suspend)
641 c->suspend();
642 }
643
644 /* get current COUNT */
645 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
646 return 0;
647}
648
649void met_hrtimer_resume(void)
650{
651 struct metdevice *c;
652
653 /* get current COUNT */
654 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
655
656 /* tracing_mark_write(MET_RESUME); */
657// tracing_mark_write(TYPE_MET_RESUME, 0, 0, 0, 0, 0);
658 if (start == 0)
659 return;
660
661 list_for_each_entry(c, &met_list, list) {
662 if (c->resume)
663 c->resume();
664 }
665}
666
667/*
668 * event timer:
669 * register IRQ, sched_switch event to monitor Polling count
670 * count can be printed at any live cpu.
671 */
672void met_event_timer_notify(void)
673{
674 unsigned long long stamp;
675 struct metdevice *c;
676 int cpu = -1;
677
678 if (start == 0)
679 return;
680
681 cpu = smp_processor_id();
682 list_for_each_entry(c, &met_list, list) {
683 stamp = local_clock();
684
685 if (c->prev_stamp == 0)
686 c->prev_stamp = stamp;
687
688 /* Critical Section Start */
689 /* try spinlock to prevent a event print twice between config time interval */
690 if (!spin_trylock(&(c->my_lock)))
691 continue;
692
693 /*
694 * DEFAULT_HRTIMER_EXPIRE (met_hrtimer_expire):
695 * sample_rate == 0 --> always print
696 * sample_rate == 1000 --> print interval larger than 1 ms
697 */
698 if (DEFAULT_HRTIMER_EXPIRE == 0 || (stamp - c->prev_stamp) < DEFAULT_HRTIMER_EXPIRE) {
699 spin_unlock(&(c->my_lock));
700 continue;
701 }
702
703 c->prev_stamp = stamp;
704 spin_unlock(&(c->my_lock));
705 /* Critical Section End */
706
707 if ((c->mode == 0) || (c->timed_polling == NULL))
708 continue;
709
710 stamp = local_clock();
711 c->timed_polling(stamp, cpu);
712 }
713}
714