blob: 23c2cc89cec3bc11a40b9acd942c9b8c44a6fc24 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2019 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/sched/clock.h>
15#include <linux/kernel.h>
16#include <linux/cpuhotplug.h>
17#include <linux/cpu.h>
18#include <linux/sched.h>
19#include <linux/notifier.h>
20#include <linux/module.h>
21#include <linux/irq.h>
22#if 0 /* fix me later, no such file on current tree */
23#include <mach/mt_cpuxgpt.h>
24#endif
25#include <asm/arch_timer.h>
26
27#define MET_USER_EVENT_SUPPORT
28#include "interface.h"
29#include "sampler.h"
30#include "met_struct.h"
31#include "util.h"
32#include "switch.h"
33#include "trace.h"
34#include "met_drv.h"
35#include "met_tag.h" /* for tracing_mark_write */
36
37#include "cpu_pmu.h" /* for using kernel perf PMU driver */
38
39#include "met_kernel_symbol.h"
40
41#undef DEBUG_CPU_NOTIFY
42/* #define DEBUG_CPU_NOTIFY */
43#if defined(DEBUG_CPU_NOTIFY)
44#ifdef CONFIG_MET_MODULE
45#define dbg_met_tag_oneshot met_tag_oneshot_real
46#else
47#define dbg_met_tag_oneshot met_tag_oneshot
48#endif /* CONFIG_MET_MODULE */
49#else
50#define dbg_met_tag_oneshot(class_id, name, value) ({ 0; })
51#endif
52
53static int start;
54static unsigned int online_cpu_map;
55static int curr_polling_cpu;
56static int cpu_related_cnt;
57static int cpu_related_polling_hdlr_cnt;
58
static int preferred_cpu_list[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

/*
 * Pick the polling CPU out of an online-CPU bitmap.
 *
 * Walks preferred_cpu_list in priority order and returns the first CPU
 * whose bit is set in @cpu_map, or -1 when none of the preferred CPUs
 * is online.
 */
static int calc_preferred_polling_cpu(unsigned int cpu_map)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(preferred_cpu_list); idx++) {
		int candidate = preferred_cpu_list[idx];

		if (cpu_map & (1 << candidate))
			return candidate;
	}

	return -1;
}
72
73static void wq_sync_buffer(struct work_struct *work)
74{
75 int cpu;
76 struct delayed_work *dw = container_of(work, struct delayed_work, work);
77 struct met_cpu_struct *met_cpu_ptr = container_of(dw, struct met_cpu_struct, dwork);
78
79 cpu = smp_processor_id();
80 if (met_cpu_ptr->cpu != cpu) {
81 /* panic("ERROR"); */
82 return;
83 }
84
85 /* sync_samples(cpu); */
86 /* don't re-add the work if we're shutting down */
87 if (met_cpu_ptr->work_enabled)
88 schedule_delayed_work(dw, DEFAULT_TIMER_EXPIRE);
89}
90
91static enum hrtimer_restart met_hrtimer_notify(struct hrtimer *hrtimer)
92{
93 int cpu;
94 int *count;
95 unsigned long long stamp;
96 struct met_cpu_struct *met_cpu_ptr = container_of(hrtimer, struct met_cpu_struct, hrtimer);
97 struct metdevice *c;
98#if defined(DEBUG_CPU_NOTIFY)
99 char msg[32];
100#endif
101
102 cpu = smp_processor_id();
103#if defined(DEBUG_CPU_NOTIFY)
104 {
105 char msg[32];
106
107 snprintf(msg, sizeof(msg), "met_hrtimer notify_%d", cpu);
108 dbg_met_tag_oneshot(0, msg, 1);
109 }
110#endif
111
112 if (met_cpu_ptr->cpu != cpu) {
113 /* panic("ERROR2"); */
114 dbg_met_tag_oneshot(0, msg, -3);
115 return HRTIMER_NORESTART;
116 }
117
118 list_for_each_entry(c, &met_list, list) {
119 if (c->ondiemet_mode == 0) {
120 if ((c->mode == 0) || (c->timed_polling == NULL))
121 continue;
122 } else if (c->ondiemet_mode == 1) {
123 if ((c->mode == 0) || (c->ondiemet_timed_polling == NULL))
124 continue;
125 } else if (c->ondiemet_mode == 2) {
126 if ((c->mode == 0) || ((c->timed_polling == NULL)
127 && (c->ondiemet_timed_polling == NULL)))
128 continue;
129 }
130
131 count = per_cpu_ptr(c->polling_count, cpu);
132 if ((*count) > 0) {
133 (*count)--;
134 continue;
135 }
136
137 *(count) = c->polling_count_reload;
138
139 stamp = cpu_clock(cpu);
140
141 if (c->cpu_related == 0) {
142 if (cpu == curr_polling_cpu) {
143 if (c->ondiemet_mode == 0) {
144 c->timed_polling(stamp, 0);
145 } else if (c->ondiemet_mode == 1) {
146 c->ondiemet_timed_polling(stamp, 0);
147 } else if (c->ondiemet_mode == 2) {
148 if (c->timed_polling)
149 c->timed_polling(stamp, 0);
150 if (c->ondiemet_timed_polling)
151 c->ondiemet_timed_polling(stamp, 0);
152 }
153 }
154 } else {
155 if (c->ondiemet_mode == 0) {
156 c->timed_polling(stamp, cpu);
157 } else if (c->ondiemet_mode == 1) {
158 c->ondiemet_timed_polling(stamp, cpu);
159 } else if (c->ondiemet_mode == 2) {
160 if (c->timed_polling)
161 c->timed_polling(stamp, 0);
162 if (c->ondiemet_timed_polling)
163 c->ondiemet_timed_polling(stamp, 0);
164 }
165 }
166 }
167
168 if (met_cpu_ptr->hrtimer_online_check) {
169 online_cpu_map |= (1 << cpu);
170 met_cpu_ptr->hrtimer_online_check = 0;
171 dbg_met_tag_oneshot(0, "met_online check done", cpu);
172 if (calc_preferred_polling_cpu(online_cpu_map) == cpu) {
173 curr_polling_cpu = cpu;
174 dbg_met_tag_oneshot(0, "met_curr polling cpu", cpu);
175 }
176 }
177
178 if (met_cpu_ptr->work_enabled) {
179 hrtimer_forward_now(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE));
180 dbg_met_tag_oneshot(0, msg, 0);
181 return HRTIMER_RESTART;
182 }
183 dbg_met_tag_oneshot(0, msg, 0);
184 return HRTIMER_NORESTART;
185}
186
187static void __met_init_cpu_related_device(void *unused)
188{
189 struct metdevice *c;
190
191 list_for_each_entry(c, &met_list, list) {
192 *(this_cpu_ptr(c->polling_count)) = 0;
193 if (c->ondiemet_mode == 0) {
194 if ((c->cpu_related) && (c->mode) && (c->start))
195 c->start();
196 } else if (c->ondiemet_mode == 1) {
197 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
198 c->ondiemet_start();
199 } else if (c->ondiemet_mode == 2) {
200 if ((c->cpu_related) && (c->mode) && (c->start))
201 c->start();
202 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
203 c->ondiemet_start();
204 }
205 }
206}
207
208static void __met_hrtimer_register(void *unused)
209{
210 struct met_cpu_struct *met_cpu_ptr = NULL;
211 struct hrtimer *hrtimer = NULL;
212 /* struct delayed_work *dw; */
213 /*struct metdevice *c;*/
214
215 met_cpu_ptr = this_cpu_ptr(&met_cpu);
216#if defined(DEBUG_CPU_NOTIFY)
217 {
218 char msg[32];
219
220 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
221 dbg_met_tag_oneshot(0, msg, 1);
222 }
223#endif
224 /*
225 * do not open HRtimer when EVENT timer enable
226 */
227 if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
228
229 hrtimer = &met_cpu_ptr->hrtimer;
230 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
231 hrtimer->function = met_hrtimer_notify;
232
233 if (DEFAULT_HRTIMER_EXPIRE) {
234 met_cpu_ptr->work_enabled = 1;
235 /* schedule_delayed_work_on(smp_processor_id(), dw, DEFAULT_TIMER_EXPIRE); */
236 hrtimer_start(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE),
237 HRTIMER_MODE_REL_PINNED);
238 }
239 }
240}
241
242static void __met_hrtimer_stop(void *unused)
243{
244 struct met_cpu_struct *met_cpu_ptr;
245 struct hrtimer *hrtimer;
246 /* struct delayed_work *dw; */
247 struct metdevice *c;
248
249 met_cpu_ptr = this_cpu_ptr(&met_cpu);
250#if defined(DEBUG_CPU_NOTIFY)
251 {
252 char msg[32];
253
254 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
255 dbg_met_tag_oneshot(0, msg, 0);
256 }
257#endif
258 /*
259 * do not open HRtimer when EVENT timer enable
260 */
261 if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
262 hrtimer = &met_cpu_ptr->hrtimer;
263 /* dw = &met_cpu_ptr->dwork; */
264
265 met_cpu_ptr->work_enabled = 0;
266 hrtimer_cancel(hrtimer);
267
268 /* cancel_delayed_work_sync(dw); */
269 }
270 list_for_each_entry(c, &met_list, list) {
271 if (c->ondiemet_mode == 0) {
272 if ((c->cpu_related) && (c->mode) && (c->stop))
273 c->stop();
274 } else if (c->ondiemet_mode == 1) {
275 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
276 c->ondiemet_stop();
277 } else if (c->ondiemet_mode == 2) {
278 if ((c->cpu_related) && (c->mode) && (c->stop))
279 c->stop();
280 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
281 c->ondiemet_stop();
282 }
283 *(this_cpu_ptr(c->polling_count)) = 0;
284 }
285}
286
287static int met_pmu_cpu_notify(enum met_action action, unsigned int cpu)
288{
289 struct met_cpu_struct *met_cpu_ptr;
290 struct delayed_work *dw;
291 int preferred_polling_cpu;
292 struct metdevice *c;
293
294 if (start == 0)
295 return NOTIFY_OK;
296
297#if defined(DEBUG_CPU_NOTIFY)
298 {
299 char msg[32];
300
301 snprintf(msg, sizeof(msg), "met_cpu notify_%d", cpu);
302 dbg_met_tag_oneshot(0, msg, action);
303 }
304#elif defined(PR_CPU_NOTIFY)
305 {
306 char msg[32];
307
308 if (met_cpu_notify) {
309 snprintf(msg, sizeof(msg), "met_cpu notify_%d", cpu);
310 dbg_met_tag_oneshot(0, msg, action);
311 }
312 }
313#endif
314
315 if (cpu < 0 || cpu >= NR_CPUS)
316 return NOTIFY_OK;
317
318 switch (action) {
319 case MET_CPU_ONLINE:
320 met_cpu_ptr = &per_cpu(met_cpu, cpu);
321 met_cpu_ptr->hrtimer_online_check = 1;
322 dbg_met_tag_oneshot(0, "met_online check", cpu);
323
324 if (cpu_related_cnt == 0) {
325 /*pr_info("%s, %d: curr_polling_cpu is alive = %d\n",
326 * __func__, __LINE__, online_cpu_map & (1 << curr_polling_cpu));
327 */
328
329 online_cpu_map |= (1 << cpu);
330
331 /* check curr_polling_cpu is alive, if it is down,
332 * start current cpu hrtimer, and change it to be currr_pollling_cpu
333 */
334 if ((online_cpu_map & (1 << curr_polling_cpu)) == 0) {
335 met_smp_call_function_single_symbol(cpu, __met_hrtimer_register, NULL, 1);
336 curr_polling_cpu = cpu;
337 }
338 } else {
339 if (cpu_related_polling_hdlr_cnt) {
340 met_smp_call_function_single_symbol(cpu, __met_init_cpu_related_device, NULL, 1);
341 met_smp_call_function_single_symbol(cpu, __met_hrtimer_register, NULL, 1);
342 } else {
343
344 /*pr_info("%s, %d: curr_polling_cpu is alive = %d\n",
345 * __func__, __LINE__, online_cpu_map & (1 << curr_polling_cpu));
346 */
347
348 online_cpu_map |= (1 << cpu);
349
350 /* check curr_polling_cpu is alive, if it is down,
351 * start current cpu hrtimer, and change it to be currr_pollling_cpu
352 */
353 if ((online_cpu_map & (1 << curr_polling_cpu)) == 0) {
354 met_smp_call_function_single_symbol(cpu, __met_init_cpu_related_device, NULL, 1);
355 met_smp_call_function_single_symbol(cpu, __met_hrtimer_register, NULL, 1);
356 curr_polling_cpu = cpu;
357 }
358 }
359 }
360
361#ifdef CONFIG_CPU_FREQ
362 force_power_log(cpu);
363#endif
364 list_for_each_entry(c, &met_list, list) {
365 if (c->cpu_state_notify)
366 c->cpu_state_notify(cpu, action);
367 }
368 break;
369
370 case MET_CPU_OFFLINE:
371 list_for_each_entry(c, &met_list, list) {
372 if (c->cpu_state_notify)
373 c->cpu_state_notify(cpu, action);
374 }
375
376 online_cpu_map &= ~(1 << cpu);
377 dbg_met_tag_oneshot(0, "met_offline cpu", cpu);
378 if (cpu == curr_polling_cpu) {
379 /* pr_info("%s, %d: curr_polling_cpu %d is down\n",
380 * __func__, __LINE__, curr_polling_cpu);
381 */
382 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
383 /* pr_info("%s, %d: preferred_polling_cpu = %d\n",
384 * __func__, __LINE__, preferred_polling_cpu);
385 */
386 if (preferred_polling_cpu != -1) {
387 curr_polling_cpu = preferred_polling_cpu;
388 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
389
390 if (cpu_related_cnt == 0) {
391 /* pr_info("%s, %d: start cpu %d hrtimer start\n",
392 * __func__, __LINE__, curr_polling_cpu);
393 */
394 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_register, NULL, 1);
395 } else if (cpu_related_polling_hdlr_cnt == 0) {
396 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_register, NULL, 1);
397 }
398 }
399 }
400
401 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
402
403 met_cpu_ptr = &per_cpu(met_cpu, cpu);
404 dw = &met_cpu_ptr->dwork;
405 cancel_delayed_work_sync(dw);
406
407 /* sync_samples(cpu); */
408 break;
409 default:
410 list_for_each_entry(c, &met_list, list) {
411 if (c->cpu_state_notify)
412 c->cpu_state_notify(cpu, action);
413 }
414 }
415
416 return NOTIFY_OK;
417}
418
419static int _met_pmu_cpu_notify_online(unsigned int cpu)
420{
421 met_pmu_cpu_notify(MET_CPU_ONLINE, cpu);
422
423 return 0;
424}
425
426static int _met_pmu_cpu_notify_offline(unsigned int cpu)
427{
428 met_pmu_cpu_notify(MET_CPU_OFFLINE, cpu);
429
430 return 0;
431}
432
433int sampler_start(void)
434{
435 int ret, cpu;
436 struct met_cpu_struct *met_cpu_ptr;
437 struct metdevice *c;
438 int preferred_polling_cpu;
439
440 met_set_suspend_notify(0);
441
442#ifdef CONFIG_CPU_FREQ
443 force_power_log(POWER_LOG_ALL);
444#endif
445
446 for_each_possible_cpu(cpu) {
447 met_cpu_ptr = &per_cpu(met_cpu, cpu);
448 met_cpu_ptr->work_enabled = 0;
449 met_cpu_ptr->hrtimer_online_check = 0;
450 hrtimer_init(&met_cpu_ptr->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
451 met_cpu_ptr->hrtimer.function = met_hrtimer_notify;
452 INIT_DELAYED_WORK(&met_cpu_ptr->dwork, wq_sync_buffer);
453 }
454
455 start = 0;
456 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
457 "met:online",
458 _met_pmu_cpu_notify_online,
459 _met_pmu_cpu_notify_offline);
460
461 list_for_each_entry(c, &met_list, list) {
462
463 if (try_module_get(c->owner) == 0)
464 continue;
465
466 if ((c->mode) && (c->cpu_related == 1)) {
467 cpu_related_cnt = 1;
468
469 if (c->ondiemet_mode == 0) {
470 if (c->timed_polling)
471 cpu_related_polling_hdlr_cnt = 1;
472 } else if (c->ondiemet_mode == 1) {
473 if (c->ondiemet_timed_polling)
474 cpu_related_polling_hdlr_cnt = 1;
475 } else if (c->ondiemet_mode == 2) {
476 if (c->timed_polling || c->ondiemet_timed_polling)
477 cpu_related_polling_hdlr_cnt = 1;
478 }
479 }
480
481 if (c->ondiemet_mode == 0) {
482 if ((!(c->cpu_related)) && (c->mode) && (c->start))
483 c->start();
484 else if ((c->cpu_related) && (c->mode) && (c->uniq_start))
485 c->uniq_start();
486 } else if (c->ondiemet_mode == 1) {
487 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
488 c->ondiemet_start();
489 if ((c->cpu_related) && (c->mode) && (c->uniq_ondiemet_start))
490 c->uniq_ondiemet_start();
491 } else if (c->ondiemet_mode == 2) {
492 if ((!(c->cpu_related)) && (c->mode) && (c->start))
493 c->start();
494 else if ((c->cpu_related) && (c->mode) && (c->uniq_start))
495 c->uniq_start();
496
497 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
498 c->ondiemet_start();
499 else if ((c->cpu_related) && (c->mode) && (c->uniq_ondiemet_start))
500 c->uniq_ondiemet_start();
501 }
502 }
503
504 get_online_cpus();
505 online_cpu_map = 0;
506 for_each_online_cpu(cpu) {
507 online_cpu_map |= (1 << cpu);
508 }
509 dbg_met_tag_oneshot(0, "met_online cpu map", online_cpu_map);
510 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
511 if (preferred_polling_cpu != -1)
512 curr_polling_cpu = preferred_polling_cpu;
513 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
514 start = 1;
515
516 if (cpu_related_cnt == 0) {
517 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_register, NULL, 1);
518 }
519 else {
520 //on_each_cpu(__met_hrtimer_start, NULL, 1);
521 for_each_online_cpu(cpu) {
522 met_smp_call_function_single_symbol(cpu, __met_init_cpu_related_device, NULL, 1);
523 }
524
525 if (cpu_related_polling_hdlr_cnt) {
526 for_each_online_cpu(cpu) {
527 met_smp_call_function_single_symbol(cpu, __met_hrtimer_register, NULL, 1);
528 }
529 } else {
530 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_register, NULL, 1);
531 }
532 }
533 put_online_cpus();
534
535 return ret;
536}
537
538void sampler_stop(void)
539{
540 int cpu;
541 struct met_cpu_struct *met_cpu_ptr;
542 struct metdevice *c;
543 struct delayed_work *dw;
544
545
546 get_online_cpus();
547 //on_each_cpu(__met_hrtimer_stop, NULL, 1);
548 online_cpu_map = 0;
549 for_each_online_cpu(cpu) {
550 online_cpu_map |= (1 << cpu);
551 }
552
553 for_each_online_cpu(cpu) {
554 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
555 }
556
557 /* for_each_online_cpu(cpu) { */
558 for_each_possible_cpu(cpu) { /* Just for case */
559 met_cpu_ptr = &per_cpu(met_cpu, cpu);
560 dw = &met_cpu_ptr->dwork;
561 cancel_delayed_work_sync(dw);
562 /* sync_samples(cpu); */
563 }
564 start = 0;
565 put_online_cpus();
566
567 cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
568
569 list_for_each_entry(c, &met_list, list) {
570 if (c->ondiemet_mode == 0) {
571 if ((!(c->cpu_related)) && (c->mode) && (c->stop))
572 c->stop();
573 else if ((c->cpu_related) && (c->mode) && (c->uniq_stop))
574 c->uniq_stop();
575 } else if (c->ondiemet_mode == 1) {
576 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
577 c->ondiemet_stop();
578 else if ((c->cpu_related) && (c->mode) && (c->uniq_ondiemet_stop))
579 c->uniq_ondiemet_stop();
580 } else if (c->ondiemet_mode == 2) {
581 if ((!(c->cpu_related)) && (c->mode) && (c->stop))
582 c->stop();
583 else if ((c->cpu_related) && (c->mode) && (c->uniq_stop))
584 c->uniq_stop();
585
586 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
587 c->ondiemet_stop();
588 else if ((c->cpu_related) && (c->mode) && (c->uniq_ondiemet_stop))
589 c->uniq_ondiemet_stop();
590 }
591 module_put(c->owner);
592 }
593
594 cpu_related_cnt = 0;
595 cpu_related_polling_hdlr_cnt = 0;
596}
597
598#if 0 /* cann't use static now */
599enum {
600 MET_SUSPEND = 1,
601 MET_RESUME = 2,
602};
603
604static noinline void tracing_mark_write(int op)
605{
606 switch (op) {
607 case MET_SUSPEND:
608 MET_TRACE("C|0|MET_SUSPEND|1");
609 break;
610 case MET_RESUME:
611 MET_TRACE("C|0|MET_SUSPEND|0");
612 break;
613 }
614}
615#endif
616
617int met_hrtimer_suspend(void)
618{
619 struct metdevice *c;
620
621 met_set_suspend_notify(1);
622 /* tracing_mark_write(MET_SUSPEND); */
623 tracing_mark_write(TYPE_MET_SUSPEND, 0, 0, 0, 0, 0);
624 if (start == 0)
625 return 0;
626
627 list_for_each_entry(c, &met_list, list) {
628 if (c->suspend)
629 c->suspend();
630 }
631
632 /* get current COUNT */
633 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
634 return 0;
635}
636
637void met_hrtimer_resume(void)
638{
639 struct metdevice *c;
640
641 /* get current COUNT */
642 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
643
644 /* tracing_mark_write(MET_RESUME); */
645 tracing_mark_write(TYPE_MET_RESUME, 0, 0, 0, 0, 0);
646 if (start == 0)
647 return;
648
649 list_for_each_entry(c, &met_list, list) {
650 if (c->resume)
651 c->resume();
652 }
653}
654
655/*
656 * event timer:
657 * register IRQ, sched_switch event to monitor Polling count
658 * count can be printed at any live cpu.
659 */
660void met_event_timer_notify(void)
661{
662 unsigned long long stamp;
663 struct metdevice *c;
664 int cpu = -1;
665
666 if (start == 0)
667 return;
668
669 cpu = smp_processor_id();
670 list_for_each_entry(c, &met_list, list) {
671 stamp = local_clock();
672
673 if (c->prev_stamp == 0)
674 c->prev_stamp = stamp;
675
676 /* Critical Section Start */
677 /* try spinlock to prevent a event print twice between config time interval */
678 if (!spin_trylock(&(c->my_lock)))
679 continue;
680
681 /*
682 * DEFAULT_HRTIMER_EXPIRE (met_hrtimer_expire):
683 * sample_rate == 0 --> always print
684 * sample_rate == 1000 --> print interval larger than 1 ms
685 */
686 if (DEFAULT_HRTIMER_EXPIRE == 0 || (stamp - c->prev_stamp) < DEFAULT_HRTIMER_EXPIRE) {
687 spin_unlock(&(c->my_lock));
688 continue;
689 }
690
691 c->prev_stamp = stamp;
692 spin_unlock(&(c->my_lock));
693 /* Critical Section End */
694
695 if ((c->mode == 0) || (c->timed_polling == NULL))
696 continue;
697
698 stamp = local_clock();
699 c->timed_polling(stamp, cpu);
700 }
701}
702