/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/irq.h>
#if 0 /* fix me later, no such file on current tree */
#include <mach/mt_cpuxgpt.h>
#endif
#include <asm/arch_timer.h>

#define MET_USER_EVENT_SUPPORT
#include "interface.h"
#include "sampler.h"
#include "met_struct.h"
#include "util.h"
#include "switch.h"
#include "trace.h"
#include "met_drv.h"
#include "met_tag.h" /* for tracing_mark_write */

#include "cpu_pmu.h" /* for using kernel perf PMU driver */
#include "cpu_pmu_v2.h" /* for using kernel perf PMU v2 driver */
#include "met_kernel_symbol.h"
#undef DEBUG_CPU_NOTIFY
/* #define DEBUG_CPU_NOTIFY */
#if defined(DEBUG_CPU_NOTIFY)
#ifdef CONFIG_MET_MODULE
#define dbg_met_tag_oneshot met_tag_oneshot_real
#else
#define dbg_met_tag_oneshot met_tag_oneshot
#endif /* CONFIG_MET_MODULE */
#else
/* Debug tracing disabled: ignore all arguments and evaluate to 0. */
#define dbg_met_tag_oneshot(class_id, name, value) ({ 0; })
#endif

/* Non-zero while sampling is active (set in sampler_start, cleared in sampler_stop). */
static int start;
/* Bitmap of online CPUs: bit N set == CPU N online; maintained by the hotplug notifier. */
static unsigned int online_cpu_map;
/* CPU currently responsible for polling the global (non cpu_related) metdevices. */
static int curr_polling_cpu;
/* Non-zero when at least one enabled metdevice is per-CPU (cpu_related). */
static int cpu_related_cnt;

/* Active CPU-PMU backend: 0 = none, 1 = cpu_pmu (v1), 2 = cpu_pmu_v2. */
static int pmu_profiling_version = 0;

/* Per-CPU scratch holding the CPU id handed to the PMU "down" callbacks. */
static DEFINE_PER_CPU(unsigned int, perf_cpuid);

/* Order in which CPUs are preferred as the polling CPU
 * (presumably cluster-leading cores first: 0 then 4 — confirm with HW layout).
 */
static int preferred_cpu_list[] = { 0, 4, 1, 2, 3, 5, 6, 7 };
63
64int get_pmu_profiling_version()
65{
66 return pmu_profiling_version;
67}
68
69static int calc_preferred_polling_cpu(unsigned int cpu_map)
70{
71 int i;
72
73 for (i = 0; i < ARRAY_SIZE(preferred_cpu_list); i++) {
74 if (cpu_map & (1 << preferred_cpu_list[i]))
75 return preferred_cpu_list[i];
76 }
77
78 return -1;
79}
80
81static void wq_sync_buffer(struct work_struct *work)
82{
83 int cpu;
84 struct delayed_work *dw = container_of(work, struct delayed_work, work);
85 struct met_cpu_struct *met_cpu_ptr = container_of(dw, struct met_cpu_struct, dwork);
86
87 cpu = smp_processor_id();
88 if (met_cpu_ptr->cpu != cpu) {
89 /* panic("ERROR"); */
90 return;
91 }
92
93 /* sync_samples(cpu); */
94 /* don't re-add the work if we're shutting down */
95 if (met_cpu_ptr->work_enabled)
96 schedule_delayed_work(dw, DEFAULT_TIMER_EXPIRE);
97}
98
99static enum hrtimer_restart met_hrtimer_notify(struct hrtimer *hrtimer)
100{
101 int cpu;
102 int *count;
103 unsigned long long stamp;
104 struct met_cpu_struct *met_cpu_ptr = container_of(hrtimer, struct met_cpu_struct, hrtimer);
105 struct metdevice *c;
106#if defined(DEBUG_CPU_NOTIFY)
107 char msg[32];
108#endif
109
110 cpu = smp_processor_id();
111#if defined(DEBUG_CPU_NOTIFY)
112 {
113 char msg[32];
114
115 snprintf(msg, sizeof(msg), "met_hrtimer notify_%d", cpu);
116 dbg_met_tag_oneshot(0, msg, 1);
117 }
118#endif
119
120 if (met_cpu_ptr->cpu != cpu) {
121 /* panic("ERROR2"); */
122 dbg_met_tag_oneshot(0, msg, -3);
123 return HRTIMER_NORESTART;
124 }
125
126 list_for_each_entry(c, &met_list, list) {
127 if (c->ondiemet_mode == 0) {
128 if ((c->mode == 0) || (c->timed_polling == NULL))
129 continue;
130 } else if (c->ondiemet_mode == 1) {
131 if ((c->mode == 0) || (c->ondiemet_timed_polling == NULL))
132 continue;
133 } else if (c->ondiemet_mode == 2) {
134 if ((c->mode == 0) || ((c->timed_polling == NULL)
135 && (c->ondiemet_timed_polling == NULL)))
136 continue;
137 }
138
139 count = per_cpu_ptr(c->polling_count, cpu);
140 if ((*count) > 0) {
141 (*count)--;
142 continue;
143 }
144
145 *(count) = c->polling_count_reload;
146
147 stamp = cpu_clock(cpu);
148
149 if (c->cpu_related == 0) {
150 if (cpu == curr_polling_cpu) {
151 if (c->ondiemet_mode == 0) {
152 c->timed_polling(stamp, 0);
153 } else if (c->ondiemet_mode == 1) {
154 c->ondiemet_timed_polling(stamp, 0);
155 } else if (c->ondiemet_mode == 2) {
156 if (c->timed_polling)
157 c->timed_polling(stamp, 0);
158 if (c->ondiemet_timed_polling)
159 c->ondiemet_timed_polling(stamp, 0);
160 }
161 }
162 } else {
163 if (c->ondiemet_mode == 0) {
164 c->timed_polling(stamp, cpu);
165 } else if (c->ondiemet_mode == 1) {
166 c->ondiemet_timed_polling(stamp, cpu);
167 } else if (c->ondiemet_mode == 2) {
168 if (c->timed_polling)
169 c->timed_polling(stamp, 0);
170 if (c->ondiemet_timed_polling)
171 c->ondiemet_timed_polling(stamp, 0);
172 }
173 }
174 }
175
176 if (met_cpu_ptr->hrtimer_online_check) {
177 online_cpu_map |= (1 << cpu);
178 met_cpu_ptr->hrtimer_online_check = 0;
179 dbg_met_tag_oneshot(0, "met_online check done", cpu);
180 if (calc_preferred_polling_cpu(online_cpu_map) == cpu) {
181 curr_polling_cpu = cpu;
182 dbg_met_tag_oneshot(0, "met_curr polling cpu", cpu);
183 }
184 }
185
186 if (met_cpu_ptr->work_enabled) {
187 hrtimer_forward_now(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE));
188 dbg_met_tag_oneshot(0, msg, 0);
189 return HRTIMER_RESTART;
190 }
191 dbg_met_tag_oneshot(0, msg, 0);
192 return HRTIMER_NORESTART;
193}
194
195static void __met_hrtimer_start(void *unused)
196{
197 struct met_cpu_struct *met_cpu_ptr = NULL;
198 struct hrtimer *hrtimer = NULL;
199 /* struct delayed_work *dw; */
200 struct metdevice *c;
201
202 met_cpu_ptr = this_cpu_ptr(&met_cpu);
203#if defined(DEBUG_CPU_NOTIFY)
204 {
205 char msg[32];
206
207 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
208 dbg_met_tag_oneshot(0, msg, 1);
209 }
210#endif
211 /*
212 * do not open HRtimer when EVENT timer enable
213 */
214// if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
215 hrtimer = &met_cpu_ptr->hrtimer;
216 /* dw = &met_cpu_ptr->dwork; */
217
218 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
219 hrtimer->function = met_hrtimer_notify;
220// }
221
222 list_for_each_entry(c, &met_list, list) {
223 if (c->ondiemet_mode == 0) {
224 if ((c->cpu_related) && (c->mode) && (c->start))
225 c->start();
226 } else if (c->ondiemet_mode == 1) {
227 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
228 c->ondiemet_start();
229 } else if (c->ondiemet_mode == 2) {
230 if ((c->cpu_related) && (c->mode) && (c->start))
231 c->start();
232 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
233 c->ondiemet_start();
234 }
235 }
236 /*
237 * do not open HRtimer when EVENT timer enable
238 */
239// if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
240 if (DEFAULT_HRTIMER_EXPIRE) {
241 met_cpu_ptr->work_enabled = 1;
242 /* schedule_delayed_work_on(smp_processor_id(), dw, DEFAULT_TIMER_EXPIRE); */
243 hrtimer_start(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE),
244 HRTIMER_MODE_REL_PINNED);
245 }
246// }
247}
248
249static void __met_hrtimer_stop(void *unused)
250{
251 struct met_cpu_struct *met_cpu_ptr;
252 struct hrtimer *hrtimer;
253 /* struct delayed_work *dw; */
254 struct metdevice *c;
255
256 met_cpu_ptr = this_cpu_ptr(&met_cpu);
257#if defined(DEBUG_CPU_NOTIFY)
258 {
259 char msg[32];
260
261 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
262 dbg_met_tag_oneshot(0, msg, 0);
263 }
264#endif
265 /*
266 * do not open HRtimer when EVENT timer enable
267 */
268// if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
269 hrtimer = &met_cpu_ptr->hrtimer;
270 /* dw = &met_cpu_ptr->dwork; */
271
272 met_cpu_ptr->work_enabled = 0;
273 hrtimer_cancel(hrtimer);
274 /* cancel_delayed_work_sync(dw); */
275// }
276 list_for_each_entry(c, &met_list, list) {
277 if (c->ondiemet_mode == 0) {
278 if ((c->cpu_related) && (c->mode) && (c->stop))
279 c->stop();
280 } else if (c->ondiemet_mode == 1) {
281 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
282 c->ondiemet_stop();
283 } else if (c->ondiemet_mode == 2) {
284 if ((c->cpu_related) && (c->mode) && (c->stop))
285 c->stop();
286 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
287 c->ondiemet_stop();
288 }
289 }
290}
291
292static int met_pmu_cpu_notify(enum met_action action, unsigned int cpu)
293{
294 struct met_cpu_struct *met_cpu_ptr;
295 struct delayed_work *dw;
296 int preferred_polling_cpu;
297
298 if (start == 0)
299 return NOTIFY_OK;
300
301#if defined(DEBUG_CPU_NOTIFY)
302 {
303 char msg[32];
304
305 snprintf(msg, sizeof(msg), "met_cpu notify_%d", cpu);
306 dbg_met_tag_oneshot(0, msg, action);
307 }
308#elif defined(PR_CPU_NOTIFY)
309 {
310 char msg[32];
311
312 if (met_cpu_notify) {
313 snprintf(msg, sizeof(msg), "met_cpu notify_%d", cpu);
314 dbg_met_tag_oneshot(0, msg, action);
315 }
316 }
317#endif
318
319 if (cpu < 0 || cpu >= NR_CPUS)
320 return NOTIFY_OK;
321
322 switch (action) {
323 case MET_CPU_ONLINE:
324 met_cpu_ptr = &per_cpu(met_cpu, cpu);
325 met_cpu_ptr->hrtimer_online_check = 1;
326 dbg_met_tag_oneshot(0, "met_online check", cpu);
327
328 if (cpu_related_cnt == 0) {
329 /*printk("%s, %d: curr_polling_cpu is alive = %d\n",
330 * __func__, __LINE__, online_cpu_map & (1 << curr_polling_cpu));
331 */
332
333 online_cpu_map |= (1 << cpu);
334
335 /* check curr_polling_cpu is alive, if it is down,
336 * start current cpu hrtimer, and change it to be currr_pollling_cpu
337 */
338 if ((online_cpu_map & (1 << curr_polling_cpu)) == 0) {
339 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
340 curr_polling_cpu = cpu;
341 }
342 } else
343 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
344
345 if (met_cpu_pmu_method != 0) {
346 if (pmu_profiling_version == 1)
347 met_perf_cpupmu_online(cpu);
348#ifdef MET_SUPPORT_CPUPMU_V2
349 else if (pmu_profiling_version == 2)
350 met_perf_cpupmu_online_v2(cpu);
351#endif
352 }
353
354#ifdef CONFIG_CPU_FREQ
355 force_power_log(cpu);
356#endif
357 break;
358
359 case MET_CPU_OFFLINE:
360 online_cpu_map &= ~(1 << cpu);
361 dbg_met_tag_oneshot(0, "met_offline cpu", cpu);
362 if (cpu == curr_polling_cpu) {
363 /* printk("%s, %d: curr_polling_cpu %d is down\n",
364 * __func__, __LINE__, curr_polling_cpu);
365 */
366 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
367 /* printk("%s, %d: preferred_polling_cpu = %d\n",
368 * __func__, __LINE__, preferred_polling_cpu);
369 */
370 if (preferred_polling_cpu != -1) {
371 curr_polling_cpu = preferred_polling_cpu;
372 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
373
374 if (cpu_related_cnt == 0)
375 /* printk("%s, %d: start cpu %d hrtimer start\n",
376 * __func__, __LINE__, curr_polling_cpu);
377 */
378 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
379 }
380 }
381
382 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
383 if (met_cpu_pmu_method != 0) {
384 if (pmu_profiling_version == 1) {
385 per_cpu(perf_cpuid, cpu) = cpu;
386 met_smp_call_function_single_symbol(cpu, met_perf_cpupmu_down, (void *)&per_cpu(perf_cpuid, cpu), 1);
387 }
388#ifdef MET_SUPPORT_CPUPMU_V2
389 else if (pmu_profiling_version == 2) {
390 per_cpu(perf_cpuid, cpu) = cpu;
391 met_smp_call_function_single_symbol(cpu, met_perf_cpupmu_down_v2, (void *)&per_cpu(perf_cpuid, cpu), 1);
392 }
393#endif
394 }
395
396 met_cpu_ptr = &per_cpu(met_cpu, cpu);
397 dw = &met_cpu_ptr->dwork;
398 cancel_delayed_work_sync(dw);
399
400 /* sync_samples(cpu); */
401 break;
402 default:
403 break;
404 }
405
406 return NOTIFY_OK;
407}
408
409static int _met_pmu_cpu_notify_online(unsigned int cpu)
410{
411 met_pmu_cpu_notify(MET_CPU_ONLINE, cpu);
412
413 return 0;
414}
415
416static int _met_pmu_cpu_notify_offline(unsigned int cpu)
417{
418 met_pmu_cpu_notify(MET_CPU_OFFLINE, cpu);
419
420 return 0;
421}
422
/*
 * sampler_start - bring the whole sampling machinery up.
 *
 * Resets per-CPU timer/work state, registers CPU-hotplug callbacks,
 * starts every enabled metdevice (choosing the PMU backend version from
 * the "cpu"/"cpu-pmu" devices), computes the initial online_cpu_map and
 * polling CPU, and finally arms the sampling hrtimers — on every CPU
 * when a cpu_related device is active, otherwise only on
 * curr_polling_cpu.
 *
 * Returns the value of cpuhp_setup_state_nocalls(); for
 * CPUHP_AP_ONLINE_DYN that is the allocated dynamic state (positive) on
 * success or a negative errno.
 *
 * NOTE(review): the dynamic state in ret is not stored anywhere, and
 * sampler_stop() calls cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN)
 * rather than passing this value — verify against the target kernel's
 * cpuhp API, which normally expects the value returned here.
 */
int sampler_start(void)
{
	int ret, cpu;
	struct met_cpu_struct *met_cpu_ptr;
	struct metdevice *c;
	int preferred_polling_cpu;

	met_set_suspend_notify(0);

#ifdef CONFIG_CPU_FREQ
	force_power_log(POWER_LOG_ALL);
#endif

	/* reset per-CPU timer and deferred-work state for every possible CPU */
	for_each_possible_cpu(cpu) {
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		met_cpu_ptr->work_enabled = 0;
		met_cpu_ptr->hrtimer_online_check = 0;
		hrtimer_init(&met_cpu_ptr->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		met_cpu_ptr->hrtimer.function = met_hrtimer_notify;
		INIT_DELAYED_WORK(&met_cpu_ptr->dwork, wq_sync_buffer);
	}

	/* keep the hotplug notifier inert until the setup below completes */
	start = 0;
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"met:online",
					_met_pmu_cpu_notify_online,
					_met_pmu_cpu_notify_offline);

	list_for_each_entry(c, &met_list, list) {

		if (try_module_get(c->owner) == 0)
			continue;
#ifdef CONFIG_MET_ARM_32BIT
		/* 32-bit ARM build: only the v1 PMU backend exists */
		if (strcmp(c->name, "cpu") == 0) {
			if ((c->mode) && (c->start)) {
				pmu_profiling_version = 1;
				cpu_related_cnt = 1;
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_start();
				else
					c->start();
			}
			continue;
		}
#endif

#ifdef MET_SUPPORT_CPUPMU_V2
		/* select PMU backend: "cpu-pmu" device -> v2, "cpu" device -> v1 */
		if (strcmp(c->name, "cpu-pmu") == 0) {
			if ((c->mode) && (c->start)) {
				pmu_profiling_version = 2;
				cpu_related_cnt = 1;
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_start_v2();
				else
					c->start();
			}
			continue;
		} else if (strcmp(c->name, "cpu") == 0) {
			if ((c->mode) && (c->start)) {
				pmu_profiling_version = 1;
				cpu_related_cnt = 1;
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_start();
				else
					c->start();
			}
			continue;
		}
#endif
		/* remember whether any enabled device needs per-CPU timers */
		if ((c->mode) && (c->cpu_related == 1))
			cpu_related_cnt = 1;

		/* start global (non cpu_related) devices here; per-CPU ones
		 * are started later by __met_hrtimer_start on each CPU
		 */
		if (c->ondiemet_mode == 0) {
			if ((!(c->cpu_related)) && (c->mode) && (c->start))
				c->start();
		} else if (c->ondiemet_mode == 1) {
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
				c->ondiemet_start();
		} else if (c->ondiemet_mode == 2) {
			if ((!(c->cpu_related)) && (c->mode) && (c->start))
				c->start();
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
				c->ondiemet_start();
		}
	}

	/* snapshot the online CPUs and pick the initial polling CPU */
	get_online_cpus();
	online_cpu_map = 0;
	for_each_online_cpu(cpu) {
		online_cpu_map |= (1 << cpu);
	}
	dbg_met_tag_oneshot(0, "met_online cpu map", online_cpu_map);
	preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
	if (preferred_polling_cpu != -1)
		curr_polling_cpu = preferred_polling_cpu;
	dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
	start = 1;

	if (cpu_related_cnt == 0)
		met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
	else
		on_each_cpu(__met_hrtimer_start, NULL, 1);
	put_online_cpus();

	return ret;
}
529
/*
 * sampler_stop - tear the sampling machinery down.
 *
 * Stops the per-CPU hrtimers on every CPU, flushes all deferred work,
 * unregisters the hotplug callbacks, and invokes the stop hook(s) of
 * every metdevice (releasing the module references taken in
 * sampler_start).  The PMU backend selection is reset to "none".
 *
 * NOTE(review): cpuhp_remove_state_nocalls() is passed
 * CPUHP_AP_ONLINE_DYN, not the dynamic state value that
 * cpuhp_setup_state_nocalls() returned in sampler_start() — verify this
 * is correct for the target kernel's cpuhp API.
 */
void sampler_stop(void)
{
	int cpu;
	struct met_cpu_struct *met_cpu_ptr;
	struct metdevice *c;
	struct delayed_work *dw;

	get_online_cpus();

	on_each_cpu(__met_hrtimer_stop, NULL, 1);
	/* flush deferred work on every possible CPU, not just online ones */
	for_each_possible_cpu(cpu) { /* Just for case */
		met_cpu_ptr = &per_cpu(met_cpu, cpu);
		dw = &met_cpu_ptr->dwork;
		cancel_delayed_work_sync(dw);
		/* sync_samples(cpu); */
	}

	start = 0;
	put_online_cpus();

	cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);

	list_for_each_entry(c, &met_list, list) {
#ifdef CONFIG_MET_ARM_32BIT
		/* 32-bit ARM build: only the v1 PMU backend exists */
		if (strcmp(c->name, "cpu") == 0) {
			pmu_profiling_version = 0;
			if ((c->mode) && (c->stop)) {
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_stop();
				else
					c->stop();
			}
			module_put(c->owner);
			continue;
		}
#endif

#ifdef MET_SUPPORT_CPUPMU_V2
		/* stop the PMU backend matching the device name (v2 or v1) */
		if (strcmp(c->name, "cpu-pmu") == 0) {
			pmu_profiling_version = 0;
			if ((c->mode) && (c->stop)) {
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_stop_v2();
				else
					c->stop();
			}
			module_put(c->owner);
			continue;
		}
		else if (strcmp(c->name, "cpu") == 0) {
			pmu_profiling_version = 0;
			if ((c->mode) && (c->stop)) {
				if (met_cpu_pmu_method != 0)
					met_perf_cpupmu_stop();
				else
					c->stop();
			}
			module_put(c->owner);
			continue;
		}
#endif
		/* stop global (non cpu_related) devices; per-CPU ones were
		 * already stopped by __met_hrtimer_stop on each CPU
		 */
		if (c->ondiemet_mode == 0) {
			if ((!(c->cpu_related)) && (c->mode) && (c->stop))
				c->stop();
		} else if (c->ondiemet_mode == 1) {
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
				c->ondiemet_stop();
		} else if (c->ondiemet_mode == 2) {
			if ((!(c->cpu_related)) && (c->mode) && (c->stop))
				c->stop();
			if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
				c->ondiemet_stop();
		}
		module_put(c->owner);
	}

	cpu_related_cnt = 0;
}
609
#if 0 /* can't use static now */
/*
 * Disabled code: suspend/resume counter markers
 * (trace "C|0|MET_SUSPEND|{1,0}"); kept for reference only.
 */
enum {
	MET_SUSPEND = 1,
	MET_RESUME = 2,
};

static noinline void tracing_mark_write(int op)
{
	switch (op) {
	case MET_SUSPEND:
		MET_TRACE("C|0|MET_SUSPEND|1");
		break;
	case MET_RESUME:
		MET_TRACE("C|0|MET_SUSPEND|0");
		break;
	}
}
#endif
628
629int met_hrtimer_suspend(void)
630{
631 struct metdevice *c;
632
633 met_set_suspend_notify(1);
634 /* tracing_mark_write(MET_SUSPEND); */
635// tracing_mark_write(TYPE_MET_SUSPEND, 0, 0, 0, 0, 0);
636 if (start == 0)
637 return 0;
638
639 list_for_each_entry(c, &met_list, list) {
640 if (c->suspend)
641 c->suspend();
642 }
643
644 /* get current COUNT */
645 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
646 return 0;
647}
648
649void met_hrtimer_resume(void)
650{
651 struct metdevice *c;
652
653 /* get current COUNT */
654 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
655
656 /* tracing_mark_write(MET_RESUME); */
657// tracing_mark_write(TYPE_MET_RESUME, 0, 0, 0, 0, 0);
658 if (start == 0)
659 return;
660
661 list_for_each_entry(c, &met_list, list) {
662 if (c->resume)
663 c->resume();
664 }
665}
666
667/*
668 * event timer:
669 * register IRQ, sched_switch event to monitor Polling count
670 * count can be printed at any live cpu.
671 */
672void met_event_timer_notify(void)
673{
674 unsigned long long stamp;
675 struct metdevice *c;
676 int cpu = -1;
677
678 if (start == 0)
679 return;
680
681 cpu = smp_processor_id();
682 list_for_each_entry(c, &met_list, list) {
683 stamp = local_clock();
684
685 if (c->prev_stamp == 0)
686 c->prev_stamp = stamp;
687
688 /* Critical Section Start */
689 /* try spinlock to prevent a event print twice between config time interval */
690 if (!spin_trylock(&(c->my_lock)))
691 continue;
692
693 /*
694 * DEFAULT_HRTIMER_EXPIRE (met_hrtimer_expire):
695 * sample_rate == 0 --> always print
696 * sample_rate == 1000 --> print interval larger than 1 ms
697 */
698 if (DEFAULT_HRTIMER_EXPIRE == 0 || (stamp - c->prev_stamp) < DEFAULT_HRTIMER_EXPIRE) {
699 spin_unlock(&(c->my_lock));
700 continue;
701 }
702
703 c->prev_stamp = stamp;
704 spin_unlock(&(c->my_lock));
705 /* Critical Section End */
706
707 if ((c->mode == 0) || (c->timed_polling == NULL))
708 continue;
709
710 stamp = local_clock();
711 c->timed_polling(stamp, cpu);
712 }
713}
714