blob: 1ed04c3b723d2db93a9c391db05c80070f24d7e7 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/cpu.h>
15#include <linux/sched.h>
16#include <linux/notifier.h>
17#include <linux/module.h>
18#include <linux/irq.h>
19#if 0 /* fix me later, no such file on current tree */
20#include <mach/mt_cpuxgpt.h>
21#endif
22#include <asm/arch_timer.h>
23
24#define MET_USER_EVENT_SUPPORT
25#include "interface.h"
26#include "sampler.h"
27#include "met_struct.h"
28#include "util.h"
29#include "switch.h"
30#include "trace.h"
31#include "met_drv.h"
32#include "met_tag.h" /* for tracing_mark_write */
33
34#include "cpu_pmu.h" /* for using kernel perf PMU driver */
35
36#include "met_kernel_symbol.h"
37
38#undef DEBUG_CPU_NOTIFY
39/* #define DEBUG_CPU_NOTIFY */
40#if defined(DEBUG_CPU_NOTIFY)
41#ifdef CONFIG_MET_MODULE
42#define dbg_met_tag_oneshot met_tag_oneshot_real
43#else
44#define dbg_met_tag_oneshot met_tag_oneshot
45#endif /* CONFIG_MET_MODULE */
46#else
47#define dbg_met_tag_oneshot(class_id, name, value) ({ 0; })
48#endif
49
/* Non-zero while sampling is active (set at the end of sampler_start()). */
static int start;
/* Bitmap of online CPUs, maintained by the hotplug notifier and hrtimer. */
static unsigned int online_cpu_map;
/* CPU whose hrtimer services the non-cpu_related (global) metdevices. */
static int curr_polling_cpu;
/* Non-zero when any enabled metdevice is cpu_related (see sampler_start()). */
static int cpu_related_cnt;

/* Preference order used when electing the polling CPU. */
static int preferred_cpu_list[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
56
57static int calc_preferred_polling_cpu(unsigned int cpu_map)
58{
59 int i;
60
61 for (i = 0; i < ARRAY_SIZE(preferred_cpu_list); i++) {
62 if (cpu_map & (1 << preferred_cpu_list[i]))
63 return preferred_cpu_list[i];
64 }
65
66 return -1;
67}
68
69static void wq_sync_buffer(struct work_struct *work)
70{
71 int cpu;
72 struct delayed_work *dw = container_of(work, struct delayed_work, work);
73 struct met_cpu_struct *met_cpu_ptr = container_of(dw, struct met_cpu_struct, dwork);
74
75 cpu = smp_processor_id();
76 if (met_cpu_ptr->cpu != cpu) {
77 /* panic("ERROR"); */
78 return;
79 }
80
81 /* sync_samples(cpu); */
82 /* don't re-add the work if we're shutting down */
83 if (met_cpu_ptr->work_enabled)
84 schedule_delayed_work(dw, DEFAULT_TIMER_EXPIRE);
85}
86
87static enum hrtimer_restart met_hrtimer_notify(struct hrtimer *hrtimer)
88{
89 int cpu;
90 int *count;
91 unsigned long long stamp;
92 struct met_cpu_struct *met_cpu_ptr = container_of(hrtimer, struct met_cpu_struct, hrtimer);
93 struct metdevice *c;
94#if defined(DEBUG_CPU_NOTIFY)
95 char msg[32];
96#endif
97
98 cpu = smp_processor_id();
99#if defined(DEBUG_CPU_NOTIFY)
100 {
101 char msg[32];
102
103 snprintf(msg, sizeof(msg), "met_hrtimer notify_%d", cpu);
104 dbg_met_tag_oneshot(0, msg, 1);
105 }
106#endif
107
108 if (met_cpu_ptr->cpu != cpu) {
109 /* panic("ERROR2"); */
110 dbg_met_tag_oneshot(0, msg, -3);
111 return HRTIMER_NORESTART;
112 }
113
114 list_for_each_entry(c, &met_list, list) {
115 if (c->ondiemet_mode == 0) {
116 if ((c->mode == 0) || (c->timed_polling == NULL))
117 continue;
118 } else if (c->ondiemet_mode == 1) {
119 if ((c->mode == 0) || (c->ondiemet_timed_polling == NULL))
120 continue;
121 } else if (c->ondiemet_mode == 2) {
122 if ((c->mode == 0) || ((c->timed_polling == NULL)
123 && (c->ondiemet_timed_polling == NULL)))
124 continue;
125 }
126
127 count = per_cpu_ptr(c->polling_count, cpu);
128 if ((*count) > 0) {
129 (*count)--;
130 continue;
131 }
132
133 *(count) = c->polling_count_reload;
134
135 stamp = cpu_clock(cpu);
136
137 if (c->cpu_related == 0) {
138 if (cpu == curr_polling_cpu) {
139 if (c->ondiemet_mode == 0) {
140 c->timed_polling(stamp, 0);
141 } else if (c->ondiemet_mode == 1) {
142 c->ondiemet_timed_polling(stamp, 0);
143 } else if (c->ondiemet_mode == 2) {
144 if (c->timed_polling)
145 c->timed_polling(stamp, 0);
146 if (c->ondiemet_timed_polling)
147 c->ondiemet_timed_polling(stamp, 0);
148 }
149 }
150 } else {
151 if (c->ondiemet_mode == 0) {
152 c->timed_polling(stamp, cpu);
153 } else if (c->ondiemet_mode == 1) {
154 c->ondiemet_timed_polling(stamp, cpu);
155 } else if (c->ondiemet_mode == 2) {
156 if (c->timed_polling)
157 c->timed_polling(stamp, 0);
158 if (c->ondiemet_timed_polling)
159 c->ondiemet_timed_polling(stamp, 0);
160 }
161 }
162 }
163
164 if (met_cpu_ptr->hrtimer_online_check) {
165 online_cpu_map |= (1 << cpu);
166 met_cpu_ptr->hrtimer_online_check = 0;
167 dbg_met_tag_oneshot(0, "met_online check done", cpu);
168 if (calc_preferred_polling_cpu(online_cpu_map) == cpu) {
169 curr_polling_cpu = cpu;
170 dbg_met_tag_oneshot(0, "met_curr polling cpu", cpu);
171 }
172 }
173
174 if (met_cpu_ptr->work_enabled) {
175 hrtimer_forward_now(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE));
176 dbg_met_tag_oneshot(0, msg, 0);
177 return HRTIMER_RESTART;
178 }
179 dbg_met_tag_oneshot(0, msg, 0);
180 return HRTIMER_NORESTART;
181}
182
183static void __met_hrtimer_start(void *unused)
184{
185 struct met_cpu_struct *met_cpu_ptr = NULL;
186 struct hrtimer *hrtimer = NULL;
187 /* struct delayed_work *dw; */
188 struct metdevice *c;
189
190 met_cpu_ptr = this_cpu_ptr(&met_cpu);
191#if defined(DEBUG_CPU_NOTIFY)
192 {
193 char msg[32];
194
195 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
196 dbg_met_tag_oneshot(0, msg, 1);
197 }
198#endif
199 /*
200 * do not open HRtimer when EVENT timer enable
201 */
202 if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
203 hrtimer = &met_cpu_ptr->hrtimer;
204 /* dw = &met_cpu_ptr->dwork; */
205
206 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
207 hrtimer->function = met_hrtimer_notify;
208 }
209
210 list_for_each_entry(c, &met_list, list) {
211 *(this_cpu_ptr(c->polling_count)) = 0;
212 if (c->ondiemet_mode == 0) {
213 if ((c->cpu_related) && (c->mode) && (c->start))
214 c->start();
215 } else if (c->ondiemet_mode == 1) {
216 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
217 c->ondiemet_start();
218 } else if (c->ondiemet_mode == 2) {
219 if ((c->cpu_related) && (c->mode) && (c->start))
220 c->start();
221 if (((c->cpu_related)) && (c->mode) && (c->ondiemet_start))
222 c->ondiemet_start();
223 }
224 }
225 /*
226 * do not open HRtimer when EVENT timer enable
227 */
228 if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
229 if (DEFAULT_HRTIMER_EXPIRE) {
230 met_cpu_ptr->work_enabled = 1;
231 /* schedule_delayed_work_on(smp_processor_id(), dw, DEFAULT_TIMER_EXPIRE); */
232 hrtimer_start(hrtimer, ns_to_ktime(DEFAULT_HRTIMER_EXPIRE),
233 HRTIMER_MODE_REL_PINNED);
234 }
235 }
236}
237
238static void __met_hrtimer_stop(void *unused)
239{
240 struct met_cpu_struct *met_cpu_ptr;
241 struct hrtimer *hrtimer;
242 /* struct delayed_work *dw; */
243 struct metdevice *c;
244
245 int cpu = smp_processor_id();
246 pr_debug("!!!!!!!! %s cpu = %d\n", __FUNCTION__, cpu);
247
248 met_cpu_ptr = this_cpu_ptr(&met_cpu);
249#if defined(DEBUG_CPU_NOTIFY)
250 {
251 char msg[32];
252
253 snprintf(msg, sizeof(msg), "met_hrtimer status_%d", met_cpu_ptr->cpu);
254 dbg_met_tag_oneshot(0, msg, 0);
255 }
256#endif
257 /*
258 * do not open HRtimer when EVENT timer enable
259 */
260 if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER)) {
261 hrtimer = &met_cpu_ptr->hrtimer;
262 /* dw = &met_cpu_ptr->dwork; */
263
264 met_cpu_ptr->work_enabled = 0;
265 hrtimer_cancel(hrtimer);
266 pr_debug("!!!!!!!! %s hrtimer_cancel cpu = %d\n", __FUNCTION__, cpu);
267 /* cancel_delayed_work_sync(dw); */
268 }
269 list_for_each_entry(c, &met_list, list) {
270 if (c->ondiemet_mode == 0) {
271 if ((c->cpu_related) && (c->mode) && (c->stop))
272 c->stop();
273 } else if (c->ondiemet_mode == 1) {
274 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
275 c->ondiemet_stop();
276 } else if (c->ondiemet_mode == 2) {
277 if ((c->cpu_related) && (c->mode) && (c->stop))
278 c->stop();
279 if ((c->cpu_related) && (c->mode) && (c->ondiemet_stop))
280 c->ondiemet_stop();
281 }
282 *(this_cpu_ptr(c->polling_count)) = 0;
283 }
284}
285
286static int met_pmu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
287{
288 struct met_cpu_struct *met_cpu_ptr;
289 struct delayed_work *dw;
290 long cpu = (long)hcpu;
291 int preferred_polling_cpu;
292 struct metdevice *c;
293
294 pr_debug("!!!!!!!! %s_%ld, action=%d\n", __FUNCTION__, cpu, action);
295 if (start == 0)
296 return NOTIFY_OK;
297
298#if defined(DEBUG_CPU_NOTIFY)
299 {
300 char msg[32];
301
302 snprintf(msg, sizeof(msg), "met_cpu notify_%ld", cpu);
303 dbg_met_tag_oneshot(0, msg, action);
304 }
305#elif defined(PR_CPU_NOTIFY)
306 {
307 char msg[32];
308
309 if (met_cpu_notify) {
310 snprintf(msg, sizeof(msg), "met_cpu notify_%ld", cpu);
311 dbg_met_tag_oneshot(0, msg, action);
312 }
313 }
314#endif
315
316 if (cpu < 0 || cpu >= ARRAY_SIZE(preferred_cpu_list))
317 return NOTIFY_OK;
318
319 switch (action) {
320 case CPU_ONLINE:
321 case CPU_ONLINE_FROZEN:
322 met_cpu_ptr = &per_cpu(met_cpu, cpu);
323 met_cpu_ptr->hrtimer_online_check = 1;
324 dbg_met_tag_oneshot(0, "met_online check", cpu);
325
326 if (cpu_related_cnt == 0) {
327 /*printk("%s, %d: curr_polling_cpu is alive = %d\n",
328 * __func__, __LINE__, online_cpu_map & (1 << curr_polling_cpu));
329 */
330
331 online_cpu_map |= (1 << cpu);
332
333 /* check curr_polling_cpu is alive, if it is down,
334 * start current cpu hrtimer, and change it to be currr_pollling_cpu
335 */
336 if ((online_cpu_map & (1 << curr_polling_cpu)) == 0) {
337 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
338 curr_polling_cpu = cpu;
339 }
340 } else
341 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
342
343#ifdef CONFIG_CPU_FREQ
344 force_power_log(cpu);
345#endif
346 list_for_each_entry(c, &met_list, list) {
347 if (c->cpu_state_notify)
348 c->cpu_state_notify(cpu, action);
349 }
350 break;
351
352 case CPU_DOWN_PREPARE:
353 case CPU_DOWN_PREPARE_FROZEN:
354 list_for_each_entry(c, &met_list, list) {
355 if (c->cpu_state_notify)
356 c->cpu_state_notify(cpu, action);
357 }
358
359 online_cpu_map &= ~(1 << cpu);
360 dbg_met_tag_oneshot(0, "met_offline cpu", cpu);
361 if (cpu == curr_polling_cpu) {
362 /* printk("%s, %d: curr_polling_cpu %d is down\n",
363 * __func__, __LINE__, curr_polling_cpu);
364 */
365 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
366 /* printk("%s, %d: preferred_polling_cpu = %d\n",
367 * __func__, __LINE__, preferred_polling_cpu);
368 */
369 if (preferred_polling_cpu != -1) {
370 curr_polling_cpu = preferred_polling_cpu;
371 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
372
373 if (cpu_related_cnt == 0)
374 /* printk("%s, %d: start cpu %d hrtimer start\n",
375 * __func__, __LINE__, curr_polling_cpu);
376 */
377 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
378 }
379 }
380
381 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
382
383 met_cpu_ptr = &per_cpu(met_cpu, cpu);
384 dw = &met_cpu_ptr->dwork;
385 cancel_delayed_work_sync(dw);
386
387 /* sync_samples(cpu); */
388 break;
389
390 case CPU_DOWN_FAILED:
391 case CPU_DOWN_FAILED_FROZEN:
392 met_cpu_ptr = &per_cpu(met_cpu, cpu);
393 met_cpu_ptr->hrtimer_online_check = 1;
394 dbg_met_tag_oneshot(0, "met_online check", cpu);
395
396 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
397 list_for_each_entry(c, &met_list, list) {
398 if (c->cpu_state_notify)
399 c->cpu_state_notify(cpu, action);
400 }
401 break;
402
403 case CPU_DEAD:
404 case CPU_DEAD_FROZEN:
405 list_for_each_entry(c, &met_list, list) {
406 if (c->cpu_state_notify)
407 c->cpu_state_notify(cpu, action);
408 }
409#ifdef CONFIG_CPU_FREQ
410 force_power_log_val(0, cpu);
411#endif
412 break;
413
414 default:
415 list_for_each_entry(c, &met_list, list) {
416 if (c->cpu_state_notify)
417 c->cpu_state_notify(cpu, action);
418 }
419 }
420
421 return NOTIFY_OK;
422}
423
/* Hotplug notifier that keeps per-CPU timers and the polling CPU in sync. */
static struct notifier_block __refdata met_pmu_cpu_notifier = {
	.notifier_call = met_pmu_cpu_notify,
};
427
428int sampler_start(void)
429{
430 int ret, cpu;
431 struct met_cpu_struct *met_cpu_ptr;
432 struct metdevice *c;
433 int preferred_polling_cpu;
434
435 met_set_suspend_notify(0);
436
437#ifdef CONFIG_CPU_FREQ
438 force_power_log(POWER_LOG_ALL);
439#endif
440
441 for_each_possible_cpu(cpu) {
442 met_cpu_ptr = &per_cpu(met_cpu, cpu);
443 met_cpu_ptr->work_enabled = 0;
444 met_cpu_ptr->hrtimer_online_check = 0;
445 hrtimer_init(&met_cpu_ptr->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
446 met_cpu_ptr->hrtimer.function = met_hrtimer_notify;
447 INIT_DELAYED_WORK(&met_cpu_ptr->dwork, wq_sync_buffer);
448 }
449
450 start = 0;
451 ret = register_hotcpu_notifier(&met_pmu_cpu_notifier);
452
453 list_for_each_entry(c, &met_list, list) {
454
455 if (try_module_get(c->owner) == 0)
456 continue;
457
458 if ((c->mode) && (c->cpu_related == 1))
459 cpu_related_cnt = 1;
460
461 if (c->ondiemet_mode == 0) {
462 if ((!(c->cpu_related)) && (c->mode) && (c->start))
463 c->start();
464 else if ((c->cpu_related) && (c->mode) && (c->uniq_start))
465 c->uniq_start();
466 } else if (c->ondiemet_mode == 1) {
467 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
468 c->ondiemet_start();
469 } else if (c->ondiemet_mode == 2) {
470 if ((!(c->cpu_related)) && (c->mode) && (c->start))
471 c->start();
472 else if ((c->cpu_related) && (c->mode) && (c->uniq_start))
473 c->uniq_start();
474
475 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_start))
476 c->ondiemet_start();
477 }
478 }
479
480 get_online_cpus();
481 online_cpu_map = 0;
482 for_each_online_cpu(cpu) {
483 online_cpu_map |= (1 << cpu);
484 }
485 dbg_met_tag_oneshot(0, "met_online cpu map", online_cpu_map);
486 pr_debug("!!!!!!!! %s met_online cpu map = 0x%8X\n", __FUNCTION__, online_cpu_map);
487 preferred_polling_cpu = calc_preferred_polling_cpu(online_cpu_map);
488 if (preferred_polling_cpu != -1)
489 curr_polling_cpu = preferred_polling_cpu;
490 dbg_met_tag_oneshot(0, "met_curr polling cpu", curr_polling_cpu);
491 start = 1;
492
493 if (cpu_related_cnt == 0)
494 met_smp_call_function_single_symbol(curr_polling_cpu, __met_hrtimer_start, NULL, 1);
495 else {
496 //on_each_cpu(__met_hrtimer_start, NULL, 1);
497 for_each_online_cpu(cpu) {
498 met_smp_call_function_single_symbol(cpu, __met_hrtimer_start, NULL, 1);
499 }
500 }
501 put_online_cpus();
502
503 return ret;
504}
505
506void sampler_stop(void)
507{
508 int cpu;
509 struct met_cpu_struct *met_cpu_ptr;
510 struct metdevice *c;
511 struct delayed_work *dw;
512
513
514 get_online_cpus();
515 //on_each_cpu(__met_hrtimer_stop, NULL, 1);
516 online_cpu_map = 0;
517 for_each_online_cpu(cpu) {
518 online_cpu_map |= (1 << cpu);
519 }
520 pr_debug("!!!!!!!! %s met_online cpu map = 0x%8X\n", __FUNCTION__, online_cpu_map);
521 for_each_online_cpu(cpu) {
522 met_smp_call_function_single_symbol(cpu, __met_hrtimer_stop, NULL, 1);
523 }
524
525 /* for_each_online_cpu(cpu) { */
526 for_each_possible_cpu(cpu) { /* Just for case */
527 met_cpu_ptr = &per_cpu(met_cpu, cpu);
528 dw = &met_cpu_ptr->dwork;
529 cancel_delayed_work_sync(dw);
530 /* sync_samples(cpu); */
531 }
532 start = 0;
533 put_online_cpus();
534
535 unregister_hotcpu_notifier(&met_pmu_cpu_notifier);
536
537 list_for_each_entry(c, &met_list, list) {
538 if (c->ondiemet_mode == 0) {
539 if ((!(c->cpu_related)) && (c->mode) && (c->stop))
540 c->stop();
541 else if ((c->cpu_related) && (c->mode) && (c->uniq_stop))
542 c->uniq_stop();
543 } else if (c->ondiemet_mode == 1) {
544 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
545 c->ondiemet_stop();
546 } else if (c->ondiemet_mode == 2) {
547 if ((!(c->cpu_related)) && (c->mode) && (c->stop))
548 c->stop();
549 else if ((c->cpu_related) && (c->mode) && (c->uniq_stop))
550 c->uniq_stop();
551
552 if ((!(c->cpu_related)) && (c->mode) && (c->ondiemet_stop))
553 c->ondiemet_stop();
554 }
555 module_put(c->owner);
556 }
557
558 cpu_related_cnt = 0;
559}
560
#if 0 /* can't use static now; met_tag.h provides tracing_mark_write instead */
enum {
	MET_SUSPEND = 1,
	MET_RESUME = 2,
};

/* Disabled local variant: emits a MET_SUSPEND counter marker (1 on
 * suspend, 0 on resume) into the trace buffer. */
static noinline void tracing_mark_write(int op)
{
	switch (op) {
	case MET_SUSPEND:
		MET_TRACE("C|0|MET_SUSPEND|1");
		break;
	case MET_RESUME:
		MET_TRACE("C|0|MET_SUSPEND|0");
		break;
	}
}
#endif
579
580int met_hrtimer_suspend(void)
581{
582 struct metdevice *c;
583
584 met_set_suspend_notify(1);
585 /* tracing_mark_write(MET_SUSPEND); */
586 tracing_mark_write(TYPE_MET_SUSPEND, 0, 0, 0, 0, 0);
587 if (start == 0)
588 return 0;
589
590 list_for_each_entry(c, &met_list, list) {
591 if (c->suspend)
592 c->suspend();
593 }
594
595 /* get current COUNT */
596 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
597 return 0;
598}
599
600void met_hrtimer_resume(void)
601{
602 struct metdevice *c;
603
604 /* get current COUNT */
605 MET_TRACE("TS: %llu GPT: %llX", sched_clock(), arch_counter_get_cntvct());
606
607 /* tracing_mark_write(MET_RESUME); */
608 tracing_mark_write(TYPE_MET_RESUME, 0, 0, 0, 0, 0);
609 if (start == 0)
610 return;
611
612 list_for_each_entry(c, &met_list, list) {
613 if (c->resume)
614 c->resume();
615 }
616}
617
618/*
619 * event timer:
620 * register IRQ, sched_switch event to monitor Polling count
621 * count can be printed at any live cpu.
622 */
623void met_event_timer_notify(void)
624{
625 unsigned long long stamp;
626 struct metdevice *c;
627 int cpu = -1;
628
629 if (start == 0)
630 return;
631
632 cpu = smp_processor_id();
633 list_for_each_entry(c, &met_list, list) {
634 stamp = local_clock();
635
636 if (c->prev_stamp == 0)
637 c->prev_stamp = stamp;
638
639 /* Critical Section Start */
640 /* try spinlock to prevent a event print twice between config time interval */
641 if (!spin_trylock(&(c->my_lock)))
642 continue;
643
644 /*
645 * DEFAULT_HRTIMER_EXPIRE (met_hrtimer_expire):
646 * sample_rate == 0 --> always print
647 * sample_rate == 1000 --> print interval larger than 1 ms
648 */
649 if (DEFAULT_HRTIMER_EXPIRE == 0 || (stamp - c->prev_stamp) < DEFAULT_HRTIMER_EXPIRE) {
650 spin_unlock(&(c->my_lock));
651 continue;
652 }
653
654 c->prev_stamp = stamp;
655 spin_unlock(&(c->my_lock));
656 /* Critical Section End */
657
658 if ((c->mode == 0) || (c->timed_polling == NULL))
659 continue;
660
661 stamp = local_clock();
662 c->timed_polling(stamp, cpu);
663 }
664}
665