/*
 * drivers/clocksource/timer-mmp.c
 *
 * Support for clocksource and clockevents
 *
 * Copyright (C) 2008 Marvell International Ltd.
 * All rights reserved.
 *
 * 2008-04-11: Jason Chagas <Jason.chagas@marvell.com>
 * 2008-10-08: Bin Yang <bin.yang@marvell.com>
 *
 * The timer module actually includes three timers, each timer with up to
 * three match comparators. Timer #0 is used here in free-running mode as
 * the clock source, and match comparator #1 is used as the clock event
 * device.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mmp_timer.h>
#include <linux/sched_clock.h>
#include <linux/stat.h>
#include <linux/cputype.h>
#include <dt-bindings/clock/timer-mmp.h>
#include <soc/asr/regs-timers.h>
#include <soc/asr/addr-map.h>

#define MAX_EVT_NUM		5

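/*
 * Clock event deltas are programmed as (delta - 1) into a 32-bit match
 * register (see timer_set_next_event()), hence the 0xfffffffe maximum;
 * the minimum leaves a few ticks of margin while a counter is being
 * reprogrammed.
 */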
#define MAX_DELTA		(0xfffffffe)
#define MIN_DELTA		(5)

#define MMP_MAX_COUNTER		3
#define MMP_MAX_TIMER		4

#define TMR_CER_COUNTER(cid)	(1 << (cid))
#define MMP_ALL_COUNTERS	((1 << MMP_MAX_COUNTER) - 1)

#define MMP_TIMER_CLOCK_32KHZ	32768

#ifdef CONFIG_CPU_ASR1903
#define SCS_TIMER_CLOCK_32KHZ	32765
#else
#define SCS_TIMER_CLOCK_32KHZ	32787
#endif

#define MMP_TIMER_CLOCK_1KHZ	1000

#define ASR1903_SCS_RTC_VIRT_BASE	(APB_VIRT_BASE + 0x0C0000)
#define SCS_DCS_MODE		(0x1c)

struct mmp_timer;
struct mmp_timer_counter {
	unsigned int id;
	unsigned int usage;
	unsigned int cnt_freq;
	int cpu;
	int loop_delay;
	struct mmp_timer *timer;
};

struct mmp_timer {
	unsigned int id;
	void __iomem *base;
	unsigned int crsr_off;
	struct mmp_timer_counter counters[MMP_MAX_COUNTER];
	unsigned int flag;
	int loop_delay_fastclk;
	unsigned int fc_freq;
	spinlock_t tm_lock;
};

struct mmp_timer_clkevt {
	struct mmp_timer_counter *counter;
	struct clock_event_device ced;
	struct irqaction irqa;
	struct notifier_block nb;
	int freq;
};

struct mmp_timer_clksrc {
	struct mmp_timer_counter *counter;
	struct clocksource cs;
};

#ifdef CONFIG_ARM
struct mmp_timer_dclk {
	struct mmp_timer_counter *counter;
	struct delay_timer *dt;
};
static struct mmp_timer_dclk *dclk;
#endif

static struct mmp_timer *mmp_timers[MMP_MAX_TIMER];
static struct mmp_timer_clksrc *clksrc;

#ifdef CONFIG_SMP
static struct mmp_timer_clkevt *cpu_clkevt[NR_CPUS];
#endif

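/*
 * Clock source (CS) field encoding in TMR_CCR, as used by
 * timer_counter_switch_clock() below: 0 selects the fast clock for any
 * counter; for counters 0 and 1, 1 selects the 32 kHz clock and 2
 * (counter 1 only) the 1 kHz clock; for counter 2 the encoding is
 * swapped (1 = 1 kHz, 2 = 32 kHz).
 */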
static int timer_counter_switch_clock(int tid, int cid, unsigned int freq)
{
	struct mmp_timer *tm = mmp_timers[tid];
	u32 ccr, mask;

	ccr = __raw_readl(tm->base + TMR_CCR);

	if (cid == 0)
		mask = TMR_CCR_CS_0(3);
	else if (cid == 1)
		mask = TMR_CCR_CS_1(3);
	else
		mask = TMR_CCR_CS_2(3);

	ccr &= ~mask;

	if (freq == MMP_TIMER_CLOCK_32KHZ || freq == SCS_TIMER_CLOCK_32KHZ) {
		if (cid == 2)
			ccr |= TMR_CCR_CS_2(2);
		else if (cid == 1)
			ccr |= TMR_CCR_CS_1(1);
		else if (cid == 0)
			ccr |= TMR_CCR_CS_0(1);
	} else if (freq == MMP_TIMER_CLOCK_1KHZ) {
		if (cid == 2)
			ccr |= TMR_CCR_CS_2(1);
		else if (cid == 1)
			ccr |= TMR_CCR_CS_1(2);
	} else if (freq == tm->fc_freq) {
		if (cid == 2)
			ccr |= TMR_CCR_CS_2(0);
		else if (cid == 1)
			ccr |= TMR_CCR_CS_1(0);
		else if (cid == 0)
			ccr |= TMR_CCR_CS_0(0);
	} else {
		pr_err("Timer %d:%d: invalid clock rate %d\n", tid, cid, freq);
		return -EINVAL;
	}

	__raw_writel(ccr, tm->base + TMR_CCR);

	return 0;
}

static void timer_counter_disable(struct mmp_timer_counter *cnt)
{
	struct mmp_timer *tm = cnt->timer;
	int delay = tm->loop_delay_fastclk;
	u32 cer;

	/*
	 * Stopping the counter needs multiple timer clock cycles to take
	 * effect, and some operations can only be done while the counter
	 * is disabled, so a delay is added here.
	 */
	/* Step 1: disable the counter */
	cer = __raw_readl(tm->base + TMR_CER);
	__raw_writel(cer & ~(1 << cnt->id), tm->base + TMR_CER);

	/*
	 * Step 2: switch to the fast clock, so the delay completes
	 * quickly.
	 */
	if (cnt->cnt_freq != tm->fc_freq)
		timer_counter_switch_clock(tm->id, cnt->id, tm->fc_freq);

	/*
	 * Step 3: loop for multiple timer cycles. We do this by clearing
	 * the pending interrupt status.
	 */
	while (delay--)
		__raw_writel(0x1, tm->base + TMR_ICR(cnt->id));
}

static void timer_counter_enable(struct mmp_timer_counter *cnt)
{
	struct mmp_timer *tm = cnt->timer;
	u32 cer;
#ifdef CONFIG_CPU_ASR1802S
	int timeout = 800;
#endif

	/* Switch back to the original clock */
	if (cnt->cnt_freq != tm->fc_freq)
		timer_counter_switch_clock(tm->id, cnt->id, cnt->cnt_freq);

	/* Enable the timer */
	cer = __raw_readl(tm->base + TMR_CER);
	__raw_writel(cer | (1 << cnt->id), tm->base + TMR_CER);
#ifdef CONFIG_CPU_ASR1802S
	/* Wait (bounded by the timeout) for the counter to become zero */
	if (cpu_is_asr1802s()) {
		while (__raw_readl(tm->base + TMR_CR(cnt->id)) != 0 &&
		       --timeout > 0)
			;
	}
#endif
}

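/*
 * Counter read strategy: with the SHADOW flag the hardware latches a
 * coherent value, so TMR_CR can be read directly. Otherwise a
 * slow-clocked counter is read back-to-back until two reads agree, and
 * a fast-clocked counter is latched by writing TMR_CVWR and reading the
 * latched value back a few times so it can settle.
 */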
static inline uint32_t timer_read(struct mmp_timer_counter *cnt)
{
	struct mmp_timer *tm = cnt->timer;
	int has_shadow = tm->flag & MMP_TIMER_FLAG_SHADOW;
	int delay = 3;
	u32 val1, val2;

	if (has_shadow)
		return __raw_readl(tm->base + TMR_CR(cnt->id));

	if (cnt->cnt_freq != tm->fc_freq) {
		/* slow clock */
		do {
			val1 = __raw_readl(tm->base + TMR_CR(cnt->id));
			val2 = __raw_readl(tm->base + TMR_CR(cnt->id));
		} while (val2 != val1);
	} else {
		/* fast clock */
		__raw_writel(1, tm->base + TMR_CVWR(cnt->id));
		while (delay--)
			val1 = __raw_readl(tm->base + TMR_CVWR(cnt->id));
	}

	return val1;
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *c = dev_id;
	struct mmp_timer_clkevt *evt;
	struct mmp_timer_counter *counter;
	unsigned int cnt;
	unsigned long flags;
	void __iomem *base;
	int has_crsr;

	evt = container_of(c, struct mmp_timer_clkevt, ced);
	counter = evt->counter;
	cnt = counter->id;
	base = counter->timer->base;
	has_crsr = counter->timer->flag & MMP_TIMER_FLAG_CRSR;

	spin_lock_irqsave(&(counter->timer->tm_lock), flags);
	/* We only use match #0 for the counter. */
	if (__raw_readl(base + TMR_SR(cnt)) & 0x1) {
		if (!has_crsr)
			timer_counter_disable(counter);

		/* Disable the interrupt. */
		__raw_writel(0x00, base + TMR_IER(cnt));
		/* Clear the interrupt status. */
		__raw_writel(0x01, base + TMR_ICR(cnt));

		spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);

		c->event_handler(c);

		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);

	return IRQ_NONE;
}

static int timer_set_next_event(unsigned long delta,
				struct clock_event_device *dev)
{
	struct mmp_timer_counter *cnt;
	struct mmp_timer_clkevt *evt;
	unsigned long flags;
	unsigned int cid;
	unsigned int crsr_off;
	u32 cer, crsr;
	void __iomem *base;
	int delay, has_crsr;

	evt = container_of(dev, struct mmp_timer_clkevt, ced);
	cnt = evt->counter;
	cid = cnt->id;
	base = cnt->timer->base;
	crsr_off = cnt->timer->crsr_off;
	has_crsr = cnt->timer->flag & MMP_TIMER_FLAG_CRSR;

	spin_lock_irqsave(&(cnt->timer->tm_lock), flags);
	if (has_crsr) {
		/*
		 * Use TMR_CRSR to restart the counter and make the match
		 * register take effect. The bit must be 0 before it is
		 * set again; the bound of the polling loop is given by
		 * loop_delay.
		 */
		delay = cnt->loop_delay;
		do {
			crsr = __raw_readl(base + crsr_off);
			delay--;
		} while ((crsr & (1 << cid)) && delay > 0);

		BUG_ON(delay <= 0);

		__raw_writel(delta - 1, base + TMR_TN_MM(cid, 0));
		/*
		 * After the counter is restarted, clear the interrupt
		 * status to be safe, and re-enable the interrupt for
		 * match #0.
		 */
		__raw_writel(0x01, base + TMR_ICR(cid));
		__raw_writel(0x01, base + TMR_IER(cid));
		__raw_writel((1 << cid), base + crsr_off);
	} else {
		cer = __raw_readl(base + TMR_CER);

		/* If the timer counter is enabled, disable it first. */
		if (cer & (1 << cid))
			timer_counter_disable(cnt);

		/* Set up the new match value. */
		__raw_writel(delta - 1, base + TMR_TN_MM(cid, 0));

		/* Enable the match interrupt. */
		__raw_writel(0x1, base + TMR_IER(cid));

		timer_counter_enable(cnt);
	}
	spin_unlock_irqrestore(&(cnt->timer->tm_lock), flags);

	return 0;
}

static int timer_set_shutdown(struct clock_event_device *dev)
{
	unsigned long flags;
	unsigned int cnt;
	struct mmp_timer_counter *counter;
	struct mmp_timer_clkevt *evt;
	void __iomem *base;

	evt = container_of(dev, struct mmp_timer_clkevt, ced);
	counter = evt->counter;
	cnt = counter->id;
	base = counter->timer->base;

	spin_lock_irqsave(&(counter->timer->tm_lock), flags);
	timer_counter_disable(counter);
	spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);

	return 0;
}

/*
static void timer_set_oneshot(struct clock_event_device *dev)
{
	unsigned long flags;
	unsigned int cnt;
	struct mmp_timer_counter *counter;
	struct mmp_timer_clkevt *evt;
	void __iomem *base;

	evt = container_of(dev, struct mmp_timer_clkevt, ced);
	counter = evt->counter;
	cnt = counter->id;
	base = counter->timer->base;

	spin_lock_irqsave(&(counter->timer->tm_lock), flags);
	timer_counter_enable(counter);
	spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);
}
*/
static u64 clksrc_read(struct clocksource *cs)
{
	return timer_read(clksrc->counter);
}

static u64 notrace mmp_read_sched_clock(void)
{
	return timer_read(clksrc->counter);
}

#ifdef CONFIG_ARM
static unsigned long d_read_current_timer(void)
{
	return timer_read(dclk->counter);
}

static struct delay_timer d_timer = {
	.read_current_timer = d_read_current_timer,
};
#endif

static int mmp_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
#if 0
	struct mmp_timer_clkevt *evt;
	struct mmp_timer_counter *cnt;

	evt = container_of(self, struct mmp_timer_clkevt, nb);
	cnt = evt->counter;

	if (cnt->cpu != (unsigned long)hcpu)
		return NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		clockevents_config_and_register(&evt->ced,
						cnt->cnt_freq,
						MIN_DELTA, MAX_DELTA);
		break;
	case CPU_ONLINE:
		irq_set_affinity(evt->ced.irq, evt->ced.cpumask);
		enable_irq(evt->ced.irq);
		break;
	case CPU_DYING:
		clockevents_set_mode(&evt->ced,
				     CLOCK_EVT_MODE_SHUTDOWN);
		disable_irq(evt->ced.irq);
		break;
	}
#endif
	return NOTIFY_OK;
}

int __init mmp_timer_init(int tid, void __iomem *base,
			  unsigned int flag, unsigned int fc_freq,
			  unsigned int apb_freq, unsigned int crsr_off)
{
	struct mmp_timer *tm = mmp_timers[tid];
	u32 tmp, delay;
	int cid;

	if (tm)
		return -EINVAL;

	tm = kzalloc(sizeof(struct mmp_timer), GFP_KERNEL);
	if (!tm)
		return -ENOMEM;

	/*
	 * The loop count is calculated as follows:
	 *
	 * (1) We need to wait for two timer clock cycles:
	 *
	 *        1             2
	 *     ------- x 2 = -------
	 *     fc_freq       fc_freq
	 *
	 * (2) Convert that to APB clock cycles:
	 *
	 *        2         1        apb_freq * 2
	 *     ------- / -------- = --------------
	 *     fc_freq   apb_freq      fc_freq
	 *
	 * (3) Each APB register access takes about 8 APB clock cycles,
	 *     and one extra iteration is added as a safety margin, so
	 *     the number of register-access loop iterations needed is:
	 *
	 *     (apb_freq * 2)
	 *     -------------- / 8 + 1
	 *        fc_freq
	 */
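	/*
	 * Worked example (illustrative numbers only): with
	 * fc_freq = 3.25 MHz and apb_freq = 26 MHz,
	 *
	 *     (26000000 * 2) / 3250000 / 8 + 1 = 3
	 *
	 * loop iterations are used.
	 */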
	delay = ((apb_freq * 2) / fc_freq / 8) + 1;
	pr_debug("Timer %d: loop_delay_fastclk is %d\n", tid, delay);

	tm->id = tid;
	tm->base = base;
	tm->flag = flag;
	tm->loop_delay_fastclk = delay;
	tm->fc_freq = fc_freq;
	tm->crsr_off = crsr_off;
	spin_lock_init(&(tm->tm_lock));

	mmp_timers[tid] = tm;

	for (cid = 0; cid < MMP_MAX_COUNTER; cid++) {
		tm->counters[cid].id = cid;
		tm->counters[cid].timer = tm;

		/* We will disable all counters; switch to the fast clock first. */
		timer_counter_switch_clock(tid, cid, fc_freq);
	}

	/* Disable all counters. */
	tmp = __raw_readl(base + TMR_CER) & ~MMP_ALL_COUNTERS;
	__raw_writel(tmp, base + TMR_CER);

	/* Disable the match interrupts. */
	__raw_writel(0x00, base + TMR_IER(0));
	__raw_writel(0x00, base + TMR_IER(1));
	__raw_writel(0x00, base + TMR_IER(2));

	while (delay--) {
		/* Clear pending interrupt status. */
		__raw_writel(0x1, base + TMR_ICR(0));
		__raw_writel(0x1, base + TMR_ICR(1));
		__raw_writel(0x1, base + TMR_ICR(2));
		__raw_writel(tmp, base + TMR_CER);
	}

	return 0;
}

static int __init mmp_timer_counter_hw_init(struct mmp_timer *tm, int cid,
					    unsigned int freq)
{
	u32 tmp, delay;
	unsigned int ratio;
	int ret;

	ret = timer_counter_switch_clock(tm->id, cid, freq);
	if (ret)
		return ret;

	tm->counters[cid].cnt_freq = freq;
	ratio = tm->fc_freq / freq;
	tm->counters[cid].loop_delay = tm->loop_delay_fastclk * ratio;

	/* Set the counter to free-running mode. */
	tmp = __raw_readl(tm->base + TMR_CMR) | TMR_CER_COUNTER(cid);
	__raw_writel(tmp, tm->base + TMR_CMR);

	/*
	 * If CMR[cid] == 0 the counter is periodic:
	 *
	 * - If PLCR == 0: after re-programmed registers are written and
	 *   the counter restarts, it continues counting until a match
	 *   occurs, then pre-loads the value in PLVR and continues.
	 *
	 * - If PLCR != 0: after re-programmed registers are written and
	 *   the counter restarts, it pre-loads the value in PLVR and
	 *   continues counting; on each match it pre-loads PLVR again
	 *   and continues.
	 *
	 * If CMR[cid] == 1 the counter is free-running:
	 *
	 * - If PLCR == 0: after re-programmed registers are written and
	 *   the counter restarts, it continues counting until it hits
	 *   0xFFFFFFFF, then wraps to 0x0 and continues.
	 *
	 * - If PLCR != 0: after re-programmed registers are written and
	 *   the counter restarts, it pre-loads the value in PLVR,
	 *   continues counting until it hits 0xFFFFFFFF, then wraps to
	 *   0x0 and continues.
	 */
	if (tm->counters[cid].usage & MMP_TIMER_COUNTER_CLKEVT) {
		/* pre-load the value in PLVR */
		tmp = __raw_readl(tm->base + TMR_PLCR(cid)) | cid;
		__raw_writel(tmp, tm->base + TMR_PLCR(cid));
		__raw_writel(0x0, tm->base + TMR_PLVR(cid));
	} else {
		/* free-running */
		__raw_writel(0x0, tm->base + TMR_PLCR(cid));
	}

	/* Clear the status. */
	__raw_writel(0x7, tm->base + TMR_ICR(cid));

	/* Enable the counter. */
	tmp = __raw_readl(tm->base + TMR_CER) | TMR_CER_COUNTER(cid);
	__raw_writel(tmp, tm->base + TMR_CER);

	delay = tm->counters[cid].loop_delay;
	while (delay--)
		__raw_writel(tmp, tm->base + TMR_CER);

	return 0;
}

int __init mmp_counter_clocksource_init(int tid, int cid, int rating,
					unsigned int freq)
{
	struct mmp_timer *mt = mmp_timers[tid];
	int ret;

	if (!mt)
		return -EINVAL;

	if (cid < 0 || cid >= MMP_MAX_COUNTER)
		return -EINVAL;

	if (clksrc) {
		pr_err("One clksrc has already been registered!\n");
		return -EINVAL;
	}
	clksrc = kzalloc(sizeof(struct mmp_timer_clksrc), GFP_KERNEL);
	if (!clksrc)
		return -ENOMEM;

	mt->counters[cid].usage |= MMP_TIMER_COUNTER_CLKSRC;
	mt->counters[cid].cnt_freq = freq;

	clksrc->counter = &mt->counters[cid];
	clksrc->cs.name = "clocksource-mmp";
	clksrc->cs.rating = rating;
	clksrc->cs.read = clksrc_read;
	clksrc->cs.mask = CLOCKSOURCE_MASK(32);
	clksrc->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
	if (freq == SCS_TIMER_CLOCK_32KHZ || freq == MMP_TIMER_CLOCK_32KHZ)
		clksrc->cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/* setup_sched_clock(mmp_read_sched_clock, 32, freq); */
	sched_clock_register(mmp_read_sched_clock, 32, freq);
	clocksource_register_hz(&(clksrc->cs), freq);

	ret = mmp_timer_counter_hw_init(mt, cid, freq);
	if (ret)
		return ret;

	return 0;
}

int __init mmp_counter_timer_delay_init(int tid, int cid, unsigned int freq)
{
#ifdef CONFIG_ARM
	struct mmp_timer *mt = mmp_timers[tid];
	int ret;

	if (!mt)
		return -EINVAL;

	if (cid < 0 || cid >= MMP_MAX_COUNTER)
		return -EINVAL;

	if (dclk) {
		pr_err("Delay clock has already been registered!\n");
		return -EINVAL;
	}
	dclk = kzalloc(sizeof(struct mmp_timer_dclk), GFP_KERNEL);
	if (!dclk)
		return -ENOMEM;

	mt->counters[cid].usage |= MMP_TIMER_COUNTER_DELAY;
	mt->counters[cid].cnt_freq = freq;

	dclk->counter = &mt->counters[cid];
	dclk->dt = &d_timer;
	d_timer.freq = freq;
#if !defined(CONFIG_CPU_ASR18XX) && !defined(CONFIG_CPU_ASR1901)
	register_current_timer_delay(&d_timer);
#endif
	ret = mmp_timer_counter_hw_init(mt, cid, freq);
	if (ret)
		return ret;
#endif
	return 0;
}

#ifdef CONFIG_SMP
static int mmp_local_timer_starting_cpu(unsigned int cpu)
{
	struct mmp_timer_clkevt *clkevt = cpu_clkevt[cpu];

	BUG_ON(!clkevt);
	clockevents_config_and_register(&clkevt->ced,
					clkevt->freq, MIN_DELTA, MAX_DELTA);
	/*
	 * Must use irq_force_affinity() in the low-level CPU hotplug
	 * path; irq_set_affinity() cannot set the irq affinity
	 * correctly here.
	 */
	irq_force_affinity(clkevt->ced.irq, cpumask_of(cpu));
	enable_irq(clkevt->ced.irq);

	return 0;
}

static int mmp_local_timer_dying_cpu(unsigned int cpu)
{
	struct mmp_timer_clkevt *clkevt = cpu_clkevt[cpu];

	BUG_ON(!clkevt);

	clkevt->ced.set_state_shutdown(&clkevt->ced);
	disable_irq(clkevt->ced.irq);
	return 0;
}
#endif
685
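/*
 * Register a clock event device on counter @cid of timer @tid. When
 * @cpu is MMP_TIMER_ALL_CPU the device is registered as a broadcast
 * timer for all possible CPUs; otherwise it is a per-CPU local timer
 * whose irq is forced onto that CPU (on SMP via the cpuhp
 * starting/dying callbacks above).
 */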
int __init mmp_counter_clockevent_init(int tid, int cid, int rating, int irq,
				       int freq, int dynirq, unsigned int cpu)
{
	struct mmp_timer *mt = mmp_timers[tid];
	struct mmp_timer_clkevt *clkevt;
	int broadcast = 0;
	int ret;

	if (!mt)
		return -EINVAL;

	if (cid < 0 || cid >= MMP_MAX_COUNTER)
		return -EINVAL;

	if (cpu == MMP_TIMER_ALL_CPU)
		broadcast = 1;
	else if (cpu >= num_possible_cpus())
		return -EINVAL;

	mt->counters[cid].usage |= MMP_TIMER_COUNTER_CLKEVT;
	mt->counters[cid].cnt_freq = freq;
	mt->counters[cid].cpu = cpu;

	clkevt = kzalloc(sizeof(struct mmp_timer_clkevt), GFP_KERNEL);
	if (!clkevt)
		return -ENOMEM;

	clkevt->counter = &mt->counters[cid];
	clkevt->ced.name = "clockevent-mmp";
	clkevt->ced.features = CLOCK_EVT_FEAT_ONESHOT;
	clkevt->ced.rating = rating;
	clkevt->ced.set_next_event = timer_set_next_event;
	clkevt->ced.set_state_shutdown = timer_set_shutdown;
	clkevt->ced.set_state_oneshot = timer_set_shutdown;
	clkevt->ced.tick_resume = timer_set_shutdown;

	clkevt->ced.irq = irq;

	clkevt->irqa.flags = IRQF_TIMER | IRQF_IRQPOLL;
	clkevt->irqa.handler = timer_interrupt;
	clkevt->irqa.dev_id = &(clkevt->ced);
	clkevt->freq = freq;
	ret = mmp_timer_counter_hw_init(mt, cid, freq);
	if (ret)
		return ret;

#ifndef CONFIG_SMP
	if (broadcast) {
		clkevt->irqa.name = "broadcast-timer";
		if (dynirq)
			clkevt->ced.features |= CLOCK_EVT_FEAT_DYNIRQ;
		clkevt->ced.cpumask = cpu_possible_mask;
		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
		clockevents_config_and_register(&clkevt->ced,
						freq, MIN_DELTA, MAX_DELTA);
	} else {
		clkevt->irqa.name = "local-timer";
		clkevt->ced.cpumask = cpumask_of(cpu);
		clkevt->nb.notifier_call = mmp_timer_cpu_notify;
		clkevt->irqa.flags |= IRQF_PERCPU;
		/* TODO: register_cpu_notifier(&clkevt->nb); */
		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
		/* Enable the clock event device for the boot CPU. */
		if (cpu == smp_processor_id()) {
			clockevents_config_and_register(&clkevt->ced,
							freq, MIN_DELTA,
							MAX_DELTA);
			/*
			 * Must use irq_force_affinity() in the low-level
			 * CPU hotplug path; irq_set_affinity() cannot set
			 * the irq affinity correctly here.
			 */
			irq_force_affinity(clkevt->ced.irq, cpumask_of(cpu));
		} else {
			/* Disable non-boot CPUs' irqs at first. */
			disable_irq(clkevt->ced.irq);
		}
	}
#else
	cpu_clkevt[cpu] = clkevt;

	if (broadcast) {
		clkevt->irqa.name = "broadcast-timer";
		if (dynirq)
			clkevt->ced.features |= CLOCK_EVT_FEAT_DYNIRQ;
		clkevt->ced.cpumask = cpu_possible_mask;
		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
		clockevents_config_and_register(&clkevt->ced,
						freq, MIN_DELTA, MAX_DELTA);
	} else {
		clkevt->irqa.name = "local-timer";
		clkevt->ced.cpumask = cpumask_of(cpu);
		clkevt->nb.notifier_call = mmp_timer_cpu_notify;
		clkevt->irqa.flags |= IRQF_PERCPU;
		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
		/* Disable the timer irq at first. */
		disable_irq(clkevt->ced.irq);
		if (cpu == smp_processor_id()) {
			/* Install and invoke the hotplug callbacks. */
			ret = cpuhp_setup_state(CPUHP_AP_ASR_TIMER_STARTING,
						"clockevents/asr/timer:starting",
						mmp_local_timer_starting_cpu,
						mmp_local_timer_dying_cpu);
			if (ret < 0)
				pr_err("%s: cpuhp_setup_state failed: %d\n",
				       __func__, ret);
		}
	}
#endif
	return 0;
}
796
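/*
 * Parse one counter child node. An illustrative fragment (the values
 * are examples only; the property names match the parsing below):
 *
 *	counter0 {
 *		marvell,timer-counter-id = <0>;
 *		marvell,timer-counter-usage = <MMP_TIMER_COUNTER_CLKSRC>;
 *		marvell,timer-counter-frequency = <32768>;
 *		marvell,timer-counter-rating = <200>;
 *	};
 */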
static int __init mmp_of_counter_init(struct device_node *np, int tid)
{
	int irq, ret, dynirq;
	unsigned int cid, usage, freq, cpu, rating;

	if (!np)
		return -EINVAL;

	ret = of_property_read_u32(np, "marvell,timer-counter-id", &cid);
	if (ret) {
		pr_err("Timer %d: fail to get counter id\n", tid);
		return ret;
	}
	ret = of_property_read_u32(np, "marvell,timer-counter-usage", &usage);
	if (ret) {
		pr_err("Timer %d:%d: fail to get counter usage\n", tid, cid);
		return ret;
	}

	if (usage == MMP_TIMER_COUNTER_NOTUSED) {
		pr_warn("Timer %d: counter %d is not used\n", tid, cid);
		return 0;
	}

	ret = of_property_read_u32(np, "marvell,timer-counter-frequency",
				   &freq);
	if (ret) {
		pr_err("Timer %d:%d: fail to get counter frequency\n",
		       tid, cid);
		return ret;
	}

	if (usage & MMP_TIMER_COUNTER_DELAY) {
		ret = mmp_counter_timer_delay_init(tid, cid, freq);
		if (ret) {
			pr_err("Timer %d:%d: fail to create delay timer\n",
			       tid, cid);
			return ret;
		}
	}

	if (usage & (MMP_TIMER_COUNTER_CLKSRC | MMP_TIMER_COUNTER_CLKEVT)) {
#ifdef CONFIG_CPU_ASR1903
		if (freq <= MMP_TIMER_CLOCK_32KHZ) {
			/* SCS mode */
			if (!readl(ASR1903_SCS_RTC_VIRT_BASE + SCS_DCS_MODE)) {
				pr_emerg("scs: %d->%dhz\n", freq,
					 SCS_TIMER_CLOCK_32KHZ);
				freq = SCS_TIMER_CLOCK_32KHZ;
			}
		}
#endif
		ret = of_property_read_u32(np,
					   "marvell,timer-counter-rating",
					   &rating);
		if (ret) {
			pr_err("Timer %d:%d: fail to get counter rating\n",
			       tid, cid);
			return ret;
		}

		if (usage & MMP_TIMER_COUNTER_CLKSRC) {
			ret = mmp_counter_clocksource_init(tid, cid, rating,
							   freq);
			if (ret) {
				pr_err("Timer %d:%d: fail to create clksrc\n",
				       tid, cid);
				return ret;
			}
		}
		if (usage & MMP_TIMER_COUNTER_CLKEVT) {
			ret = of_property_read_u32(np,
						   "marvell,timer-counter-cpu",
						   &cpu);
			if (ret) {
				pr_err("Timer %d:%d: fail to get cpu\n",
				       tid, cid);
				return ret;
			}
			dynirq = !of_property_read_bool(np,
					"marvell,timer-counter-nodynirq");
			irq = irq_of_parse_and_map(np, 0);
			ret = mmp_counter_clockevent_init(tid, cid, rating,
							  irq, freq, dynirq,
							  cpu);
			if (ret) {
				pr_err("Timer %d:%d: fail to create clkevt\n",
				       tid, cid);
				return ret;
			}
		}
	}

	return 0;
}

#ifdef CONFIG_OF

static const struct of_device_id mmp_timer_dt_ids[] = {
	{ .compatible = "mrvl,mmp-timer", },
	{}
};

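/*
 * Illustrative timer node (the address and values are examples only;
 * the property names match the parsing below):
 *
 *	timer0: timer@d4014000 {
 *		compatible = "mrvl,mmp-timer";
 *		reg = <0xd4014000 0x100>;
 *		marvell,timer-id = <0>;
 *		marvell,timer-flag = <0>;
 *		marvell,timer-fastclk-frequency = <3250000>;
 *		marvell,timer-apb-frequency = <26000000>;
 *	};
 */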
static int __init mmp_dt_init_timer(struct device_node *np)
{
	int tid;
	unsigned int flag, fc_freq, apb_freq, crsr_off;
	void __iomem *base;
	struct device_node *child_np;
	int ret = 0;

	/* timer initialization */
	ret = of_property_read_u32(np, "marvell,timer-id", &tid);
	if (ret) {
		pr_err("Timer: fail to get timer id with err %d\n", ret);
		goto out;
	}
	if (tid < 0 || tid >= MMP_MAX_TIMER) {
		pr_err("Timer: id is out of range\n");
		ret = -EINVAL;
		goto out;
	}
	base = of_iomap(np, 0);
	if (!base) {
		pr_err("Timer: fail to map register space\n");
		ret = -EINVAL;
		goto out;
	}
	ret = of_property_read_u32(np, "marvell,timer-flag", &flag);
	if (ret) {
		pr_err("Timer %d: fail to get flag with err %d\n", tid, ret);
		goto out;
	}

	/* timer's fast clock and apb frequency */
	ret = of_property_read_u32(np, "marvell,timer-fastclk-frequency",
				   &fc_freq);
	if (ret) {
		pr_err("Timer %d: fail to get fastclk-frequency with err %d\n",
		       tid, ret);
		goto out;
	}
	ret = of_property_read_u32(np, "marvell,timer-apb-frequency",
				   &apb_freq);
	if (ret) {
		pr_err("Timer %d: fail to get apb-frequency with err %d\n",
		       tid, ret);
		goto out;
	}

	/*
	 * A delay loop is needed for safe register accesses, so the
	 * loop count is calculated dynamically here.
	 */
	if (!fc_freq || !apb_freq) {
		pr_err("mmp timer's fast clock or apb freq is incorrect!\n");
		ret = -EINVAL;
		goto out;
	}

	crsr_off = TMR_CRSR;
	ret = of_property_read_u32(np, "marvell,timer-crsr-offset", &crsr_off);
	if (ret)
		pr_warn("Timer %d: fail to get crsr, use default %x\n",
			tid, crsr_off);

	ret = mmp_timer_init(tid, base, flag, fc_freq, apb_freq, crsr_off);
	if (ret)
		goto out;

	/*
	 * If the device node is marked as not available, don't try to
	 * enable the counters.
	 */
	if (!of_device_is_available(np)) {
		pr_warn("Timer %d: is not used\n", tid);
		return -ENODEV;
	}

	/* counter initialization */
	for_each_child_of_node(np, child_np) {
		ret = mmp_of_counter_init(child_np, tid);
		if (ret)
			goto out;
	}

	return 0;
out:
	if (ret)
		pr_err("Failed to get timer from dtb with error:%d\n", ret);
	return -ENODEV;
}

TIMER_OF_DECLARE(mmp_timer, "mrvl,mmp-timer", mmp_dt_init_timer);
#endif
#if 0
extern struct device device_clocksource;
static ssize_t show_mmp_clocksource_counter(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	u64 counter = 0;

	counter = clksrc_read(&clksrc->cs);
	return sprintf(buf, "%lu\n", (unsigned long)counter);
}
static DEVICE_ATTR(mmp_clocksource_counter, S_IRUGO,
		   show_mmp_clocksource_counter, NULL);
/*
 * This counter is used by the CP for time sync. The AP side cannot
 * change it without syncing with the CP and telephony code.
 */
static int __init mmp_clocksource_counter_init(void)
{
	if (clksrc)
		return device_create_file(&device_clocksource,
					  &dev_attr_mmp_clocksource_counter);
	else
		return 0;
}
late_initcall(mmp_clocksource_counter_init);
#endif