ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/clocksource/timer-mmp.c b/marvell/linux/drivers/clocksource/timer-mmp.c
new file mode 100644
index 0000000..48d9c82
--- /dev/null
+++ b/marvell/linux/drivers/clocksource/timer-mmp.c
@@ -0,0 +1,1011 @@
+/*
+ * drivers/clocksource/timer-mmp.c
+ *
+ *   Support for clocksource and clockevents
+ *
+ * Copyright (C) 2008 Marvell International Ltd.
+ * All rights reserved.
+ *
+ *   2008-04-11: Jason Chagas <Jason.chagas@marvell.com>
+ *   2008-10-08: Bin Yang <bin.yang@marvell.com>
+ *
+ * The timer module actually includes three timers, each with up to
+ * three match comparators. Timer #0 is used here in free-running mode
+ * as the clock source, and match comparator #1 is used as the clock
+ * event device.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/mmp_timer.h>
+#include <linux/sched_clock.h>
+#include <linux/stat.h>
+#include <linux/cputype.h>
+#include <dt-bindings/clock/timer-mmp.h>
+#include <soc/asr/regs-timers.h>
+#include <soc/asr/addr-map.h>
+
+#define MAX_EVT_NUM		5
+
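+/* clockevent programming limits, in ticks of the selected counter clock */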
+#define MAX_DELTA		(0xfffffffe)
+#define MIN_DELTA		(5)
+
+#define MMP_MAX_COUNTER		3
+#define MMP_MAX_TIMER		4
+
+#define TMR_CER_COUNTER(cid)	(1 << (cid))
+#define MMP_ALL_COUNTERS	((1 << MMP_MAX_COUNTER) - 1)
+
+#define MMP_TIMER_CLOCK_32KHZ	32768
+
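+/*
+ * Effective rate of the nominally 32.768 kHz source when the SCS RTC is
+ * in use. The exact values are assumed to be per-SoC calibration
+ * constants; they are not documented in this file.
+ */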
+#ifdef CONFIG_CPU_ASR1903
+#define SCS_TIMER_CLOCK_32KHZ	32765
+#else
+#define SCS_TIMER_CLOCK_32KHZ	32787
+#endif
+
+#define MMP_TIMER_CLOCK_1KHZ	1000
+
+#define ASR1903_SCS_RTC_VIRT_BASE	(APB_VIRT_BASE + 0x0C0000)
+#define SCS_DCS_MODE			(0x1c)
+
+struct mmp_timer;
+struct mmp_timer_counter {
+	unsigned int id;
+	unsigned int usage;
+	unsigned int cnt_freq;
+	int cpu;
+	int loop_delay;
+	struct mmp_timer *timer;
+};
+
+struct mmp_timer {
+	unsigned int id;
+	void __iomem *base;
+	unsigned int crsr_off;
+	struct mmp_timer_counter counters[MMP_MAX_COUNTER];
+	unsigned int flag;
+	int loop_delay_fastclk;
+	unsigned int fc_freq;
+	spinlock_t tm_lock;
+};
+
+struct mmp_timer_clkevt {
+	struct mmp_timer_counter *counter;
+	struct clock_event_device ced;
+	struct irqaction irqa;
+	struct notifier_block nb;
+	int freq;
+};
+
+struct mmp_timer_clksrc {
+	struct mmp_timer_counter *counter;
+	struct clocksource cs;
+};
+
+#ifdef CONFIG_ARM
+struct mmp_timer_dclk {
+	struct mmp_timer_counter *counter;
+	struct delay_timer *dt;
+};
+static struct mmp_timer_dclk *dclk;
+#endif
+
+static struct mmp_timer *mmp_timers[MMP_MAX_TIMER];
+static struct mmp_timer_clksrc *clksrc;
+
+#ifdef CONFIG_SMP
+static struct mmp_timer_clkevt *cpu_clkevt[NR_CPUS];
+#endif
+
+static int timer_counter_switch_clock(int tid, int cid, unsigned int freq)
+{
+	struct mmp_timer *tm = mmp_timers[tid];
+	u32 ccr, mask;
+
+	ccr = __raw_readl(tm->base + TMR_CCR);
+
+	if (cid == 0)
+		mask = TMR_CCR_CS_0(3);
+	else if (cid == 1)
+		mask = TMR_CCR_CS_1(3);
+	else
+		mask = TMR_CCR_CS_2(3);
+
+	ccr &= ~mask;
+
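+	/*
+	 * Clock-select encodings as programmed below (derived from this
+	 * driver's assignments, not from a datasheet):
+	 *   counter 0: 0 = fast clock, 1 = 32 kHz (no 1 kHz source)
+	 *   counter 1: 0 = fast clock, 1 = 32 kHz, 2 = 1 kHz
+	 *   counter 2: 0 = fast clock, 1 = 1 kHz,  2 = 32 kHz
+	 */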
+	if (freq == MMP_TIMER_CLOCK_32KHZ || freq == SCS_TIMER_CLOCK_32KHZ) {
+		if (cid == 2)
+			ccr |= TMR_CCR_CS_2(2);
+		else if (cid == 1)
+			ccr |= TMR_CCR_CS_1(1);
+		else if (cid == 0)
+			ccr |= TMR_CCR_CS_0(1);
+	} else if (freq == MMP_TIMER_CLOCK_1KHZ) {
+		if (cid == 2)
+			ccr |= TMR_CCR_CS_2(1);
+		else if (cid == 1)
+			ccr |= TMR_CCR_CS_1(2);
+	} else if (freq == tm->fc_freq) {
+		if (cid == 2)
+			ccr |= TMR_CCR_CS_2(0);
+		else if (cid == 1)
+			ccr |= TMR_CCR_CS_1(0);
+		else if (cid == 0)
+			ccr |= TMR_CCR_CS_0(0);
+	} else {
+		pr_err("Timer %d:%d: invalid clock rate %d\n", tid, cid, freq);
+		return -EINVAL;
+	}
+
+	__raw_writel(ccr, tm->base + TMR_CCR);
+
+	return 0;
+}
+
+static void timer_counter_disable(struct mmp_timer_counter *cnt)
+{
+	struct mmp_timer *tm = cnt->timer;
+	int delay = tm->loop_delay_fastclk;
+	u32 cer;
+
+	/*
+	 * Stopping the counter takes multiple timer clock cycles to take
+	 * effect, and some operations are only allowed while the counter
+	 * is disabled, so a delay is added here.
+	 */
+	/* Step 1: disable the counter */
+	cer = __raw_readl(tm->base + TMR_CER);
+	__raw_writel(cer & ~(1 << cnt->id), tm->base + TMR_CER);
+
+	/*
+	 * Step 2: switch to the fast clock so that the delay below
+	 * completes quickly.
+	 */
+	if (cnt->cnt_freq != tm->fc_freq)
+		timer_counter_switch_clock(tm->id, cnt->id, tm->fc_freq);
+
+	/*
+	 * Step 3: loop for multiple timer cycles. Repeatedly clearing the
+	 * pending interrupt status serves as the delay.
+	 */
+	while (delay--)
+		__raw_writel(0x1, tm->base + TMR_ICR(cnt->id));
+}
+
+static void timer_counter_enable(struct mmp_timer_counter *cnt)
+{
+	struct mmp_timer *tm = cnt->timer;
+	u32 cer;
+#ifdef CONFIG_CPU_ASR1802S
+	int timeout = 800;
+#endif
+
+	/* Switch to original clock */
+	if (cnt->cnt_freq != tm->fc_freq)
+		timer_counter_switch_clock(tm->id, cnt->id, cnt->cnt_freq);
+
+	/* Enable timer */
+	cer = __raw_readl(tm->base + TMR_CER);
+	__raw_writel(cer | (1 << cnt->id), tm->base + TMR_CER);
+#ifdef CONFIG_CPU_ASR1802S
+	/* wait (bounded) for the counter to restart from zero */
+	if (cpu_is_asr1802s()) {
+		while (__raw_readl(tm->base + TMR_CR(cnt->id)) != 0 &&
+		       timeout-- > 0)
+			;
+	}
+#endif
+}
+
+static inline uint32_t timer_read(struct mmp_timer_counter *cnt)
+{
+	struct mmp_timer *tm = cnt->timer;
+	int has_shadow = tm->flag & MMP_TIMER_FLAG_SHADOW;
+	int delay = 3;
+	u32 val1, val2;
+
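+	/*
+	 * Three read strategies:
+	 *  - shadow registers: TMR_CR can be read directly;
+	 *  - no shadow, slow clock: read twice until two consecutive values
+	 *    match, as the counter may be asynchronous to the bus clock;
+	 *  - no shadow, fast clock: write TMR_CVWR to latch the count, then
+	 *    read it back a few times to let the latched value settle.
+	 */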
+	if (has_shadow)
+		return __raw_readl(tm->base + TMR_CR(cnt->id));
+	else {
+		if (cnt->cnt_freq != tm->fc_freq) {
+			/* slow clock */
+			do {
+				val1 = __raw_readl(tm->base + TMR_CR(cnt->id));
+				val2 = __raw_readl(tm->base + TMR_CR(cnt->id));
+			} while (val2 != val1);
+		} else {
+			/* fast clock */
+			__raw_writel(1, tm->base + TMR_CVWR(cnt->id));
+			while (delay--)
+				val1 = __raw_readl(tm->base +
+						TMR_CVWR(cnt->id));
+		}
+		return val1;
+	}
+}
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *c = dev_id;
+	struct mmp_timer_clkevt *evt;
+	struct mmp_timer_counter *counter;
+	unsigned int cnt;
+	unsigned long flags;
+	void __iomem *base;
+	int has_crsr;
+
+	evt = container_of(c, struct mmp_timer_clkevt, ced);
+	counter = evt->counter;
+	cnt = counter->id;
+	base = counter->timer->base;
+	has_crsr = counter->timer->flag & MMP_TIMER_FLAG_CRSR;
+
+	spin_lock_irqsave(&(counter->timer->tm_lock), flags);
+	/* We only use match #0 for the counter. */
+	if (__raw_readl(base + TMR_SR(cnt)) & 0x1) {
+		if (!has_crsr)
+			timer_counter_disable(counter);
+
+		/* Disable the interrupt. */
+		__raw_writel(0x00, base + TMR_IER(cnt));
+		/* Clear interrupt status */
+		__raw_writel(0x01, base + TMR_ICR(cnt));
+
+		spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);
+
+		c->event_handler(c);
+
+		return IRQ_HANDLED;
+	}
+
+	spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);
+
+	return IRQ_NONE;
+}
+
+static int timer_set_next_event(unsigned long delta,
+				struct clock_event_device *dev)
+{
+	struct mmp_timer_counter *cnt;
+	struct mmp_timer_clkevt *evt;
+	unsigned long flags;
+	unsigned int cid;
+	unsigned int crsr_off;
+	u32 cer, crsr;
+	void __iomem *base;
+	int delay, has_crsr;
+
+	evt = container_of(dev, struct mmp_timer_clkevt, ced);
+	cnt = evt->counter;
+	cid = cnt->id;
+	base = cnt->timer->base;
+	crsr_off = cnt->timer->crsr_off;
+	has_crsr = cnt->timer->flag & MMP_TIMER_FLAG_CRSR;
+
+	spin_lock_irqsave(&(cnt->timer->tm_lock), flags);
+	if (has_crsr) {
+		/*
+		 * Use TMR_CRSR to restart the counter and make the match
+		 * register take effect. The bit must read back as 0 before
+		 * it is set again; poll it for at most loop_delay
+		 * iterations.
+		 */
+		delay = cnt->loop_delay;
+		do {
+			crsr = __raw_readl(base + crsr_off);
+			delay--;
+		} while ((crsr & (1 << cid)) && delay > 0);
+
+		BUG_ON(delay <= 0);
+
+		__raw_writel(delta - 1, base + TMR_TN_MM(cid, 0));
+		/*
+		 * After the counter is restarted, clear the interrupt
+		 * status to be safe, and re-enable the interrupt for
+		 * match #0.
+		 */
+		__raw_writel(0x01, base + TMR_ICR(cid));
+		__raw_writel(0x01, base + TMR_IER(cid));
+		__raw_writel((1 << cid), base + crsr_off);
+	} else {
+		cer = __raw_readl(base + TMR_CER);
+
+		/* If the timer counter is enabled, first disable it. */
+		if (cer & (1 << cid))
+			timer_counter_disable(cnt);
+
+		/* Setup new counter value */
+		__raw_writel(delta - 1, base + TMR_TN_MM(cid, 0));
+
+		/* enable the matching interrupt */
+		__raw_writel(0x1, base + TMR_IER(cid));
+
+		timer_counter_enable(cnt);
+	}
+	spin_unlock_irqrestore(&(cnt->timer->tm_lock), flags);
+
+	return 0;
+}
+
+static int timer_set_shutdown(struct clock_event_device *dev)
+{
+	unsigned long flags;
+	unsigned int cnt;
+	struct mmp_timer_counter *counter;
+	struct mmp_timer_clkevt *evt;
+	void __iomem *base;
+
+	evt = container_of(dev, struct mmp_timer_clkevt, ced);
+	counter = evt->counter;
+	cnt = counter->id;
+	base = counter->timer->base;
+
+	spin_lock_irqsave(&(counter->timer->tm_lock), flags);
+	timer_counter_disable(counter);
+	spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);
+
+	return 0;
+}
+
+/*
+static void timer_set_oneshot(struct clock_event_device *dev)
+{
+	unsigned long flags;
+	unsigned int cnt;
+	struct mmp_timer_counter *counter;
+	struct mmp_timer_clkevt *evt;
+	void __iomem *base;
+
+	evt = container_of(dev, struct mmp_timer_clkevt, ced);
+	counter = evt->counter;
+	cnt = counter->id;
+	base = counter->timer->base;
+
+	spin_lock_irqsave(&(counter->timer->tm_lock), flags);
+	timer_counter_enable(counter);
+	spin_unlock_irqrestore(&(counter->timer->tm_lock), flags);
+}
+*/
+
+static u64 clksrc_read(struct clocksource *cs)
+{
+	return timer_read(clksrc->counter);
+}
+
+static u64 notrace mmp_read_sched_clock(void)
+{
+	return timer_read(clksrc->counter);
+}
+
+#ifdef CONFIG_ARM
+static unsigned long d_read_current_timer(void)
+{
+	return timer_read(dclk->counter);
+}
+
+static struct delay_timer d_timer = {
+	.read_current_timer	= d_read_current_timer,
+};
+#endif
+
+static int mmp_timer_cpu_notify(struct notifier_block *self,
+					   unsigned long action, void *hcpu)
+{
+#if 0
+	struct mmp_timer_clkevt *evt;
+	struct mmp_timer_counter *cnt;
+
+	evt = container_of(self, struct mmp_timer_clkevt, nb);
+	cnt = evt->counter;
+
+	if (cnt->cpu != (unsigned long)hcpu)
+		return NOTIFY_OK;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		clockevents_config_and_register(&evt->ced,
+						cnt->cnt_freq,
+						MIN_DELTA, MAX_DELTA);
+		break;
+	case CPU_ONLINE:
+		irq_set_affinity(evt->ced.irq, evt->ced.cpumask);
+		enable_irq(evt->ced.irq);
+		break;
+	case CPU_DYING:
+		clockevents_set_mode(&evt->ced,
+					CLOCK_EVT_MODE_SHUTDOWN);
+		disable_irq(evt->ced.irq);
+		break;
+	}
+#endif
+	return NOTIFY_OK;
+}
+
+int __init mmp_timer_init(int tid, void __iomem *base,
+			unsigned int flag, unsigned int fc_freq,
+			unsigned int apb_freq, unsigned int crsr_off)
+{
+	struct mmp_timer *tm = mmp_timers[tid];
+	u32 tmp, delay;
+	int cid;
+
+	if (tm)
+		return -EINVAL;
+
+	tm = kzalloc(sizeof(struct mmp_timer), GFP_KERNEL);
+	if (!tm)
+		return -ENOMEM;
+
+	/*
+	 * The calculation formula for the loop cycle is:
+	 *
+	 * (1) need wait for 2 timer's clock cycle:
+	 *        1             2
+	 *     ------- x 2 = -------
+	 *     fc_freq       fc_freq
+	 *
+	 * (2) convert to apb clock cycle:
+	 *        2          1        apb_freq * 2
+	 *     ------- / -------- = ----------------
+	 *     fc_freq   apb_freq       fc_freq
+	 *
+	 * (3) every APB register access takes 8 APB clock cycles, plus one
+	 *     extra iteration as a safety margin; so the number of
+	 *     register-access loops needed is:
+	 *
+	 *       (apb_freq * 2)
+	 *     ------------------ / 8 + 1
+	 *          fc_freq
+	 */
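+	/*
+	 * Worked example with hypothetical rates (not from any datasheet):
+	 * apb_freq = 26 MHz and fc_freq = 3.25 MHz give
+	 * (26000000 * 2) / 3250000 / 8 + 1 = 16 / 8 + 1 = 3 loops.
+	 */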
+	delay = ((apb_freq * 2) / fc_freq / 8) + 1;
+	pr_debug("Timer %d: loop_delay_fastclk is %d\n", tid, delay);
+
+	tm->id = tid;
+	tm->base = base;
+	tm->flag = flag;
+	tm->loop_delay_fastclk = delay;
+	tm->fc_freq = fc_freq;
+	tm->crsr_off = crsr_off;
+	spin_lock_init(&(tm->tm_lock));
+
+	mmp_timers[tid] = tm;
+
+	for (cid = 0; cid < MMP_MAX_COUNTER; cid++) {
+		tm->counters[cid].id = cid;
+		tm->counters[cid].timer = tm;
+
+		/* We will disable all counters. Switch to fastclk first. */
+		timer_counter_switch_clock(tid, cid, fc_freq);
+	}
+
+	/* disable all counters */
+	tmp = __raw_readl(base + TMR_CER) & ~MMP_ALL_COUNTERS;
+	__raw_writel(tmp, base + TMR_CER);
+
+	/* disable matching interrupt */
+	__raw_writel(0x00, base + TMR_IER(0));
+	__raw_writel(0x00, base + TMR_IER(1));
+	__raw_writel(0x00, base + TMR_IER(2));
+
+	while (delay--) {
+		/* Clear pending interrupt status */
+		__raw_writel(0x1, base + TMR_ICR(0));
+		__raw_writel(0x1, base + TMR_ICR(1));
+		__raw_writel(0x1, base + TMR_ICR(2));
+		__raw_writel(tmp, base + TMR_CER);
+	}
+
+	return 0;
+}
+
+static int __init mmp_timer_counter_hw_init(struct mmp_timer *tm, int cid,
+					unsigned int freq)
+{
+	u32 tmp, delay;
+	unsigned int ratio;
+	int ret;
+
+	ret = timer_counter_switch_clock(tm->id, cid, freq);
+	if (ret)
+		return ret;
+
+	tm->counters[cid].cnt_freq = freq;
+	ratio = tm->fc_freq / freq;
+	tm->counters[cid].loop_delay = tm->loop_delay_fastclk * ratio;
+
+	/* set timer to free-running mode */
+	tmp = __raw_readl(tm->base + TMR_CMR) | TMR_CER_COUNTER(cid);
+	__raw_writel(tmp, tm->base + TMR_CMR);
+
+	/*
+	 * CMR and PLCR together select the counting mode:
+	 *
+	 * If CMR[cid] == 0, the counter is periodic:
+	 *   If PLCR == 0, after re-programming and restart it continues
+	 *   counting; when a match occurs it preloads the PLVR value and
+	 *   continues counting.
+	 *   If PLCR != 0, after re-programming and restart it preloads the
+	 *   PLVR value first; when a match occurs it preloads PLVR again
+	 *   and continues counting.
+	 *
+	 * If CMR[cid] == 1, the counter is free-running:
+	 *   If PLCR == 0, after re-programming and restart it continues
+	 *   counting until 0xFFFFFFFF, then wraps to 0 and continues.
+	 *   If PLCR != 0, after re-programming and restart it preloads the
+	 *   PLVR value, counts until 0xFFFFFFFF, wraps to 0 and continues.
+	 */
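+	/*
+	 * Below, clock-event counters get a PLVR preload value of 0 so a
+	 * restarted counter counts up from 0; clocksource and delay
+	 * counters keep PLCR at 0 and run free over the full 32-bit range.
+	 */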
+	if (tm->counters[cid].usage & MMP_TIMER_COUNTER_CLKEVT) {
+		/* pre-load the value in PLVR */
+		tmp = __raw_readl(tm->base + TMR_PLCR(cid)) | cid;
+		__raw_writel(tmp, tm->base + TMR_PLCR(cid));
+		__raw_writel(0x0, tm->base + TMR_PLVR(cid));
+	} else {
+		/* free-running */
+		__raw_writel(0x0, tm->base + TMR_PLCR(cid));
+	}
+
+	/* clear status */
+	__raw_writel(0x7, tm->base + TMR_ICR(cid));
+
+	/* enable counter */
+	tmp = __raw_readl(tm->base + TMR_CER) | TMR_CER_COUNTER(cid);
+	__raw_writel(tmp, tm->base + TMR_CER);
+
+	delay = tm->counters[cid].loop_delay;
+	while (delay--)
+		__raw_writel(tmp, tm->base + TMR_CER);
+
+	return 0;
+}
+
+int __init mmp_counter_clocksource_init(int tid, int cid, int rating,
+		unsigned int freq)
+{
+	struct mmp_timer *mt = mmp_timers[tid];
+	int ret;
+
+	if (!mt)
+		return -EINVAL;
+
+	if (cid < 0 || cid >= MMP_MAX_COUNTER)
+		return -EINVAL;
+
+	if (clksrc) {
+		pr_err("One clksrc has already been registered!\n");
+		return -EINVAL;
+	}
+	clksrc = kzalloc(sizeof(struct mmp_timer_clksrc), GFP_KERNEL);
+	if (!clksrc)
+		return -ENOMEM;
+
+	mt->counters[cid].usage |= MMP_TIMER_COUNTER_CLKSRC;
+	mt->counters[cid].cnt_freq = freq;
+
+	clksrc->counter = &mt->counters[cid];
+	clksrc->cs.name = "clocksource-mmp";
+	clksrc->cs.rating = rating;
+	clksrc->cs.read = clksrc_read;
+	clksrc->cs.mask = CLOCKSOURCE_MASK(32);
+	clksrc->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	if (freq == SCS_TIMER_CLOCK_32KHZ || freq == MMP_TIMER_CLOCK_32KHZ)
+		clksrc->cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
+	/* setup_sched_clock(mmp_read_sched_clock, 32, freq); */
+	sched_clock_register(mmp_read_sched_clock, 32, freq);
+	clocksource_register_hz(&(clksrc->cs), freq);
+
+	ret = mmp_timer_counter_hw_init(mt, cid, freq);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int __init mmp_counter_timer_delay_init(int tid, int cid, unsigned int freq)
+{
+#ifdef CONFIG_ARM
+	struct mmp_timer *mt = mmp_timers[tid];
+	int ret;
+
+	if (!mt)
+		return -EINVAL;
+
+	if (cid < 0 || cid >= MMP_MAX_COUNTER)
+		return -EINVAL;
+
+	if (dclk) {
+		pr_err("Delay clock has already been registered!\n");
+		return -EINVAL;
+	}
+	dclk = kzalloc(sizeof(struct mmp_timer_dclk), GFP_KERNEL);
+	if (!dclk)
+		return -ENOMEM;
+
+	mt->counters[cid].usage |= MMP_TIMER_COUNTER_DELAY;
+	mt->counters[cid].cnt_freq = freq;
+
+	dclk->counter = &mt->counters[cid];
+	dclk->dt = &d_timer;
+	d_timer.freq = freq;
+#if !defined(CONFIG_CPU_ASR18XX) && !defined(CONFIG_CPU_ASR1901)
+	register_current_timer_delay(&d_timer);
+#endif
+	ret = mmp_timer_counter_hw_init(mt, cid, freq);
+	if (ret)
+		return ret;
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static int mmp_local_timer_starting_cpu(unsigned int cpu)
+{
+	struct mmp_timer_clkevt *clkevt = cpu_clkevt[cpu];
+
+	BUG_ON(!clkevt);
+	clockevents_config_and_register(&clkevt->ced,
+			clkevt->freq, MIN_DELTA, MAX_DELTA);
+	/*
+	 * Must use irq_force_affinity() in the low-level CPU hotplug
+	 * path; irq_set_affinity() cannot set the affinity correctly
+	 * here.
+	 */
+	irq_force_affinity(clkevt->ced.irq, cpumask_of(cpu));
+	enable_irq(clkevt->ced.irq);
+
+	return 0;
+}
+
+static int mmp_local_timer_dying_cpu(unsigned int cpu)
+{
+	struct mmp_timer_clkevt *clkevt = cpu_clkevt[cpu];
+
+	BUG_ON(!clkevt);
+
+	clkevt->ced.set_state_shutdown(&clkevt->ced);
+	disable_irq(clkevt->ced.irq);
+	return 0;
+}
+#endif
+
+int __init mmp_counter_clockevent_init(int tid, int cid, int rating, int irq,
+		int freq, int dynirq, unsigned int cpu)
+{
+	struct mmp_timer *mt = mmp_timers[tid];
+	struct mmp_timer_clkevt *clkevt;
+	int broadcast = 0;
+	int ret;
+
+	if (!mt)
+		return -EINVAL;
+
+	if (cid < 0 || cid >= MMP_MAX_COUNTER)
+		return -EINVAL;
+
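+	/* MMP_TIMER_ALL_CPU requests a single shared broadcast device */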
+	if (cpu == MMP_TIMER_ALL_CPU)
+		broadcast = 1;
+	else if (cpu >= num_possible_cpus())
+		return -EINVAL;
+
+	mt->counters[cid].usage |= MMP_TIMER_COUNTER_CLKEVT;
+	mt->counters[cid].cnt_freq = freq;
+	mt->counters[cid].cpu = cpu;
+
+	clkevt = kzalloc(sizeof(struct mmp_timer_clkevt), GFP_KERNEL);
+	if (!clkevt)
+		return -ENOMEM;
+
+	clkevt->counter = &mt->counters[cid];
+	clkevt->ced.name = "clockevent-mmp";
+	clkevt->ced.features = CLOCK_EVT_FEAT_ONESHOT;
+	clkevt->ced.rating = rating;
+	clkevt->ced.set_next_event = timer_set_next_event;
+	clkevt->ced.set_state_shutdown = timer_set_shutdown;
+	clkevt->ced.set_state_oneshot = timer_set_shutdown;
+	clkevt->ced.tick_resume = timer_set_shutdown;
+
+	clkevt->ced.irq = irq;
+
+	clkevt->irqa.flags = IRQF_TIMER |
+		IRQF_IRQPOLL;
+	clkevt->irqa.handler = timer_interrupt;
+	clkevt->irqa.dev_id = &(clkevt->ced);
+	clkevt->freq = freq;
+	ret = mmp_timer_counter_hw_init(mt, cid, freq);
+	if (ret)
+		return ret;
+
+#ifndef CONFIG_SMP
+	if (broadcast) {
+		clkevt->irqa.name = "broadcast-timer";
+		if (dynirq)
+			clkevt->ced.features |= CLOCK_EVT_FEAT_DYNIRQ;
+		clkevt->ced.cpumask = cpu_possible_mask;
+		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
+		clockevents_config_and_register(&clkevt->ced,
+				freq, MIN_DELTA, MAX_DELTA);
+	} else {
+		clkevt->irqa.name = "local-timer";
+		clkevt->ced.cpumask = cpumask_of(cpu);
+		clkevt->nb.notifier_call = mmp_timer_cpu_notify;
+		clkevt->irqa.flags |= IRQF_PERCPU;
+		//TODO:
+		//register_cpu_notifier(&clkevt->nb);
+		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
+		/* Enable clock event device for boot CPU. */
+		if (cpu == smp_processor_id()) {
+			clockevents_config_and_register(&clkevt->ced,
+					freq, MIN_DELTA, MAX_DELTA);
+			/*
+			 * Must use irq_force_affinity() in the low-level
+			 * CPU hotplug path; irq_set_affinity() cannot set
+			 * the affinity correctly here.
+			 */
+			irq_force_affinity(clkevt->ced.irq, cpumask_of(cpu));
+		} else {
+			/* disable non-boot CPUs' irq for now */
+			disable_irq(clkevt->ced.irq);
+		}
+	}
+#else
+	cpu_clkevt[cpu] = clkevt;
+
+	if (broadcast) {
+		clkevt->irqa.name = "broadcast-timer";
+		if (dynirq)
+			clkevt->ced.features |= CLOCK_EVT_FEAT_DYNIRQ;
+		clkevt->ced.cpumask = cpu_possible_mask;
+		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
+		clockevents_config_and_register(&clkevt->ced,
+				freq, MIN_DELTA, MAX_DELTA);
+	} else {
+		clkevt->irqa.name = "local-timer";
+		clkevt->ced.cpumask = cpumask_of(cpu);
+		clkevt->nb.notifier_call = mmp_timer_cpu_notify;
+		clkevt->irqa.flags |= IRQF_PERCPU;
+		setup_irq(clkevt->ced.irq, &(clkevt->irqa));
+		/* disabled here; the hotplug starting callback enables it */
+		disable_irq(clkevt->ced.irq);
+		if (cpu == smp_processor_id()) {
+			/* Install and invoke hotplug callbacks */
+			ret = cpuhp_setup_state(CPUHP_AP_ASR_TIMER_STARTING,
+						"clockevents/asr/timer:starting",
+						mmp_local_timer_starting_cpu,
+						mmp_local_timer_dying_cpu);
+			if (ret < 0)
+				pr_err("%s: cpuhp_setup_state failed: %d\n", __func__, ret);
+		}
+	}
+#endif
+	return 0;
+}
+
+static int __init mmp_of_counter_init(struct device_node *np, int tid)
+{
+	int irq, ret, dynirq;
+	unsigned int cid, usage, freq, cpu, rating;
+
+	if (!np)
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "marvell,timer-counter-id", &cid);
+	if (ret) {
+		pr_err("Timer %d: fail to get counter id\n", tid);
+		return ret;
+	}
+	ret = of_property_read_u32(np, "marvell,timer-counter-usage", &usage);
+	if (ret) {
+		pr_err("Timer %d:%d: fail to get counter usage\n", tid, cid);
+		return ret;
+	}
+
+	if (usage == MMP_TIMER_COUNTER_NOTUSED) {
+		pr_warn("Timer %d: counter %d is not used\n", tid, cid);
+		return 0;
+	}
+
+	ret = of_property_read_u32(np, "marvell,timer-counter-frequency",
+			&freq);
+	if (ret) {
+		pr_err("Timer %d:%d: fail to get counter frequency\n",
+				tid, cid);
+		return ret;
+	}
+
+	if (usage & MMP_TIMER_COUNTER_DELAY) {
+		ret = mmp_counter_timer_delay_init(tid, cid, freq);
+		if (ret) {
+			pr_err("Timer %d:%d: fail to create delay timer\n",
+					tid, cid);
+			return ret;
+		}
+	}
+
+	if (usage & (MMP_TIMER_COUNTER_CLKSRC | MMP_TIMER_COUNTER_CLKEVT)) {
+#ifdef CONFIG_CPU_ASR1903
+		if (freq <= MMP_TIMER_CLOCK_32KHZ) {
+			/* SCS mode */
+			if (!readl(ASR1903_SCS_RTC_VIRT_BASE + SCS_DCS_MODE)) {
+				pr_emerg("scs: %d->%dhz\n", freq, SCS_TIMER_CLOCK_32KHZ);
+				freq = SCS_TIMER_CLOCK_32KHZ;
+			}
+		}
+#endif
+		ret = of_property_read_u32(np,
+				"marvell,timer-counter-rating", &rating);
+		if (ret) {
+			pr_err("Timer %d:%d: fail to get counter rating\n",
+					tid, cid);
+			return ret;
+		}
+
+		if (usage & MMP_TIMER_COUNTER_CLKSRC) {
+			ret = mmp_counter_clocksource_init(tid, cid, rating,
+					freq);
+			if (ret) {
+				pr_err("Timer %d:%d: fail to create clksrc\n",
+						tid, cid);
+				return ret;
+			}
+		}
+		if (usage & MMP_TIMER_COUNTER_CLKEVT) {
+			ret = of_property_read_u32(np,
+					"marvell,timer-counter-cpu", &cpu);
+			if (ret) {
+				pr_err("Timer %d:%d: fail to get cpu\n",
+						tid, cid);
+				return ret;
+			}
+			dynirq = !of_property_read_bool(np,
+					"marvell,timer-counter-nodynirq");
+			irq = irq_of_parse_and_map(np, 0);
+			ret = mmp_counter_clockevent_init(tid, cid, rating,
+					irq, freq, dynirq, cpu);
+			if (ret) {
+				pr_err("Timer %d:%d: fail to create clkevt\n",
+						tid, cid);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
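+
+/*
+ * Example of the bindings consumed above (hypothetical values, for
+ * illustration only; the real nodes live in the platform dts):
+ *
+ *	timer0: timer@d4014000 {
+ *		compatible = "mrvl,mmp-timer";
+ *		reg = <0xd4014000 0x100>;
+ *		marvell,timer-id = <0>;
+ *		marvell,timer-flag = <0>;
+ *		marvell,timer-fastclk-frequency = <3250000>;
+ *		marvell,timer-apb-frequency = <26000000>;
+ *
+ *		counter0 {
+ *			interrupts = <13>;
+ *			marvell,timer-counter-id = <0>;
+ *			marvell,timer-counter-usage = <MMP_TIMER_COUNTER_CLKEVT>;
+ *			marvell,timer-counter-frequency = <32768>;
+ *			marvell,timer-counter-rating = <200>;
+ *			marvell,timer-counter-cpu = <0>;
+ *		};
+ *	};
+ */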
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id mmp_timer_dt_ids[] = {
+	{ .compatible = "mrvl,mmp-timer", },
+	{}
+};
+
+static int __init mmp_dt_init_timer(struct device_node *np)
+{
+	int tid;
+	unsigned int flag, fc_freq, apb_freq, crsr_off;
+	void __iomem *base;
+	struct device_node *child_np;
+	int ret = 0;
+
+	/* timer initialization */
+	ret = of_property_read_u32(np, "marvell,timer-id", &tid);
+	if (ret) {
+		pr_err("Timer: fail to get timer id with err %d\n", ret);
+		goto out;
+	}
+	if (tid < 0 || tid >= MMP_MAX_TIMER) {
+		pr_err("Timer: id %d is out of range\n", tid);
+		ret = -EINVAL;
+		goto out;
+	}
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("Timer: fail to map register space\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = of_property_read_u32(np, "marvell,timer-flag", &flag);
+	if (ret) {
+		pr_err("Timer %d: fail to get flag with err %d\n", tid, ret);
+		goto out;
+	}
+
+	/* timer's fast clock and apb frequency */
+	ret = of_property_read_u32(np, "marvell,timer-fastclk-frequency",
+			&fc_freq);
+	if (ret) {
+		pr_err("Timer %d: fail to get fastclk-frequency with err %d\n",
+				tid, ret);
+		goto out;
+	}
+	ret = of_property_read_u32(np, "marvell,timer-apb-frequency",
+			&apb_freq);
+	if (ret) {
+		pr_err("Timer %d: fail to get apb-frequency with err %d\n",
+				tid, ret);
+		goto out;
+	}
+
+	/*
+	 * Register accesses need a safety loop, so compute the loop count
+	 * dynamically from the clock rates here.
+	 */
+	if (!fc_freq || !apb_freq) {
+		pr_err("mmp timer's fast clock or apb freq are incorrect!\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	crsr_off = TMR_CRSR;
+	ret = of_property_read_u32(np, "marvell,timer-crsr-offset", &crsr_off);
+	if (ret)
+		pr_warn("Timer %d: fail to get crsr offset, using default 0x%x\n",
+				tid, crsr_off);
+
+	ret = mmp_timer_init(tid, base, flag, fc_freq, apb_freq, crsr_off);
+	if (ret)
+		goto out;
+
+	/*
+	 * If device node is marked as not available,
+	 * we then don't try to enable the counter again
+	 */
+	if (!of_device_is_available(np)) {
+		pr_warn("Timer %d is not available; counters are not enabled\n", tid);
+		return -ENODEV;
+	}
+
+	/* counter initialization */
+	for_each_child_of_node(np, child_np) {
+		ret = mmp_of_counter_init(child_np, tid);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	if (ret)
+		pr_err("Failed to initialize timer from DT, error: %d\n", ret);
+	return -ENODEV;
+}
+
+TIMER_OF_DECLARE(mmp_timer, "mrvl,mmp-timer", mmp_dt_init_timer);
+#endif
+#if 0
+extern struct device device_clocksource;
+static ssize_t show_mmp_clocksource_counter(struct device *dev, struct device_attribute *attr,
+											char *buf)
+{
+	u64 counter = 0;
+
+	counter = clksrc_read(&clksrc->cs);
+	return sprintf(buf, "%lu\n", (unsigned long)counter);
+}
+static DEVICE_ATTR(mmp_clocksource_counter, S_IRUGO, show_mmp_clocksource_counter, NULL);
+/* This counter is used by CP for time sync.
+ * AP side cannot change it without syncing
+ * with CP and telephony code.
+ */
+static int __init mmp_clocksource_counter_init(void)
+{
+	if (clksrc)
+		return device_create_file(&device_clocksource, &dev_attr_mmp_clocksource_counter);
+	else
+		return 0;
+}
+late_initcall(mmp_clocksource_counter_init);
+#endif