ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/clk/mmp/clk-ddr-asr1803.c b/marvell/linux/drivers/clk/mmp/clk-ddr-asr1803.c
new file mode 100644
index 0000000..559a15e
--- /dev/null
+++ b/marvell/linux/drivers/clk/mmp/clk-ddr-asr1803.c
@@ -0,0 +1,1308 @@
+/*
+ * mmp core clock operation source file
+ *
+ * Copyright: (C) Copyright 2018 ASR Microelectronics (Shanghai) Co., Ltd.
+ * Xuhong Gao <xuhonggao@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
+#include <soc/asr/asrdcstat.h>
+#include <soc/asr/asrcpdvc.h>
+#include "clk.h"
+#include "clk-asr1803.h"
+#include <linux/clk-provider.h>
+#include <dt-bindings/clock/asr,asr1803.h>
+#ifdef CONFIG_DEBUG_FS
+#include <soc/asr/debugfs-asr.h>
+#endif
+#include <linux/cputype.h>
+#include <trace/events/pxa.h>
+#ifdef CONFIG_PXA_MIPSRAM
+#include <linux/mipsram.h>
+#include <mach/mipsram_pm_event.h>
+#endif
+
+/* Build an APMU register address from its mapped base and an offset */
+#define APMU_REG(apmu_base, x)	(apmu_base + (x))
+/* DDR FC request / timing-table select control */
+#define APMU_MC_HW_SLP_TYPE(c)	APMU_REG(c, 0x00B0)
+/* DDR PHY clock divider control */
+#define APMU_CKPHY_FC_CTRL(c)	APMU_REG(c, 0x015C)
+#define MPMU_REG(mpmu_base, x)	(mpmu_base + (x))
+#define MPMU_CMPR0				(0x400)
+#define MPMU_CMPR1				(0x404)
+/* AP / CP DDR rate-vote registers (one bit per operating point) */
+#define AP_DDR_ROP				(MPMU_CMPR0)
+#define CP_DDR_ROP				(MPMU_CMPR1)
+
+/* DDR frequency-change request bit in APMU_MC_HW_SLP_TYPE */
+#define APMU_DDR_CLK_FC_REQ		(0x1 << 24) 
+
+/* Physical address of the DDR-FC RIPC hardware semaphore */
+#define DDR_FC_RIPC_ADDR	(0xd403d200)
+/* Per-level HW-DFC configuration register: 0x190 + 4 * level */
+#define DFC_LEVEL(c, i)		APMU_REG(c, (0x190 + ((i) << 2)))
+#define DFC_STATUS(c)		APMU_REG(c, 0x188)
+#define DFC_AP(c)		APMU_REG(c, 0x180)
+
+/* APMU interrupt mask / write-clear / status registers */
+#define APMU_IMR(c)			APMU_REG(c, 0x0098)
+#define APMU_IRWC(c)		APMU_REG(c, 0x009C)
+#define APMU_ISR(c)			APMU_REG(c, 0x00A0)
+#define APMU_PLL_SEL_STATUS(c)	APMU_REG(c, 0x00c4)
+
+/* ASR1903 A0+ dclk source select / divider field in APMU_MC_HW_SLP_TYPE */
+#define ASR1903_A0P_DCLK_SRC_MASK	(0x1F << 16)
+#define ASR1903_A0P_DCLK_SRC_SEL_SHIFT	(18)
+#define ASR1903_A0P_DCLK_SRC_DIV_SHIFT	(16)
+
+/* Serializes AP-side access to the DDR-FC RIPC hardware semaphore */
+DEFINE_SPINLOCK(ripc_spinlock);
+EXPORT_SYMBOL(ripc_spinlock);
+
+/* Recursion-capable lock protecting the DDR frequency-change sequence */
+static DEFINE_SPINLOCK(ddr_fc_seq_lock);
+static struct task_struct *ddr_fc_seqlock_owner;	/* current holder */
+static int ddr_fc_seqlock_cnt;				/* nesting depth */
+
+/* MMIO mapping of the DDR-FC RIPC semaphore register (see DDR_FC_RIPC_ADDR) */
+static void __iomem		*hwlock_addr;
+
+/* parameter passed from cmdline to identify DDR mode */
+enum ddr_type ddr_mode = DDR_400M;
+
+/*
+ * Early-param parser for "ddr_mode=<n>": accept the value only when it
+ * lies inside [DDR_400M, DDR_TYPE_MAX); otherwise keep the default and
+ * warn.  Returns 1 so the option is consumed, 0 when no number follows.
+ */
+static int __init __init_ddr_mode(char *arg)
+{
+	int mode;
+
+	if (!get_option(&arg, &mode))
+		return 0;
+
+	if (mode >= DDR_400M && mode < DDR_TYPE_MAX)
+		ddr_mode = mode;
+	else
+		pr_info("WARNING: unknown DDR type!");
+
+	return 1;
+}
+__setup("ddr_mode=", __init_ddr_mode);
+
+/* Values reported in dfc_status.b.dfc_cause: who triggered the DFC */
+enum dfc_cause {
+	CP_LPM_DFC = 0,
+	AP_ACTIVE_DFC = 0x1,
+	CP_ACTIVE_DFC = 0x2,
+	DP_ACTIVE_DFC = 0x4,
+};
+
+/* Bit layout of APMU_CKPHY_FC_CTRL (DDR PHY divider) */
+union pmua_ckphy_fc_ctrl {
+	struct {
+		unsigned int dpll_div:4;
+		unsigned int reserved1:28;
+	} b;
+	unsigned int v;
+};
+
+
+/* Bit layout of DFC_AP: AP frequency-change request + target level */
+union dfc_ap {
+	struct {
+		unsigned int dfc_req:1;
+		unsigned int fl:3;
+		/* rsv bits */
+		unsigned int reserved:28;
+	} b;
+	unsigned int v;
+};
+
+/* Bit layout of DFC_STATUS: in-progress flag, current/target level, cause */
+union dfc_status {
+	struct {
+		unsigned int dfc_status:1;
+		unsigned int cfl:3;
+		unsigned int tfl:3;
+		unsigned int dfc_cause:4;
+		unsigned int reserved:21;
+	} b;
+	unsigned int v;
+};
+
+/* lock declaration */
+/* Clocks whose rate is slaved to the DDR rate (see register_clk_bind2ddr) */
+static LIST_HEAD(ddr_combined_clk_list);
+
+/* Operating point the DDR is currently running at */
+static struct ddr_opt *cur_ddr_op;
+
+static struct clk *clk_dclk;
+
+/* True when the registered DDR clock uses the HW-DFC engine */
+static bool ddr_is_hwdfc = false;
+
+/*
+ * Acquire the DDR-FC hardware semaphore shared with the CP.
+ * Reading the RIPC register returns non-zero while another master holds
+ * it; the spinlock is dropped between polls so interrupts are not kept
+ * off for the whole wait.  Gives up after 200 polls (~200us).
+ * Returns 0 on success, -EBUSY on timeout.
+ */
+int get_fc_ripc_lock(void)
+{
+	int cnt = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ripc_spinlock, flags);
+	while (__raw_readl(hwlock_addr)) {
+		spin_unlock_irqrestore(&ripc_spinlock, flags);
+		cpu_relax();
+		udelay(1);
+		cnt++;
+		if (cnt >= 200) {
+			pr_warn("AP: fail to lock DDR_FC ripc!\n");
+			cnt = 0;
+			return -EBUSY;
+		}
+		spin_lock_irqsave(&ripc_spinlock, flags);
+	}
+	spin_unlock_irqrestore(&ripc_spinlock, flags);
+	return 0;
+}
+
+/* Release the DDR-FC hardware semaphore: writing 1 to the RIPC register
+ * frees it for the other master. */
+void put_fc_ripc_lock(void)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&ripc_spinlock, irq_flags);
+	__raw_writel(1, hwlock_addr);
+	spin_unlock_irqrestore(&ripc_spinlock, irq_flags);
+}
+
+/*
+ * Take the DDR-FC sequence lock with recursion support: if the current
+ * task already owns the lock only the nesting count is bumped, otherwise
+ * we block until the lock is free.  Interrupts are disabled around the
+ * owner/count bookkeeping so it stays consistent with the trylock.
+ */
+static void get_ddr_fc_spinlock(void)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	if (!spin_trylock(&ddr_fc_seq_lock)) {
+		if (ddr_fc_seqlock_owner == current) {
+			/* re-entrant acquisition by the owner */
+			ddr_fc_seqlock_cnt++;
+			local_irq_restore(flags);
+			return;
+		}
+		spin_lock(&ddr_fc_seq_lock);
+	}
+	WARN_ON_ONCE(ddr_fc_seqlock_owner != NULL);
+	WARN_ON_ONCE(ddr_fc_seqlock_cnt != 0);
+	ddr_fc_seqlock_owner = current;
+	ddr_fc_seqlock_cnt = 1;
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Drop one level of the DDR-FC sequence lock; the spinlock itself is
+ * only released when the outermost acquisition is undone.
+ */
+static void put_ddr_fc_spinlock(void)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+
+	WARN_ON_ONCE(ddr_fc_seqlock_owner != current);
+	WARN_ON_ONCE(ddr_fc_seqlock_cnt == 0);
+
+	if (--ddr_fc_seqlock_cnt) {
+		/* still nested: keep holding the lock */
+		local_irq_restore(flags);
+		return;
+	}
+	ddr_fc_seqlock_owner = NULL;
+	spin_unlock(&ddr_fc_seq_lock);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Wait for completion of an AP-triggered HW-DFC.
+ * First busy-poll the AP "fc done" bit (bit 1) in APMU_ISR, then poll
+ * DFC_STATUS until the in-progress bit clears; both timeouts dump the
+ * DFC registers and WARN.  On exit only the AP fc-done bit is cleared
+ * so other masters' status bits are left untouched.
+ */
+static void wait_for_hwdfc_done(void __iomem *apmu_base)
+{
+	int timeout = 10000;
+	int dfc_timeout;
+	union dfc_status status;
+
+	/* polling ISR */
+	while (!((1 << 1) & __raw_readl(APMU_ISR(apmu_base))) && timeout)
+		timeout--;
+	if (timeout <= 0) {
+		/* enhancement to check DFC related status */
+		pr_err("APMU_ISR %x, CUR_DLV %d,"
+				" DFC_AP %x, DFC_STATUS %x\n",
+				__raw_readl(APMU_ISR(apmu_base)),
+				cur_ddr_op->ddr_freq_level,
+				__raw_readl(DFC_AP(apmu_base)),
+				__raw_readl(DFC_STATUS(apmu_base)));
+		WARN(1, "HWDFC frequency change timeout!\n");
+		pr_err("APMU_ISR %x\n",
+			__raw_readl(APMU_ISR(apmu_base)));
+	}
+
+	status.v = __raw_readl(DFC_STATUS(apmu_base));
+	if (!status.b.dfc_status)
+		goto out;
+
+	/* polling hwdfc and may timeout (8 * 10us) */
+	dfc_timeout = 8;
+	while (status.b.dfc_status && dfc_timeout) {
+		dfc_timeout--;
+		udelay(10);
+		status.v = __raw_readl(DFC_STATUS(apmu_base));
+	}
+	/*
+	 * NOTE(review): if dfc_status clears on the very last poll this
+	 * still warns (dfc_timeout == 0) — benign false alarm.
+	 */
+	if (dfc_timeout <= 0) {
+		/* enhancement to check DFC related status */
+		pr_err("APMU_ISR %x, CUR_DLV %d,"
+				" DFC_AP %x, DFC_STATUS %x\n",
+				__raw_readl(APMU_ISR(apmu_base)),
+				cur_ddr_op->ddr_freq_level,
+				__raw_readl(DFC_AP(apmu_base)),
+				__raw_readl(DFC_STATUS(apmu_base)));
+		WARN(1, "HWDFC frequency change timeout!\n");
+		pr_err("APMU_ISR %x\n",
+			__raw_readl(APMU_ISR(apmu_base)));
+	}
+
+out:
+	/* only clear AP fc done signal */
+	__raw_writel(__raw_readl(APMU_ISR(apmu_base)) & ~(1 << 1),
+			APMU_ISR(apmu_base));
+}
+
+/*
+ * Map @rate (MHz) to the index of the lowest operating point whose
+ * dclk satisfies it; rates above the table clamp to the highest OP.
+ */
+static unsigned int ddr_rate2_op_index(struct clk_hw *hw, unsigned int rate)
+{
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	struct ddr_opt *opt = ddr->params->ddr_opt;
+	unsigned int nr_op = ddr->params->ddr_opt_size;
+	unsigned int idx;
+
+	if (unlikely(rate > opt[nr_op - 1].dclk))
+		return nr_op - 1;
+
+	for (idx = 0; idx < nr_op; idx++)
+		if (opt[idx].dclk >= rate)
+			break;
+
+	return idx;
+}
+
+/*
+ * Return the dvc voltage level for @freq: index of the first hwdfc
+ * table entry >= freq, clamped to the last entry when none matches.
+ */
+static int get_ddr_volt_level(struct clk_ddr *ddr, unsigned long freq)
+{
+	unsigned long *tbl = ddr->params->hwdfc_freq_table;
+	int nr = ddr->params->hwdfc_table_size;
+	int lvl;
+
+	for (lvl = 0; lvl < nr; lvl++)
+		if (freq <= tbl[lvl])
+			return lvl;
+
+	return nr - 1;
+}
+
+/*
+ * Fill @cop with the DDR operating point currently programmed in HW:
+ * the source rate comes from cop->ddr_parent and the divider from
+ * APMU_CKPHY_FC_CTRL; dclk = src / (div + 1) / 2 (MHz).
+ * cop->ddr_parent must already be set by the caller.
+ */
+static void get_cur_ddr_op(struct clk_hw *hw,
+		struct ddr_opt *cop)
+{
+	union pmua_ckphy_fc_ctrl ckphy_fc_ctrl;
+	u32 clk_div;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	void __iomem *apmu_base = ddr->params->apmu_base;
+
+	ckphy_fc_ctrl.v = __raw_readl(APMU_CKPHY_FC_CTRL(apmu_base));
+	/* divider is stored pre-shifted by 2 (see clk_ddr_init) */
+	clk_div = (ckphy_fc_ctrl.b.dpll_div >> 2);
+	pr_debug("ckphy_fc_ctrl: 0x%x\n", ckphy_fc_ctrl.v);
+	BUG_ON(!cop->ddr_parent);
+	cop->ddr_clk_src = clk_get_rate(cop->ddr_parent) / MHZ;
+	cop->dclk = cop->ddr_clk_src / (clk_div + 1) / 2;
+}
+
+#ifdef CONFIG_DDR_DEVFREQ
+static struct devfreq_frequency_table *ddr_devfreq_tbl;
+
+/*
+ * Build and register the devfreq frequency table (kHz) from the DDR
+ * operating-point array; the extra terminating entry carries
+ * DEVFREQ_TABLE_END.  Silently does nothing on allocation failure.
+ */
+static void __init_ddr_devfreq_table(struct clk_hw *hw)
+{
+	struct ddr_opt *ddr_opt;
+	unsigned int ddr_opt_size = 0, i = 0;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+
+	ddr_opt_size = ddr->params->ddr_opt_size;
+	/* kmalloc_array() checks the count * size multiplication for overflow */
+	ddr_devfreq_tbl =
+		kmalloc_array(ddr_opt_size + 1,
+			sizeof(struct devfreq_frequency_table), GFP_KERNEL);
+	if (!ddr_devfreq_tbl)
+		return;
+
+	ddr_opt = ddr->params->ddr_opt;
+	for (i = 0; i < ddr_opt_size; i++) {
+		ddr_devfreq_tbl[i].index = i;
+		ddr_devfreq_tbl[i].frequency =
+			ddr_opt[i].dclk * MHZ_TO_KHZ;
+	}
+
+	/* terminator entry */
+	ddr_devfreq_tbl[i].index = i;
+	ddr_devfreq_tbl[i].frequency = DEVFREQ_TABLE_END;
+
+	devfreq_frequency_table_register(ddr_devfreq_tbl, DEVFREQ_DDR);
+}
+#endif
+
+static void asr1803_ddr_fc_seq(struct clk_hw *hw, struct ddr_opt *cop,
+			   struct ddr_opt *top);
+static int __ddr_hwdfc_seq(struct clk_hw *hw, unsigned int level);
+
+/*
+ * One-time initialization of the DDR clock:
+ *  - resolve parent clocks and pre-compute each OP's divider;
+ *  - for HW-DFC parts, program the per-level DFC_LEVEL registers
+ *    (voltage level deliberately left 0 at this stage);
+ *  - detect the boot operating point and align SW state with HW;
+ *  - register the devfreq table, map the RIPC semaphore and publish
+ *    the boot OP vote in AP_DDR_ROP.
+ */
+static void clk_ddr_init(struct clk_hw *hw)
+{
+	struct clk *parent, *clk;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	struct ddr_opt *ddr_opt, *cop, cur_op;
+	unsigned int ddr_opt_size = 0, i;
+	unsigned int op_index;
+	struct parents_table *parent_table = ddr->params->parent_table;
+	int parent_table_size = ddr->params->parent_table_size;
+#ifdef CONFIG_ASR_CLK_DCSTAT
+	unsigned int idx = 0;
+	unsigned long op[MAX_OP_NUM];
+#endif
+	unsigned int value, volt_level;
+	unsigned int val;
+	void __iomem *apmu_base = ddr->params->apmu_base;
+	struct dfc_level_reg_offset *offset = ddr->params->dfc_level_reg_offset;
+	ddr_opt = ddr->params->ddr_opt;
+	ddr_opt_size = ddr->params->ddr_opt_size;
+
+	/* resolve all possible parent clocks by name */
+	for (i = 0; i < parent_table_size; i++) {
+		clk = __clk_lookup(parent_table[i].parent_name);
+		if (!IS_ERR(clk))
+			parent_table[i].parent = clk;
+		else
+			pr_err("%s : can't find clk %s\n", __func__,
+			parent_table[i].parent_name);
+	}
+
+	pr_info("dclk(src:sel,div,tblindex):: ");
+	for (i = 0; i < ddr_opt_size; i++) {
+		cop = &ddr_opt[i];
+		parent = hwsel2parent(parent_table, parent_table_size,
+				cop->ddr_clk_sel);
+		BUG_ON(IS_ERR(parent));
+		cop->ddr_parent = parent;
+		cop->ddr_clk_src =
+			clk_get_rate(parent) / MHZ;
+		/* dclk is half the post-divider rate: div = src/(2*dclk) - 1 */
+		cop->dclk_div =
+			cop->ddr_clk_src / (2 * cop->dclk) - 1;
+		/* stored pre-shifted by 2 to match the register field */
+		cop->dclk_div = (cop->dclk_div << 2);
+
+		printk(KERN_CONT " %d(%d:%d,%d,%d); ",
+			cop->dclk, cop->ddr_clk_src,
+			cop->ddr_clk_sel, cop->dclk_div,
+			cop->ddr_tbl_index);
+
+		if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
+			cop->ddr_freq_level = i;
+			value = __raw_readl(DFC_LEVEL(apmu_base, i));
+#ifdef CONFIG_CPU_ASR1903
+			/* A0+ steppings also carry the source select per level */
+			if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
+				value &= ~(MASK(offset->dclksel_width) <<
+						offset->dclksel_shift);
+				value |= (cop->ddr_clk_sel &
+						MASK(offset->dclksel_width)) <<
+						offset->dclksel_shift;
+			}
+#endif
+			value &= ~(MASK(offset->ddr_clk_div_width) <<
+					offset->ddr_clk_div_shift);
+			value |= ((cop->dclk_div >> 2) &
+					MASK(offset->ddr_clk_div_width)) <<
+					offset->ddr_clk_div_shift;
+
+			value &= ~(MASK(offset->mc_table_num_width) <<
+					offset->mc_table_num_shift);
+			value |= (cop->ddr_tbl_index &
+					MASK(offset->mc_table_num_width)) <<
+					offset->mc_table_num_shift;
+
+			/* Dvc level is filled as 0 here */
+			value &= ~(MASK(offset->volt_level_width) <<
+					offset->volt_level_shift);
+			value |= (0 & MASK(offset->volt_level_width)) <<
+					offset->volt_level_shift;
+			__raw_writel(value, DFC_LEVEL(apmu_base, i));
+		}
+	}
+	printk(KERN_CONT "\n");
+	/* read back the OP the bootloader left the DDR at */
+	cur_op = ddr_opt[0];
+	get_cur_ddr_op(hw, &cur_op);
+	op_index = ddr_rate2_op_index(hw, cur_op.dclk);
+	cur_ddr_op = &ddr_opt[op_index];
+	if ((cur_op.ddr_clk_src != cur_ddr_op->ddr_clk_src) ||
+		(cur_op.dclk != cur_ddr_op->dclk)) {
+		/* NOTE(review): WARN_ON on a string literal is always true */
+		WARN_ON("Boot DDR PP is not supported!");
+		if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
+			__ddr_hwdfc_seq(hw, 0);
+			__ddr_hwdfc_seq(hw, op_index);
+		} else
+			asr1803_ddr_fc_seq(hw, &cur_op, cur_ddr_op);
+	}
+
+	if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
+		ddr_is_hwdfc = true;
+
+		val = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
+		/* enable tbl based FC */
+		val &= ~(0x1 << ddr->params->ddr_offset->tbl_enable_shift);
+		__raw_writel(val, APMU_MC_HW_SLP_TYPE(apmu_base));
+		/*
+		 * HW thinks default DFL is 0, we have to make sure HW
+		 * get the correct DFL by first change it to 0, then change
+		 * it to current DFL
+		 */
+		asr1803_ddr_fc_seq(hw, &cur_op, &ddr_opt[0]);
+		__ddr_hwdfc_seq(hw, 0);	
+		__ddr_hwdfc_seq(hw, op_index);
+		/*
+		 * Fill dvc level in DFC_LEVEL, this will not trigger dvc
+		 * Level change since default level is 0 for all DFC_LEVEL regs
+		 */
+		for (i = 0; i < ddr_opt_size; i++) {
+			cop = &ddr_opt[i];
+			volt_level = get_ddr_volt_level(ddr,
+					cop->dclk * MHZ_TO_KHZ);
+			value = __raw_readl(DFC_LEVEL(apmu_base, i));
+			value &= ~(MASK(offset->volt_level_width) <<
+					offset->volt_level_shift);
+			value |= (volt_level &
+					MASK(offset->volt_level_width)) <<
+					offset->volt_level_shift;
+			__raw_writel(value, DFC_LEVEL(apmu_base, i));
+		}
+	}
+
+	/* hw->clk->rate = ddr_opt[op_index].dclk * MHZ; */
+	clk_set_rate(hw->clk, ddr_opt[op_index].dclk * MHZ);
+#ifdef CONFIG_ASR_CLK_DCSTAT
+	if (ddr->params->dcstat_support) {
+		idx = 0;
+		for (i = 0; i < ddr_opt_size; i++) {
+			cop = &ddr_opt[i];
+			op[idx++] = cop->dclk * MHZ;
+		}
+		clk_register_dcstat(hw->clk, op, idx);
+	}
+#endif
+
+	clk_dclk = hw->clk;
+#ifdef CONFIG_DDR_DEVFREQ
+	__init_ddr_devfreq_table(hw);
+#endif
+	hwlock_addr = ioremap(DDR_FC_RIPC_ADDR, 0x10);
+	if (!hwlock_addr)
+		pr_err("Cannot map RIPC IOMEM\n");
+	get_fc_ripc_lock();
+	/* write initial value into AP_DDR_ROP */
+	__raw_writel((0x1 << op_index),
+		MPMU_REG(ddr->params->mpmu_base, AP_DDR_ROP));
+	put_fc_ripc_lock();
+}
+
+/*
+ * Round @rate up to the nearest supported DDR operating point;
+ * rates above the table are clamped to the highest OP.
+ */
+static long clk_ddr_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long * parent_rate)
+{
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	struct ddr_opt *opt = ddr->params->ddr_opt;
+	unsigned int nr_op = ddr->params->ddr_opt_size;
+	unsigned long mhz = rate / MHZ;
+	unsigned int idx;
+
+	if (unlikely(mhz > opt[nr_op - 1].dclk))
+		return opt[nr_op - 1].dclk * MHZ;
+
+	for (idx = 0; idx < nr_op; idx++)
+		if (opt[idx].dclk >= mhz)
+			break;
+
+	return opt[idx].dclk * MHZ;
+}
+
+/*
+ * Poll DFC_STATUS (up to 200 * 5us) until the AP request is no longer
+ * pending: either the current level already reached @expected_lvl and
+ * the engine is idle, or it is busy on behalf of another master.
+ * Returns false when the AP request completed/was absorbed, true when
+ * it is still in progress after the timeout.
+ */
+static inline bool check_hwdfc_inpro(void __iomem *apmu_base,
+					 unsigned int expected_lvl)
+{
+	union dfc_status status;
+	int max_delay = 200;
+	while (max_delay) {
+		status.v = __raw_readl(DFC_STATUS(apmu_base));
+		if ((expected_lvl <= status.b.cfl) &&
+		   ((!status.b.dfc_status) ||
+		   (status.b.dfc_status &&
+		   (status.b.dfc_cause != AP_ACTIVE_DFC))))
+			return false;
+		udelay(5);
+		max_delay--;
+	}
+	return true;
+}
+
+/*
+ * Core HW-DFC sequence: wait for any CP/MSA-initiated DFC to drain,
+ * then write the target @level plus the request bit into DFC_AP and
+ * wait for completion.  Returns 0 on success, -EAGAIN if the engine
+ * stayed busy and the request was never issued.
+ */
+static int __ddr_hwdfc_seq(struct clk_hw *hw, unsigned int level)
+{
+	unsigned int dfc_ap;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	struct dfc_ap_reg_offset *offset = ddr->params->dfc_ap_reg_offset;
+	void __iomem *apmu_base = ddr->params->apmu_base;
+	bool inpro = false;
+	union dfc_status status;
+	int max_delay = 100;
+
+	/* wait for DFC triggered by CP/MSA is done */
+	status.v = __raw_readl(DFC_STATUS(apmu_base));
+	while (max_delay && status.b.dfc_status) {
+		udelay(10);
+		max_delay--;
+		status.v = __raw_readl(DFC_STATUS(apmu_base));
+	}
+	if (unlikely(max_delay <= 0)) {
+		WARN(1, "AP cannot start HWDFC as DFC is in progress!\n");
+		pr_err("DFCAP %x, DFCSTATUS %x,\n",
+			__raw_readl(DFC_AP(apmu_base)),
+			__raw_readl(DFC_STATUS(apmu_base)));
+		return -EAGAIN;
+	}
+	/* Check if AP ISR is set, if set, clear it */
+	prefc_check_isr(apmu_base);
+
+	/* trigger AP HWDFC: program target level and set the request bit */
+	dfc_ap = __raw_readl(DFC_AP(apmu_base));
+	dfc_ap &= ~(MASK(offset->freq_level_width) <<
+			offset->freq_level_shift);
+	dfc_ap |= (level & MASK(offset->freq_level_width)) <<
+			offset->freq_level_shift;
+	dfc_ap |= 1 << offset->dfc_req_shift;
+	__raw_writel(dfc_ap, DFC_AP(apmu_base));
+
+	/* Check dfc status and done */
+	inpro = check_hwdfc_inpro(apmu_base, level);
+	if (likely(!inpro)) {
+		/* wait for dfc_status to become 0 */
+		wait_for_hwdfc_done(apmu_base);
+	} else {
+		WARN(1, "HW-DFC failed! expect LV %d\n", level);
+		pr_err("DFCAP %x, DFCSTATUS %x, PLLSEL %x\n",
+			__raw_readl(DFC_AP(apmu_base)),
+			__raw_readl(DFC_STATUS(apmu_base)),
+			__raw_readl(APMU_PLL_SEL_STATUS(apmu_base)));
+	}
+	return 0;
+}
+
+/* Trace-wrapped HW-DFC: switch from @cop to @top's frequency level. */
+static int ddr_hwdfc_seq(struct clk_hw *hw, struct ddr_opt *cop,
+			  struct ddr_opt *top)
+{
+	int rc;
+
+	trace_pxa_ddr_clk_chg(CLK_CHG_ENTRY, cop->dclk, top->dclk);
+	rc = __ddr_hwdfc_seq(hw, top->ddr_freq_level);
+	trace_pxa_ddr_clk_chg(CLK_CHG_EXIT, cop->dclk, top->dclk);
+
+	return rc;
+}
+
+/*
+ * HW-DFC rate change from @old to @new.  The new parent is enabled
+ * before the switch and the old one released only after success, so the
+ * active clock source is never gated mid-transition.  Interrupts are
+ * disabled across the hardware sequence.  Returns 0 or -EAGAIN.
+ */
+static int set_hwdfc_freq(struct clk_hw *hw, struct ddr_opt *old,
+			  struct ddr_opt *new)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("DDR set_freq start: old %u, new %u\n",
+		old->dclk, new->dclk);
+
+	clk_prepare_enable(new->ddr_parent);
+	local_irq_save(flags);
+	ret = ddr_hwdfc_seq(hw, old, new);
+	if (unlikely(ret == -EAGAIN)) {
+		/* still stay at old freq and src */
+		local_irq_restore(flags);
+		clk_disable_unprepare(new->ddr_parent);
+		goto out;
+	}
+	local_irq_restore(flags);
+	clk_disable_unprepare(old->ddr_parent);
+
+	pr_debug("DDR set_freq end: old %u, new %u\n",
+		old->dclk, new->dclk);
+out:
+	return ret;
+}
+
+/* Program the DDR timing-table index field of APMU_MC_HW_SLP_TYPE. */
+static void set_ddr_tbl_index(struct clk_hw *hw, unsigned int index)
+{
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	void __iomem *apmu_base = ddr->params->apmu_base;
+	struct ddr_reg_offset *off = ddr->params->ddr_offset;
+	unsigned int val;
+
+	index &= MASK(off->tbl_index_width);
+
+	/* read-modify-write: replace only the table-index field */
+	val = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
+	val &= ~(MASK(off->tbl_index_width) << off->tbl_index_shift);
+	val |= index << off->tbl_index_shift;
+	__raw_writel(val, APMU_MC_HW_SLP_TYPE(apmu_base));
+}
+
+/*
+ * Legacy (non-HW-DFC) DDR frequency-change sequence: program the table
+ * index, the PHY divider and the FC request bit, then poll until the
+ * request bit self-clears.  Register write order (divider before the
+ * SLP_TYPE write with the request bit, separated by dmb()) matters.
+ */
+static void asr1803_ddr_fc_seq(struct clk_hw *hw, struct ddr_opt *cop,
+			   struct ddr_opt *top)
+{
+	unsigned int regval = 0;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	void __iomem *apmu_base = ddr->params->apmu_base;
+	union pmua_ckphy_fc_ctrl ckphy_fc_ctrl;
+	int timeout = 200000;
+
+	trace_pxa_ddr_clk_chg(CLK_CHG_ENTRY, cop->dclk, top->dclk);
+	/* 0.2) Check if AP ISR is set, if set, clear it */
+	prefc_check_isr(apmu_base);
+
+	ckphy_fc_ctrl.v = __raw_readl(APMU_CKPHY_FC_CTRL(apmu_base));
+	regval = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
+	/* 2) issue DDR FC */
+	if ((cop->ddr_clk_src != top->ddr_clk_src) ||
+	   (cop->dclk != top->dclk)) {
+		/* 2.2) enable tbl based FC and set DDR tbl num */
+		set_ddr_tbl_index(hw, top->ddr_tbl_index);
+
+		/* 2.3) select div for dclk */
+		ckphy_fc_ctrl.b.dpll_div = top->dclk_div;
+
+		/* re-read: set_ddr_tbl_index() just rewrote this register */
+		regval = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
+#ifdef CONFIG_CPU_ASR1903
+		if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
+			regval &= ~ASR1903_A0P_DCLK_SRC_MASK;
+			regval |= ((top->dclk_div >> 2) << ASR1903_A0P_DCLK_SRC_DIV_SHIFT);
+			regval |= (top->ddr_clk_sel << ASR1903_A0P_DCLK_SRC_SEL_SHIFT);
+		}
+#endif
+		/* 2.4) set ddr FC req bit */
+		regval |= (0x1 << 24);
+	}
+
+	__raw_writel(ckphy_fc_ctrl.v, APMU_CKPHY_FC_CTRL(apmu_base));
+	/* 3) set div and FC req bit trigger DDR FC */
+	pr_debug("DDR FC APMU_CKPHY_FC_CTRL[%x]\n", ckphy_fc_ctrl.v);
+	pr_debug("DDR FC APMU_MC_HW_SLP_TYPE[%x]\n", regval);
+	dmb();
+	__raw_writel(regval, APMU_MC_HW_SLP_TYPE(apmu_base));
+
+	/* hardware clears the request bit when the FC completes */
+	while ((APMU_DDR_CLK_FC_REQ & __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)))
+			&& timeout)
+		timeout--;
+
+	if (timeout <= 0) {
+		pr_err("APMU_MC_HW_SLP_TYPE %x, fc_type DDR_FC\n",
+				__raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)));
+		WARN(1, "AP core frequency change timeout!\n");
+	}
+
+	trace_pxa_ddr_clk_chg(CLK_CHG_EXIT, cop->dclk, top->dclk);
+}
+
+/*
+ * Legacy DDR rate change from @old to @new without the RIPC handshake:
+ * verify the hardware really is at @old, run the FC sequence with IRQs
+ * off, then verify it landed on @new.  Returns 0 or -EAGAIN when the
+ * readback does not match the requested OP.
+ */
+static int asr1803_set_ddr_freq(struct clk_hw *hw, struct ddr_opt *old,
+			struct ddr_opt *new)
+{
+	struct ddr_opt cop;
+	struct clk *ddr_old_parent;
+	void __iomem *apmu_base;
+	int ret = 0;
+	struct ddr_opt *ddr_opt;
+	struct clk_ddr *ddr;
+	unsigned long flags;
+	ddr = to_clk_ddr(hw);
+	ddr_opt = ddr->params->ddr_opt;
+	apmu_base = ddr->params->apmu_base;
+
+	pr_debug("DDR set_freq start: old %u, new %u\n",
+		old->dclk, new->dclk);
+
+	/* sanity check: HW state should match what SW believes is current */
+	cop = *old;
+	get_cur_ddr_op(hw, &cop);
+	if (unlikely((cop.ddr_clk_src != old->ddr_clk_src) ||
+		(cop.dclk != old->dclk))) {
+		pr_err(" dsrc dclk");
+		pr_err("OLD %d %d\n", old->ddr_clk_src, old->dclk);
+		pr_err("CUR %d %d\n", cop.ddr_clk_src, cop.dclk);
+		pr_err("NEW %d %d\n", new->ddr_clk_src, new->dclk);
+		dump_stack();
+	}
+
+	ddr_old_parent = cop.ddr_parent;
+	clk_prepare_enable(new->ddr_parent);
+
+	/* Get lock in irq disable status to short AP hold lock time */
+	local_irq_save(flags);
+	asr1803_ddr_fc_seq(hw, &cop, new);
+	local_irq_restore(flags);
+
+	/* verify the switch actually happened */
+	cop = *new;
+	get_cur_ddr_op(hw, &cop);
+	if (unlikely((cop.ddr_clk_src != new->ddr_clk_src) ||
+	   (cop.dclk != new->dclk))) {
+		clk_disable_unprepare(new->ddr_parent);
+		pr_err("DDR:unsuccessful frequency change!\n");
+		pr_err(" dsrc dclk");
+		pr_err("CUR %d %d\n", cop.ddr_clk_src, cop.dclk);
+		pr_err("NEW %d %d\n", new->ddr_clk_src, new->dclk);
+		pr_err("APMU_CKPHY_FC_CTRL %x, "
+			    "APMU_MC_HW_SLP_TYPE %x\n",
+			__raw_readl(APMU_CKPHY_FC_CTRL(apmu_base)),
+			__raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)));
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	clk_disable_unprepare(ddr_old_parent);
+	pr_debug("DDR set_freq end: old %u, new %u\n",
+		old->dclk, new->dclk);
+out:
+	return ret;
+}
+
+/*
+ * Legacy DDR rate change arbitrated with the CP via the RIPC semaphore
+ * and the AP/CP vote registers: the CP publishes its required OP as a
+ * bitmask in CP_DDR_ROP; the effective target is the max of the AP
+ * request and the CP vote.  On every exit path the AP's own vote is
+ * written to AP_DDR_ROP and the semaphore released.
+ * Returns 0, -EBUSY (semaphore timeout) or -EAGAIN (FC readback
+ * mismatch).
+ */
+static int asr1803_ripc_set_ddr_freq(struct clk_hw *hw, struct ddr_opt *old,
+			struct ddr_opt *new)
+{
+	struct ddr_opt cop;
+	struct clk *ddr_old_parent;
+	void __iomem *apmu_base;
+	int ret = 0;
+	struct ddr_opt *ddr_opt;
+	struct clk_ddr *ddr;
+	unsigned long flags;
+	u32 cp_ddr_rop, ap_ddr_rop, value;
+	u32 target_ddr_op, cp_ddr_cop, ap_ddr_cop;
+	ddr = to_clk_ddr(hw);
+	ddr_opt = ddr->params->ddr_opt;
+	apmu_base = ddr->params->apmu_base;
+
+	ap_ddr_cop = ddr_rate2_op_index(hw, old->dclk);
+	cop = *old;
+
+	/* Get lock in irq disable status to short AP hold lock time */
+	local_irq_save(flags);
+	ret = get_fc_ripc_lock();
+	if (ret) {
+		local_irq_restore(flags);
+		return ret;
+	}
+	get_cur_ddr_op(hw, &cop);
+	pr_debug("DDR set_freq start: old %u, new %u\n",
+		old->dclk, new->dclk);
+
+	ap_ddr_rop = ddr_rate2_op_index(hw, new->dclk);
+
+	/* get current cp op */
+	cp_ddr_rop = __raw_readl(MPMU_REG(ddr->params->mpmu_base, CP_DDR_ROP));
+	/* check cur op from high to low */
+	if (cp_ddr_rop & (0x1 << 2))
+		cp_ddr_cop = 2;
+	else if (cp_ddr_rop & (0x1 << 1))
+		cp_ddr_cop = 1;
+	else
+		cp_ddr_cop = 0;
+
+	/* no FC needed when already at target, or the CP vote dominates */
+	if ((ap_ddr_rop == ap_ddr_cop) ||
+		(ap_ddr_cop <= cp_ddr_cop && ap_ddr_rop <= cp_ddr_cop))
+		goto out;
+
+	target_ddr_op = max(ap_ddr_rop, cp_ddr_cop);
+	if (target_ddr_op >= ddr->params->ddr_opt_size) {
+		pr_warn("!!!!!!!!!!!!!!! ddr target_opt: %d\n", target_ddr_op);
+		target_ddr_op = ddr->params->ddr_opt_size - 1;
+	}
+
+	/* update new ddr opt */
+	if (ddr_opt[target_ddr_op].dclk > new->dclk)
+		new = &ddr_opt[target_ddr_op];
+
+	ddr_old_parent = cop.ddr_parent;
+	clk_prepare_enable(new->ddr_parent);
+	asr1803_ddr_fc_seq(hw, &cop, new);
+
+	cop = *new;
+	get_cur_ddr_op(hw, &cop);
+	/* NOTE(review): this recomputed target_ddr_op is never used */
+	target_ddr_op = ddr_rate2_op_index(hw, cop.dclk);
+
+	if (unlikely((cop.ddr_clk_src != new->ddr_clk_src) ||
+	   (cop.dclk != new->dclk))) {
+		clk_disable_unprepare(new->ddr_parent);
+		pr_err("DDR:unsuccessful frequency change!\n");
+		pr_err(" dsrc dclk");
+		pr_err("CUR %d %d\n", cop.ddr_clk_src, cop.dclk);
+		pr_err("NEW %d %d\n", new->ddr_clk_src, new->dclk);
+		pr_err("APMU_CKPHY_FC_CTRL %x, "
+			    "APMU_MC_HW_SLP_TYPE %x\n",
+			__raw_readl(APMU_CKPHY_FC_CTRL(apmu_base)),
+			__raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)));
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	clk_disable_unprepare(ddr_old_parent);
+	pr_debug("DDR set_freq end: old %u, new %u\n",
+		old->dclk, new->dclk);
+
+out:
+	/* write ap_ddr_rop into AP_DDR_ROP */
+	ap_ddr_rop = (0x1 << ap_ddr_rop);
+	__raw_writel(ap_ddr_rop, MPMU_REG(ddr->params->mpmu_base, AP_DDR_ROP));
+	value = __raw_readl(MPMU_REG(ddr->params->mpmu_base, AP_DDR_ROP));
+	if (value != ap_ddr_rop)
+		pr_err("AP_DDR_ROP Write failure: target 0x%x, final value 0x%X\n",
+		ap_ddr_rop, value);
+	put_fc_ripc_lock();
+	local_irq_restore(flags);
+	return ret;
+}
+
+/*
+ * Bind @clk's rate to the DDR rate via @relationtbl (dclk -> combclk
+ * mapping, capped at @max_freq).  Registering the same clk twice is a
+ * no-op.  Returns 0 on success, -ENOMEM on allocation failure.
+ *
+ * Note: the original walked the list with list_for_each_entry() and
+ * then dereferenced the cursor after a complete traversal — at that
+ * point it aliases the list head, not a real entry (invalid for an
+ * empty list or when @clk is absent).  Return early on a match instead.
+ */
+int register_clk_bind2ddr(struct clk *clk, unsigned long max_freq,
+			  struct ddr_combclk_relation *relationtbl,
+			  unsigned int num_relationtbl)
+{
+	struct ddr_combined_clk *comclk;
+
+	/* already registered? */
+	list_for_each_entry(comclk, &ddr_combined_clk_list, node)
+		if (comclk->clk == clk)
+			return 0;
+
+	comclk = kzalloc(sizeof(struct ddr_combined_clk), GFP_KERNEL);
+	if (!comclk)
+		return -ENOMEM;
+
+	comclk->clk = clk;
+	comclk->maxrate = max_freq;
+	comclk->relationtbl = relationtbl;
+	comclk->num_relationtbl = num_relationtbl;
+	list_add(&comclk->node, &ddr_combined_clk_list);
+	return 0;
+}
+
+/*
+ * Propagate a DDR rate change to every bound clock: find the relation
+ * table row whose dclk range contains @ddr_rate and set the bound clock
+ * to min(row rate, clk max).  Returns the last clk_set_rate() error (0
+ * if all succeeded).
+ */
+static int trigger_bind2ddr_clk_rate(unsigned long ddr_rate)
+{
+	struct ddr_combined_clk *comclk;
+	unsigned long tgt, cur;
+	int ret = 0, i = 0;
+	list_for_each_entry(comclk, &ddr_combined_clk_list, node) {
+		/*
+		 * Also skip empty tables: num_relationtbl is unsigned, so
+		 * "num_relationtbl - 1" would wrap to a huge loop bound.
+		 */
+		if (!comclk->relationtbl || !comclk->num_relationtbl)
+			continue;
+		i = 0;
+		while (i < comclk->num_relationtbl - 1) {
+			if ((ddr_rate >= comclk->relationtbl[i].dclk_rate) &&
+			   (ddr_rate < comclk->relationtbl[i + 1].dclk_rate))
+				break;
+			i++;
+		}
+		tgt = min(comclk->relationtbl[i].combclk_rate, comclk->maxrate);
+		pr_debug("%s Start rate change to %lu\n",
+			__clk_get_name(comclk->clk), tgt);
+		ret = clk_set_rate(comclk->clk, tgt);
+		if (ret) {
+			pr_info("%s failed to change clk %s rate\n",
+				__func__, __clk_get_name(comclk->clk));
+			continue;
+		}
+		/* verify the framework actually delivered the target rate */
+		cur = clk_get_rate(comclk->clk);
+		if (cur != tgt) {
+			pr_info("clk %s: cur %lu, tgt %lu\n",
+					__clk_get_name(comclk->clk), cur, tgt);
+			WARN_ON(1);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * clk_ops.set_rate: pick the OP for @rate and dispatch to the HW-DFC,
+ * RIPC-arbitrated or plain FC path depending on the clk's flags.  The
+ * FC mutex and the recursive sequence lock bracket the change; bound
+ * clocks are re-rated afterwards (also when the OP did not change).
+ */
+static int clk_ddr_setrate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct ddr_opt *md_new, *md_old;
+	unsigned int index;
+	struct ddr_opt *ddr_opt;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	int ret = 0;
+	ddr_opt = ddr->params->ddr_opt;
+
+	rate /= MHZ;
+	index = ddr_rate2_op_index(hw, rate);
+	md_new = &ddr_opt[index];
+	if (md_new == cur_ddr_op)
+		goto out;
+
+	acquire_fc_mutex();
+	md_old = cur_ddr_op;
+
+	get_ddr_fc_spinlock();
+	if (likely(ddr->flags & MMP_DDR_HWDFC_FEAT))
+		ret = set_hwdfc_freq(hw, md_old, md_new);
+	else if (likely(ddr->flags & MMP_DDR_RIPC_LOCK_FC))
+		ret = asr1803_ripc_set_ddr_freq(hw, md_old, md_new);
+	else
+		ret = asr1803_set_ddr_freq(hw, md_old, md_new);
+	put_ddr_fc_spinlock();
+	release_fc_mutex();
+	if (ret)
+		goto err;
+
+	cur_ddr_op = md_new;
+	clk_hw_reparent(hw, __clk_get_hw(md_new->ddr_parent));
+
+out:
+	trigger_bind2ddr_clk_rate(rate * MHZ);
+err:
+	return ret;
+}
+
+/*
+ * clk_ops.recalc_rate: report the current DDR rate in Hz.
+ * HW-DFC parts read it back from the DFC registers (AP request level or
+ * current level from DFC_STATUS); legacy parts use the cached OP.
+ *
+ * Fix: the fallback previously read dfc_ap.b.fl from an uninitialized
+ * union in the non-AP_ONLY branch — read DFC_AP before using it.
+ */
+static unsigned long clk_ddr_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	void __iomem *apmu_base = ddr->params->apmu_base;
+	union dfc_status dfc_status;
+	union dfc_ap dfc_ap;
+	struct ddr_opt *ddr_opt = ddr->params->ddr_opt;
+	u32 dfc_lvl;
+
+	if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
+		if (ddr->flags & MMP_DDR_RATE_AP_ONLY) {
+			dfc_ap.v = __raw_readl(DFC_AP(apmu_base));
+			return ddr_opt[dfc_ap.b.fl].dclk * MHZ;
+		} else {
+			dfc_status.v = __raw_readl(DFC_STATUS(apmu_base));
+			dfc_lvl = dfc_status.b.cfl;
+			if (dfc_lvl >= ddr->params->ddr_opt_size) {
+				/* bogus current level: fall back to the AP request */
+				dfc_ap.v = __raw_readl(DFC_AP(apmu_base));
+				dfc_lvl = dfc_ap.b.fl;
+			}
+			return ddr_opt[dfc_lvl].dclk * MHZ;
+		}
+	} else {
+		if (cur_ddr_op)
+			return cur_ddr_op->dclk * MHZ;
+		else
+			pr_err("%s: cur_ddraxi_op NULL\n", __func__);
+	}
+
+	return 0;
+}
+
+/*
+ * clk_ops.get_parent: map the hardware source-select value (always 0 on
+ * these parts) to the matching registered parent index.
+ *
+ * Fix: clk_get_sys() returns ERR_PTR() on failure, never NULL, so the
+ * old "!parent" check could pass an error pointer to __clk_get_name();
+ * use IS_ERR_OR_NULL().  Unused locals (clk, apmu_base) dropped.
+ */
+static u8 clk_ddr_get_parent(struct clk_hw *hw)
+{
+	struct clk *parent;
+	u32 src_sel;
+	struct clk_ddr *ddr = to_clk_ddr(hw);
+	struct parents_table *parent_table = ddr->params->parent_table;
+	int parent_table_size = ddr->params->parent_table_size;
+	u8 i = 0;
+
+	/*there is only one DDR clock source for 1802s*/
+	src_sel = 0;
+	for (i = 0; i < parent_table_size; i++) {
+		if (parent_table[i].hw_sel_val == src_sel)
+			break;
+	}
+	if (i == parent_table_size) {
+		pr_err("%s: Cannot find parent for ddr!\n", __func__);
+		BUG_ON(1);
+	}
+	parent = clk_get_sys(NULL, parent_table[i].parent_name);
+	WARN_ON(IS_ERR_OR_NULL(parent));
+	if (!IS_ERR_OR_NULL(parent)) {
+		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+			if (!strcmp(clk_hw_get_name(clk_hw_get_parent_by_index(hw, i)),
+					__clk_get_name(parent)))
+				break;
+		}
+	}
+	return i;
+}
+
+/* Interface used to get ddr op num */
+/*
+ * Interface used to get ddr op num.
+ * Fix: __clk_lookup() returns NULL (not ERR_PTR) when the clock is not
+ * found, so the old IS_ERR() check could never catch the failure.
+ */
+unsigned int get_ddr_op_num(void)
+{
+	struct clk_ddr *ddr;
+	struct clk *clk = __clk_lookup("ddr");
+
+	if (!clk) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	ddr = to_clk_ddr(__clk_get_hw(clk));
+	return ddr->params->ddr_opt_size;
+}
+
+/* Interface used to get ddr avaliable rate, unit khz */
+unsigned int get_ddr_op_rate(unsigned int index)
+{
+	struct ddr_opt *ddr_opt;
+	struct clk_ddr *ddr;
+	struct clk *clk = __clk_lookup("ddr");
+	if (IS_ERR(clk)) {
+		WARN_ON(1);
+		return 0;
+	} else {
+		ddr = to_clk_ddr(__clk_get_hw(clk));
+		if (index >= ddr->params->ddr_opt_size) {
+			pr_err("%s index out of range!\n", __func__);
+			return -EINVAL;
+		}
+
+		ddr_opt = ddr->params->ddr_opt;
+		return ddr_opt[index].dclk * MHZ_TO_KHZ;
+	}
+}
+
+/* clk_ops vtable for the composite DDR clock registered below */
+struct clk_ops ddr_clk_ops = {
+	.init = clk_ddr_init,
+	.round_rate = clk_ddr_round_rate,
+	.set_rate = clk_ddr_setrate,
+	.recalc_rate = clk_ddr_recalc_rate,
+	.get_parent = clk_ddr_get_parent,
+};
+
+/*
+ * Allocate and register the composite DDR clock.
+ * Returns the clk on success, NULL on any failure — callers in this
+ * file test the result with "if (!clk)", so the error return from
+ * clk_register() (an ERR_PTR) is normalized to NULL instead of being
+ * passed through where it would look like a valid clock.
+ */
+struct clk *mmp_clk_register_ddr(const char *name, const char **parent_name,
+		u8 num_parents, unsigned long flags, u32 ddr_flags,
+		spinlock_t *lock, struct ddr_params *params)
+{
+	struct clk_ddr *ddr;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	ddr = kzalloc(sizeof(*ddr), GFP_KERNEL);
+	if (!ddr)
+		return NULL;
+
+	init.name = name;
+	init.ops = &ddr_clk_ops;
+	init.flags = flags;
+	init.parent_names = parent_name;
+	init.num_parents = num_parents;
+
+	ddr->flags = ddr_flags;
+	ddr->lock = lock;
+	ddr->params = params;
+	ddr->hw.init = &init;
+
+	clk = clk_register(NULL, &ddr->hw);
+	if (IS_ERR(clk)) {
+		kfree(ddr);
+		return NULL;
+	}
+
+	return clk;
+}
+
+/* Lock handed to the clk framework for the registered DDR clock */
+static DEFINE_SPINLOCK(fc_seq_lock);
+
+/* APMU_MC_HW_SLP_TYPE field positions on ASR1803 */
+struct ddr_reg_offset asr1803_ddr_reg_off = {
+	.tbl_enable_shift = 7,
+	.tbl_index_shift = 5,
+	.tbl_index_width = 2,
+};
+
+/* Possible DDR clock sources and their hardware select values */
+struct parents_table asr1803_ddr_parent_table[] = {
+	{
+		.parent_name = "dpll_1066",
+		.hw_sel_val = 0x0,
+	},
+};
+
+static const char *asr1803_ddr_parent[] = {"dpll_1066",};
+
+/* DDR operating points (dclk in MHz) for LPDDR @ 533 */
+static struct ddr_opt asr1803_lpddr533_oparray[] = {
+	{
+		.dclk = 266,
+		.ddr_tbl_index = 0,
+		.ddr_clk_sel = 0x0,
+		.ddr_freq_level = 0,
+	},
+	{
+		.dclk = 533,
+		.ddr_tbl_index = 1,
+		.ddr_clk_sel = 0x0,
+		.ddr_freq_level = 1,
+	},
+};
+
+#ifdef CONFIG_CPU_ASR1903
+/* ASR1903 A0+ variants: same source but separate tables so clk_ddr_init
+ * can patch them independently of the 1803 defaults */
+struct parents_table asr1903_a0p_ddr_parent_table[] = {
+	{
+		.parent_name = "dpll_1066",
+		.hw_sel_val = 0x0,
+	},
+};
+
+static struct ddr_opt asr1903_lpddr533_oparray[] = {
+	{
+		.dclk = 266,
+		.ddr_tbl_index = 0,
+		.ddr_clk_sel = 0x0,
+		.ddr_freq_level = 0,
+	},
+	{
+		.dclk = 533,
+		.ddr_tbl_index = 1,
+		.ddr_clk_sel = 0x0,
+		.ddr_freq_level = 1,
+	},
+};
+#endif
+
+/* Field positions inside each DFC_LEVEL register */
+static struct dfc_level_reg_offset dfc_level_reg_off = {
+	.dclksel_shift = 11,
+	.dclksel_width = 3,
+	.ddr_clk_div_shift = 14,
+	.ddr_clk_div_width = 2,
+	.mc_table_num_shift = 4,
+	.mc_table_num_width = 2,
+	.volt_level_shift = 0,
+	.volt_level_width = 4,
+};
+
+/* Field positions inside the DFC_AP request register */
+static struct dfc_ap_reg_offset dfc_ap_reg_off = {
+	.dfc_req_shift = 0,
+	.freq_level_shift = 1,
+	.freq_level_width = 3,
+};
+
+/* Aggregate DDR clock description; bases are filled in at init time */
+struct ddr_params ddr_params = {
+	.dfc_level_reg_offset = &dfc_level_reg_off,
+	.dfc_ap_reg_offset = &dfc_ap_reg_off,
+	.ddr_offset = &asr1803_ddr_reg_off,
+	.parent_table = asr1803_ddr_parent_table,
+	.parent_table_size = ARRAY_SIZE(asr1803_ddr_parent_table),
+	.ddr_opt = asr1803_lpddr533_oparray,
+	.ddr_opt_size = ARRAY_SIZE(asr1803_lpddr533_oparray),
+#ifdef CONFIG_ASR_CLK_DCSTAT
+	.dcstat_support = true,
+#endif
+};
+
+
+/* dclk -> AXI clock rate mapping used by register_clk_bind2ddr() */
+struct ddr_combclk_relation aclk_dclk_relationtbl_1903_1828[] = {
+	{.dclk_rate = 266000000, .combclk_rate = 208000000},
+	{.dclk_rate = 533000000, .combclk_rate = 312000000},
+};
+
+struct ddr_combclk_relation aclk_dclk_relationtbl_1803[] = {
+	{.dclk_rate = 266000000, .combclk_rate = 208000000},
+	{.dclk_rate = 533000000, .combclk_rate = 208000000},
+};
+
+
+static struct ddr_dfc_info ddrdfcinfo;
+
+/*
+ * Record in ddrdfcinfo the lowest OP index running at >= 266 MHz
+ * ("active") and >= 400 MHz ("high") out of @nr entries.
+ *
+ * Fix: the original loop bound was "i <= sizeof(ddr_op_array)" — the
+ * size of a *pointer*, with an off-by-one on top — reading past the end
+ * of the table; the caller now passes the real entry count.
+ */
+static void find_ddr_level(struct ddr_opt *ddr_op_array, unsigned int nr)
+{
+	unsigned int i;
+
+	ddrdfcinfo.ddr_idle = 0;
+	for (i = 0; i < nr; i++) {
+		/* 266 is ok ? */
+		if (ddrdfcinfo.ddr_active == 0 &&
+		    ddr_op_array[i].dclk >= 266)
+			ddrdfcinfo.ddr_active = i;
+		if (ddrdfcinfo.ddr_high == 0 &&
+		    ddr_op_array[i].dclk >= 400)
+			ddrdfcinfo.ddr_high = i;
+		if (ddrdfcinfo.ddr_active && ddrdfcinfo.ddr_high)
+			break;
+	}
+}
+
+/* Reset ddrdfcinfo, derive the level indices and hand them to the PM code */
+static void init_ddr_dfc_info(void)
+{
+	memset(&ddrdfcinfo, 0, sizeof(ddrdfcinfo));
+	find_ddr_level(asr1803_lpddr533_oparray,
+		       ARRAY_SIZE(asr1803_lpddr533_oparray));
+	fillddrdfcinfo(&ddrdfcinfo);
+}
+
+
+/*
+ * Board-level DDR clock setup: fill in register bases, pick the
+ * chip-specific tables, register the "ddr" clock (HW-DFC for 1806/1903,
+ * RIPC-arbitrated otherwise) and bind the AXI clock rate to it.
+ */
+void __init asr1803_ddrc_init(struct asr1803_clk_unit *asr_unit)
+{
+	struct mmp_clk_unit *unit = &asr_unit->unit;
+	struct clk *clk = NULL;
+	struct clk *combclk = NULL;
+	int combclk_maxfreq;
+
+	ddr_params.apmu_base = asr_unit->apmu_base;
+	ddr_params.mpmu_base = asr_unit->mpmu_base;
+	ddr_params.dmcu_base = asr_unit->ddrc_base;
+
+	if (cpu_is_asr1806() || cpu_is_asr1903()) {
+		ddr_params.hwdfc_freq_table = freqs_cmb[ddr_cmbindex()];
+		/*
+		 * NOTE(review): this multiplies the level count by the
+		 * element size (bytes), while get_ddr_volt_level() treats
+		 * hwdfc_table_size as an entry count — confirm intent.
+		 */
+		ddr_params.hwdfc_table_size = MAX_PMIC_LEVEL * sizeof(freqs_cmb[ddr_cmbindex()][0]);
+#ifdef CONFIG_CPU_ASR1903
+		if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
+			ddr_params.parent_table = asr1903_a0p_ddr_parent_table;
+			ddr_params.parent_table_size = ARRAY_SIZE(asr1903_a0p_ddr_parent_table);
+			ddr_params.ddr_opt = asr1903_lpddr533_oparray;
+			ddr_params.ddr_opt_size = ARRAY_SIZE(asr1903_lpddr533_oparray);
+		}
+#endif
+	}
+
+	init_ddr_dfc_info();
+
+	asr_clk_parents_lookup(ddr_params.parent_table, 
+			ddr_params.parent_table_size);
+	if (cpu_is_asr1806() || cpu_is_asr1903()) {
+		clk = mmp_clk_register_ddr("ddr", asr1803_ddr_parent,
+			ARRAY_SIZE(asr1803_ddr_parent),
+			CLK_GET_RATE_NOCACHE, MMP_DDR_HWDFC_FEAT, &fc_seq_lock,
+			&ddr_params);
+	} else {
+		clk = mmp_clk_register_ddr("ddr", asr1803_ddr_parent,
+			ARRAY_SIZE(asr1803_ddr_parent),
+			CLK_GET_RATE_NOCACHE, MMP_DDR_RIPC_LOCK_FC, &fc_seq_lock,
+			&ddr_params);
+	}
+
+	if (!clk) {
+		pr_err("%s: registry top clk_ddr fail!!!\n", __func__);
+		return;
+	}
+
+	pr_info(" DDR boot up @%luHZ\n", clk_get_rate(clk));
+
+	mmp_clk_add(unit, ASR1803_CLK_DDR, clk);
+
+	clk_prepare_enable(clk);
+
+	combclk = __clk_lookup("axi");
+
+	if (!combclk) {
+		pr_err("%s: combclk is not registered yet\n", __func__);
+		return;
+	}
+
+	get_axi_max_freq(&combclk_maxfreq);
+
+	/* AXI follows DDR via the per-chip relation table */
+	if (cpu_is_asr1903() || cpu_is_asr1828())
+		register_clk_bind2ddr(combclk, combclk_maxfreq, aclk_dclk_relationtbl_1903_1828,
+			ARRAY_SIZE(aclk_dclk_relationtbl_1903_1828));
+	else	
+		register_clk_bind2ddr(combclk, combclk_maxfreq, aclk_dclk_relationtbl_1803,
+			ARRAY_SIZE(aclk_dclk_relationtbl_1803));
+
+	return;
+}
+
+/* Emergency dump of the DDR frequency-change registers (no-op until
+ * the APMU base has been mapped by asr1803_ddrc_init()). */
+void asr18xx_dump_ddr_regs(void)
+{
+	void __iomem *base = ddr_params.apmu_base;
+
+	if (!base)
+		return;
+
+	if (ddr_is_hwdfc)
+		pr_emerg("DFCAP %x, DFCSTATUS %x\n",
+			readl(DFC_AP(base)), readl(DFC_STATUS(base)));
+	else
+		pr_emerg("DPHY_CTRL 0x%08x SLP_TYPE 0x%08x\n",
+			readl(APMU_CKPHY_FC_CTRL(base)),
+			readl(APMU_MC_HW_SLP_TYPE(base)));
+}