[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/sh/clk/Makefile b/src/kernel/linux/v4.14/drivers/sh/clk/Makefile
new file mode 100644
index 0000000..5d15ebf
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/sh/clk/Makefile
@@ -0,0 +1,3 @@
+obj-y	:= core.o
+
+obj-$(CONFIG_SH_CLK_CPG)	+= cpg.o
diff --git a/src/kernel/linux/v4.14/drivers/sh/clk/core.c b/src/kernel/linux/v4.14/drivers/sh/clk/core.c
new file mode 100644
index 0000000..92863e3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/sh/clk/core.c
@@ -0,0 +1,623 @@
+/*
+ * SuperH clock framework
+ *
+ *  Copyright (C) 2005 - 2010  Paul Mundt
+ *
+ * This clock framework is derived from the OMAP version by:
+ *
+ *	Copyright (C) 2004 - 2008 Nokia Corporation
+ *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "clock: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/syscore_ops.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+
+static LIST_HEAD(clock_list);
+static DEFINE_SPINLOCK(clock_lock);
+static DEFINE_MUTEX(clock_list_sem);
+
+/* clock disable operations are not passed on to hardware during boot */
+static int allow_disable;
+
+void clk_rate_table_build(struct clk *clk,
+			  struct cpufreq_frequency_table *freq_table,
+			  int nr_freqs,
+			  struct clk_div_mult_table *src_table,
+			  unsigned long *bitmap)
+{
+	unsigned long mult, div;
+	unsigned long freq;
+	int i;
+
+	clk->nr_freqs = nr_freqs;
+
+	for (i = 0; i < nr_freqs; i++) {
+		div = 1;
+		mult = 1;
+
+		if (src_table->divisors && i < src_table->nr_divisors)
+			div = src_table->divisors[i];
+
+		if (src_table->multipliers && i < src_table->nr_multipliers)
+			mult = src_table->multipliers[i];
+
+		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
+			freq = CPUFREQ_ENTRY_INVALID;
+		else
+			freq = clk->parent->rate * mult / div;
+
+		freq_table[i].driver_data = i;
+		freq_table[i].frequency = freq;
+	}
+
+	/* Termination entry */
+	freq_table[i].driver_data = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
+
+struct clk_rate_round_data;
+
+struct clk_rate_round_data {
+	unsigned long rate;
+	unsigned int min, max;
+	long (*func)(unsigned int, struct clk_rate_round_data *);
+	void *arg;
+};
+
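+/*
+ * Walk candidate positions [min, max], fetching each frequency via ->func();
+ * positions that report 0 (invalid) are skipped by the trailing else so the
+ * loop body only ever sees usable frequencies.
+ */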
+#define for_each_frequency(pos, r, freq)			\
+	for (pos = r->min, freq = r->func(pos, r);		\
+	     pos <= r->max; pos++, freq = r->func(pos, r))	\
+		if (unlikely(freq == 0))			\
+			;					\
+		else
+
+static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
+{
+	unsigned long rate_error, rate_error_prev = ~0UL;
+	unsigned long highest, lowest, freq;
+	long rate_best_fit = -ENOENT;
+	int i;
+
+	highest = 0;
+	lowest = ~0UL;
+
+	for_each_frequency(i, rounder, freq) {
+		if (freq > highest)
+			highest = freq;
+		if (freq < lowest)
+			lowest = freq;
+
+		rate_error = abs(freq - rounder->rate);
+		if (rate_error < rate_error_prev) {
+			rate_best_fit = freq;
+			rate_error_prev = rate_error;
+		}
+
+		if (rate_error == 0)
+			break;
+	}
+
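+	/* Clamp out-of-range requests to the nearest covered endpoint. */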
+	if (rounder->rate >= highest)
+		rate_best_fit = highest;
+	if (rounder->rate <= lowest)
+		rate_best_fit = lowest;
+
+	return rate_best_fit;
+}
+
+static long clk_rate_table_iter(unsigned int pos,
+				struct clk_rate_round_data *rounder)
+{
+	struct cpufreq_frequency_table *freq_table = rounder->arg;
+	unsigned long freq = freq_table[pos].frequency;
+
+	if (freq == CPUFREQ_ENTRY_INVALID)
+		freq = 0;
+
+	return freq;
+}
+
+long clk_rate_table_round(struct clk *clk,
+			  struct cpufreq_frequency_table *freq_table,
+			  unsigned long rate)
+{
+	struct clk_rate_round_data table_round = {
+		.min	= 0,
+		.max	= clk->nr_freqs - 1,
+		.func	= clk_rate_table_iter,
+		.arg	= freq_table,
+		.rate	= rate,
+	};
+
+	if (clk->nr_freqs < 1)
+		return -ENOSYS;
+
+	return clk_rate_round_helper(&table_round);
+}
+
+static long clk_rate_div_range_iter(unsigned int pos,
+				    struct clk_rate_round_data *rounder)
+{
+	return clk_get_rate(rounder->arg) / pos;
+}
+
+long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
+			      unsigned int div_max, unsigned long rate)
+{
+	struct clk_rate_round_data div_range_round = {
+		.min	= div_min,
+		.max	= div_max,
+		.func	= clk_rate_div_range_iter,
+		.arg	= clk_get_parent(clk),
+		.rate	= rate,
+	};
+
+	return clk_rate_round_helper(&div_range_round);
+}
+
+static long clk_rate_mult_range_iter(unsigned int pos,
+				      struct clk_rate_round_data *rounder)
+{
+	return clk_get_rate(rounder->arg) * pos;
+}
+
+long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
+			       unsigned int mult_max, unsigned long rate)
+{
+	struct clk_rate_round_data mult_range_round = {
+		.min	= mult_min,
+		.max	= mult_max,
+		.func	= clk_rate_mult_range_iter,
+		.arg	= clk_get_parent(clk),
+		.rate	= rate,
+	};
+
+	return clk_rate_round_helper(&mult_range_round);
+}
+
+int clk_rate_table_find(struct clk *clk,
+			struct cpufreq_frequency_table *freq_table,
+			unsigned long rate)
+{
+	struct cpufreq_frequency_table *pos;
+
+	cpufreq_for_each_valid_entry(pos, freq_table)
+		if (pos->frequency == rate)
+			return pos - freq_table;
+
+	return -ENOENT;
+}
+
+/* Used for clocks that always have the same value as the parent clock */
+unsigned long followparent_recalc(struct clk *clk)
+{
+	return clk->parent ? clk->parent->rate : 0;
+}
+
+int clk_reparent(struct clk *child, struct clk *parent)
+{
+	list_del_init(&child->sibling);
+	if (parent)
+		list_add(&child->sibling, &parent->children);
+	child->parent = parent;
+
+	return 0;
+}
+
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &tclk->children, sibling) {
+		if (clkp->ops && clkp->ops->recalc)
+			clkp->rate = clkp->ops->recalc(clkp);
+
+		propagate_rate(clkp);
+	}
+}
+
+static void __clk_disable(struct clk *clk)
+{
+	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
+		 clk))
+		return;
+
+	if (!(--clk->usecount)) {
+		if (likely(allow_disable && clk->ops && clk->ops->disable))
+			clk->ops->disable(clk);
+		if (likely(clk->parent))
+			__clk_disable(clk->parent);
+	}
+}
+
+void clk_disable(struct clk *clk)
+{
+	unsigned long flags;
+
+	if (!clk)
+		return;
+
+	spin_lock_irqsave(&clock_lock, flags);
+	__clk_disable(clk);
+	spin_unlock_irqrestore(&clock_lock, flags);
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+static int __clk_enable(struct clk *clk)
+{
+	int ret = 0;
+
+	if (clk->usecount++ == 0) {
+		if (clk->parent) {
+			ret = __clk_enable(clk->parent);
+			if (unlikely(ret))
+				goto err;
+		}
+
+		if (clk->ops && clk->ops->enable) {
+			ret = clk->ops->enable(clk);
+			if (ret) {
+				if (clk->parent)
+					__clk_disable(clk->parent);
+				goto err;
+			}
+		}
+	}
+
+	return ret;
+err:
+	clk->usecount--;
+	return ret;
+}
+
+int clk_enable(struct clk *clk)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!clk)
+		return -EINVAL;
+
+	spin_lock_irqsave(&clock_lock, flags);
+	ret = __clk_enable(clk);
+	spin_unlock_irqrestore(&clock_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
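+
+/*
+ * Illustrative consumer pattern (the connection id "peripheral_clk" is a
+ * made-up example; real names come from the platform's clkdev tables):
+ *
+ *	struct clk *clk = clk_get(dev, "peripheral_clk");
+ *	if (!IS_ERR(clk)) {
+ *		clk_enable(clk);
+ *		...
+ *		clk_disable(clk);
+ *		clk_put(clk);
+ *	}
+ */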
+
+static LIST_HEAD(root_clks);
+
+/**
+ * recalculate_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent), which, if the
+ * clock's .recalc is set correctly, should also propagate their rates.
+ * Called at init.
+ */
+void recalculate_root_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &root_clks, sibling) {
+		if (clkp->ops && clkp->ops->recalc)
+			clkp->rate = clkp->ops->recalc(clkp);
+		propagate_rate(clkp);
+	}
+}
+
+static struct clk_mapping dummy_mapping;
+
+static struct clk *lookup_root_clock(struct clk *clk)
+{
+	while (clk->parent)
+		clk = clk->parent;
+
+	return clk;
+}
+
+static int clk_establish_mapping(struct clk *clk)
+{
+	struct clk_mapping *mapping = clk->mapping;
+
+	/*
+	 * Propagate mappings.
+	 */
+	if (!mapping) {
+		struct clk *clkp;
+
+		/*
+		 * dummy mapping for root clocks with no specified ranges
+		 */
+		if (!clk->parent) {
+			clk->mapping = &dummy_mapping;
+			goto out;
+		}
+
+		/*
+		 * If we're on a child clock and it provides no mapping of its
+		 * own, inherit the mapping from its root clock.
+		 */
+		clkp = lookup_root_clock(clk);
+		mapping = clkp->mapping;
+		BUG_ON(!mapping);
+	}
+
+	/*
+	 * Establish initial mapping.
+	 */
+	if (!mapping->base && mapping->phys) {
+		kref_init(&mapping->ref);
+
+		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
+		if (unlikely(!mapping->base))
+			return -ENXIO;
+	} else if (mapping->base) {
+		/*
+		 * Bump the refcount for an existing mapping
+		 */
+		kref_get(&mapping->ref);
+	}
+
+	clk->mapping = mapping;
+out:
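+	/*
+	 * enable_reg holds the register's physical address; translate it to
+	 * an offset inside the ioremapped window so accessors can go through
+	 * mapped_reg.
+	 */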
+	clk->mapped_reg = clk->mapping->base;
+	clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
+	return 0;
+}
+
+static void clk_destroy_mapping(struct kref *kref)
+{
+	struct clk_mapping *mapping;
+
+	mapping = container_of(kref, struct clk_mapping, ref);
+
+	iounmap(mapping->base);
+}
+
+static void clk_teardown_mapping(struct clk *clk)
+{
+	struct clk_mapping *mapping = clk->mapping;
+
+	/* Nothing to do */
+	if (mapping == &dummy_mapping)
+		goto out;
+
+	kref_put(&mapping->ref, clk_destroy_mapping);
+	clk->mapping = NULL;
+out:
+	clk->mapped_reg = NULL;
+}
+
+int clk_register(struct clk *clk)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	/*
+	 * trap out already registered clocks
+	 */
+	if (clk->node.next || clk->node.prev)
+		return 0;
+
+	mutex_lock(&clock_list_sem);
+
+	INIT_LIST_HEAD(&clk->children);
+	clk->usecount = 0;
+
+	ret = clk_establish_mapping(clk);
+	if (unlikely(ret))
+		goto out_unlock;
+
+	if (clk->parent)
+		list_add(&clk->sibling, &clk->parent->children);
+	else
+		list_add(&clk->sibling, &root_clks);
+
+	list_add(&clk->node, &clock_list);
+
+#ifdef CONFIG_SH_CLK_CPG_LEGACY
+	if (clk->ops && clk->ops->init)
+		clk->ops->init(clk);
+#endif
+
+out_unlock:
+	mutex_unlock(&clock_list_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+	mutex_lock(&clock_list_sem);
+	list_del(&clk->sibling);
+	list_del(&clk->node);
+	clk_teardown_mapping(clk);
+	mutex_unlock(&clock_list_sem);
+}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
+void clk_enable_init_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &clock_list, node)
+		if (clkp->flags & CLK_ENABLE_ON_INIT)
+			clk_enable(clkp);
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	if (!clk)
+		return 0;
+
+	return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = -EOPNOTSUPP;
+	unsigned long flags;
+
+	if (!clk)
+		return 0;
+
+	spin_lock_irqsave(&clock_lock, flags);
+
+	if (likely(clk->ops && clk->ops->set_rate)) {
+		ret = clk->ops->set_rate(clk, rate);
+		if (ret != 0)
+			goto out_unlock;
+	} else {
+		clk->rate = rate;
+		ret = 0;
+	}
+
+	if (clk->ops && clk->ops->recalc)
+		clk->rate = clk->ops->recalc(clk);
+
+	propagate_rate(clk);
+
+out_unlock:
+	spin_unlock_irqrestore(&clock_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!parent || !clk)
+		return ret;
+	if (clk->parent == parent)
+		return 0;
+
+	spin_lock_irqsave(&clock_lock, flags);
+	if (clk->usecount == 0) {
+		if (clk->ops->set_parent)
+			ret = clk->ops->set_parent(clk, parent);
+		else
+			ret = clk_reparent(clk, parent);
+
+		if (ret == 0) {
+			if (clk->ops->recalc)
+				clk->rate = clk->ops->recalc(clk);
+			pr_debug("set parent of %p to %p (new rate %ld)\n",
+				 clk, clk->parent, clk->rate);
+			propagate_rate(clk);
+		}
+	} else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&clock_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+	if (!clk)
+		return NULL;
+
+	return clk->parent;
+}
+EXPORT_SYMBOL_GPL(clk_get_parent);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	if (!clk)
+		return 0;
+
+	if (likely(clk->ops && clk->ops->round_rate)) {
+		unsigned long flags, rounded;
+
+		spin_lock_irqsave(&clock_lock, flags);
+		rounded = clk->ops->round_rate(clk, rate);
+		spin_unlock_irqrestore(&clock_lock, flags);
+
+		return rounded;
+	}
+
+	return clk_get_rate(clk);
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
+
+#ifdef CONFIG_PM
+static void clks_core_resume(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &clock_list, node) {
+		if (likely(clkp->usecount && clkp->ops)) {
+			unsigned long rate = clkp->rate;
+
+			if (likely(clkp->ops->set_parent))
+				clkp->ops->set_parent(clkp,
+					clkp->parent);
+			if (likely(clkp->ops->set_rate))
+				clkp->ops->set_rate(clkp, rate);
+			else if (likely(clkp->ops->recalc))
+				clkp->rate = clkp->ops->recalc(clkp);
+		}
+	}
+}
+
+static struct syscore_ops clks_syscore_ops = {
+	.resume = clks_core_resume,
+};
+
+static int __init clk_syscore_init(void)
+{
+	register_syscore_ops(&clks_syscore_ops);
+
+	return 0;
+}
+subsys_initcall(clk_syscore_init);
+#endif
+
+static int __init clk_late_init(void)
+{
+	unsigned long flags;
+	struct clk *clk;
+
+	/* disable all clocks with zero use count */
+	mutex_lock(&clock_list_sem);
+	spin_lock_irqsave(&clock_lock, flags);
+
+	list_for_each_entry(clk, &clock_list, node)
+		if (!clk->usecount && clk->ops && clk->ops->disable)
+			clk->ops->disable(clk);
+
+	/* from now on allow clock disable operations */
+	allow_disable = 1;
+
+	spin_unlock_irqrestore(&clock_lock, flags);
+	mutex_unlock(&clock_list_sem);
+	return 0;
+}
+late_initcall(clk_late_init);
diff --git a/src/kernel/linux/v4.14/drivers/sh/clk/cpg.c b/src/kernel/linux/v4.14/drivers/sh/clk/cpg.c
new file mode 100644
index 0000000..7442bc1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/sh/clk/cpg.c
@@ -0,0 +1,492 @@
+/*
+ * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
+ *
+ *  Copyright (C) 2010  Magnus Damm
+ *  Copyright (C) 2010 - 2012  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+
+#define CPG_CKSTP_BIT	BIT(8)
+
+static unsigned int sh_clk_read(struct clk *clk)
+{
+	if (clk->flags & CLK_ENABLE_REG_8BIT)
+		return ioread8(clk->mapped_reg);
+	else if (clk->flags & CLK_ENABLE_REG_16BIT)
+		return ioread16(clk->mapped_reg);
+
+	return ioread32(clk->mapped_reg);
+}
+
+static void sh_clk_write(int value, struct clk *clk)
+{
+	if (clk->flags & CLK_ENABLE_REG_8BIT)
+		iowrite8(value, clk->mapped_reg);
+	else if (clk->flags & CLK_ENABLE_REG_16BIT)
+		iowrite16(value, clk->mapped_reg);
+	else
+		iowrite32(value, clk->mapped_reg);
+}
+
+static unsigned int r8(const void __iomem *addr)
+{
+	return ioread8(addr);
+}
+
+static unsigned int r16(const void __iomem *addr)
+{
+	return ioread16(addr);
+}
+
+static unsigned int r32(const void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static int sh_clk_mstp_enable(struct clk *clk)
+{
+	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
+	if (clk->status_reg) {
+		unsigned int (*read)(const void __iomem *addr);
+		int i;
+		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
+			(phys_addr_t)clk->enable_reg + clk->mapped_reg;
+
+		if (clk->flags & CLK_ENABLE_REG_8BIT)
+			read = r8;
+		else if (clk->flags & CLK_ENABLE_REG_16BIT)
+			read = r16;
+		else
+			read = r32;
+
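+		/*
+		 * Poll the status register (up to 1000 iterations) until the
+		 * module-stop bit clears, i.e. the clock has actually started.
+		 */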
+		for (i = 1000;
+		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
+		     i--)
+			cpu_relax();
+		if (!i) {
+			pr_err("cpg: failed to enable %p[%d]\n",
+			       clk->enable_reg, clk->enable_bit);
+			return -ETIMEDOUT;
+		}
+	}
+	return 0;
+}
+
+static void sh_clk_mstp_disable(struct clk *clk)
+{
+	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
+}
+
+static struct sh_clk_ops sh_clk_mstp_clk_ops = {
+	.enable		= sh_clk_mstp_enable,
+	.disable	= sh_clk_mstp_disable,
+	.recalc		= followparent_recalc,
+};
+
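+/*
+ * Illustrative platform setup (register name, bit and index below are
+ * made-up examples; real values come from the SoC's clock tables):
+ *
+ *	static struct clk mstp_clks[] = {
+ *		[MSTP001] = SH_CLK_MSTP32(&peripheral_clk, SMSTPCR0, 1, 0),
+ *	};
+ *
+ *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ */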
+int __init sh_clk_mstp_register(struct clk *clks, int nr)
+{
+	struct clk *clkp;
+	int ret = 0;
+	int k;
+
+	for (k = 0; !ret && (k < nr); k++) {
+		clkp = clks + k;
+		clkp->ops = &sh_clk_mstp_clk_ops;
+		ret |= clk_register(clkp);
+	}
+
+	return ret;
+}
+
+/*
+ * Div/mult table lookup helpers
+ */
+static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
+{
+	return clk->priv;
+}
+
+static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
+{
+	return clk_to_div_table(clk)->div_mult_table;
+}
+
+/*
+ * Common div ops
+ */
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_rate_table_round(clk, clk->freq_table, rate);
+}
+
+static unsigned long sh_clk_div_recalc(struct clk *clk)
+{
+	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
+	unsigned int idx;
+
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, clk->arch_flags ? &clk->arch_flags : NULL);
+
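+	/* enable_bit doubles as the shift of the divisor field here. */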
+	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
+
+	return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct clk_div_table *dt = clk_to_div_table(clk);
+	unsigned long value;
+	int idx;
+
+	idx = clk_rate_table_find(clk, clk->freq_table, rate);
+	if (idx < 0)
+		return idx;
+
+	value = sh_clk_read(clk);
+	value &= ~(clk->div_mask << clk->enable_bit);
+	value |= (idx << clk->enable_bit);
+	sh_clk_write(value, clk);
+
+	/* XXX: Should use a post-change notifier */
+	if (dt->kick)
+		dt->kick(clk);
+
+	return 0;
+}
+
+static int sh_clk_div_enable(struct clk *clk)
+{
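+	/*
+	 * div6 disable may have forced the divisor field to its mask value
+	 * (see sh_clk_div_disable), so reprogram the cached rate before
+	 * clearing CKSTP.
+	 */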
+	if (clk->div_mask == SH_CLK_DIV6_MSK) {
+		int ret = sh_clk_div_set_rate(clk, clk->rate);
+		if (ret < 0)
+			return ret;
+	}
+
+	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
+	return 0;
+}
+
+static void sh_clk_div_disable(struct clk *clk)
+{
+	unsigned int val;
+
+	val = sh_clk_read(clk);
+	val |= CPG_CKSTP_BIT;
+
+	/*
+	 * div6 clocks require the divisor field to be non-zero or the
+	 * above CKSTP toggle silently fails. Ensure that the divisor
+	 * array is reset to its initial state on disable.
+	 */
+	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
+		val |= clk->div_mask;
+
+	sh_clk_write(val, clk);
+}
+
+static struct sh_clk_ops sh_clk_div_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.set_rate	= sh_clk_div_set_rate,
+	.round_rate	= sh_clk_div_round_rate,
+};
+
+static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.set_rate	= sh_clk_div_set_rate,
+	.round_rate	= sh_clk_div_round_rate,
+	.enable		= sh_clk_div_enable,
+	.disable	= sh_clk_div_disable,
+};
+
+static int __init sh_clk_init_parent(struct clk *clk)
+{
+	u32 val;
+
+	if (clk->parent)
+		return 0;
+
+	if (!clk->parent_table || !clk->parent_num)
+		return 0;
+
+	if (!clk->src_width) {
+		pr_err("sh_clk_init_parent: cannot select parent clock\n");
+		return -EINVAL;
+	}
+
+	val  = (sh_clk_read(clk) >> clk->src_shift);
+	val &= (1 << clk->src_width) - 1;
+
+	if (val >= clk->parent_num) {
+		pr_err("sh_clk_init_parent: parent table size failed\n");
+		return -EINVAL;
+	}
+
+	clk_reparent(clk, clk->parent_table[val]);
+	if (!clk->parent) {
+		pr_err("sh_clk_init_parent: unable to set parent");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
+			struct clk_div_table *table, struct sh_clk_ops *ops)
+{
+	struct clk *clkp;
+	void *freq_table;
+	int nr_divs = table->div_mult_table->nr_divisors;
+	int freq_table_size = sizeof(struct cpufreq_frequency_table);
+	int ret = 0;
+	int k;
+
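+	/* One extra entry per clock for the CPUFREQ_TABLE_END terminator. */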
+	freq_table_size *= (nr_divs + 1);
+	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+	if (!freq_table) {
+		pr_err("%s: unable to alloc memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (k = 0; !ret && (k < nr); k++) {
+		clkp = clks + k;
+
+		clkp->ops = ops;
+		clkp->priv = table;
+
+		clkp->freq_table = freq_table + (k * freq_table_size);
+		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+		ret = clk_register(clkp);
+		if (ret == 0)
+			ret = sh_clk_init_parent(clkp);
+	}
+
+	return ret;
+}
+
+/*
+ * div6 support
+ */
+static int sh_clk_div6_divisors[64] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+
+static struct clk_div_mult_table div6_div_mult_table = {
+	.divisors = sh_clk_div6_divisors,
+	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
+
+static struct clk_div_table sh_clk_div6_table = {
+	.div_mult_table	= &div6_div_mult_table,
+};
+
+static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
+{
+	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
+	u32 value;
+	int ret, i;
+
+	if (!clk->parent_table || !clk->parent_num)
+		return -EINVAL;
+
+	/* Search the parent */
+	for (i = 0; i < clk->parent_num; i++)
+		if (clk->parent_table[i] == parent)
+			break;
+
+	if (i == clk->parent_num)
+		return -ENODEV;
+
+	ret = clk_reparent(clk, parent);
+	if (ret < 0)
+		return ret;
+
+	value = sh_clk_read(clk) &
+		~(((1 << clk->src_width) - 1) << clk->src_shift);
+
+	sh_clk_write(value | (i << clk->src_shift), clk);
+
+	/* Rebuild the frequency table */
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, NULL);
+
+	return 0;
+}
+
+static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.round_rate	= sh_clk_div_round_rate,
+	.set_rate	= sh_clk_div_set_rate,
+	.enable		= sh_clk_div_enable,
+	.disable	= sh_clk_div_disable,
+	.set_parent	= sh_clk_div6_set_parent,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+				       &sh_clk_div_enable_clk_ops);
+}
+
+int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
+{
+	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+				       &sh_clk_div6_reparent_clk_ops);
+}
+
+/*
+ * div4 support
+ */
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
+	u32 value;
+	int ret;
+
+	/*
+	 * We really need a better way to determine the parent index; for now
+	 * assume the internal parent comes with CLK_ENABLE_ON_INIT set and
+	 * that no CLK_ENABLE_ON_INIT means an external clock.
+	 */
+
+	if (parent->flags & CLK_ENABLE_ON_INIT)
+		value = sh_clk_read(clk) & ~(1 << 7);
+	else
+		value = sh_clk_read(clk) | (1 << 7);
+
+	ret = clk_reparent(clk, parent);
+	if (ret < 0)
+		return ret;
+
+	sh_clk_write(value, clk);
+
+	/* Rebuild the frequency table */
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, &clk->arch_flags);
+
+	return 0;
+}
+
+static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
+	.recalc		= sh_clk_div_recalc,
+	.set_rate	= sh_clk_div_set_rate,
+	.round_rate	= sh_clk_div_round_rate,
+	.enable		= sh_clk_div_enable,
+	.disable	= sh_clk_div_disable,
+	.set_parent	= sh_clk_div4_set_parent,
+};
+
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div_register_ops(clks, nr, table,
+				       &sh_clk_div_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div_register_ops(clks, nr, table,
+				       &sh_clk_div4_reparent_clk_ops);
+}
+
+/* FSI-DIV */
+static unsigned long fsidiv_recalc(struct clk *clk)
+{
+	u32 value;
+
+	value = __raw_readl(clk->mapping->base);
+
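+	/* Divide ratio is in the upper 16 bits; below 2 means bypass. */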
+	value >>= 16;
+	if (value < 2)
+		return clk->parent->rate;
+
+	return clk->parent->rate / value;
+}
+
+static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
+}
+
+static void fsidiv_disable(struct clk *clk)
+{
+	__raw_writel(0, clk->mapping->base);
+}
+
+static int fsidiv_enable(struct clk *clk)
+{
+	u32 value;
+
+	value  = __raw_readl(clk->mapping->base) >> 16;
+	if (value < 2)
+		return 0;
+
+	__raw_writel((value << 16) | 0x3, clk->mapping->base);
+
+	return 0;
+}
+
+static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
+{
+	int idx;
+
+	idx = (clk->parent->rate / rate) & 0xffff;
+	if (idx < 2)
+		__raw_writel(0, clk->mapping->base);
+	else
+		__raw_writel(idx << 16, clk->mapping->base);
+
+	return 0;
+}
+
+static struct sh_clk_ops fsidiv_clk_ops = {
+	.recalc		= fsidiv_recalc,
+	.round_rate	= fsidiv_round_rate,
+	.set_rate	= fsidiv_set_rate,
+	.enable		= fsidiv_enable,
+	.disable	= fsidiv_disable,
+};
+
+int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
+{
+	struct clk_mapping *map;
+	int i;
+
+	for (i = 0; i < nr; i++) {
+
+		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
+		if (!map) {
+			pr_err("%s: unable to alloc memory\n", __func__);
+			return -ENOMEM;
+		}
+
+		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
+		map->phys		= (phys_addr_t)clks[i].enable_reg;
+		map->len		= 8;
+
+		clks[i].enable_reg	= 0; /* remove .enable_reg */
+		clks[i].ops		= &fsidiv_clk_ops;
+		clks[i].mapping		= map;
+
+		clk_register(&clks[i]);
+	}
+
+	return 0;
+}