blob: e458e2b4b546d98b90f1e928cd0b224b176a9bc9 [file] [log] [blame]
/*
* linux/arch/arm/mach-zx297520v2/clock.c
*
* Copyright (C) 2013 ZTE-TSP <geanfeng@zte.com.cn>
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/clkdev.h>
#include <mach/iomap.h>
#include <linux/clk-private.h>
#include <mach/board.h>
#include <mach/debug.h>
#include "clk.h"
/* protects enable_count and the enable/disable hardware paths; IRQ-safe */
static DEFINE_SPINLOCK(enable_lock);
/* serializes rate, parent and topology changes, and walks of 'clocks' */
static DEFINE_MUTEX(prepare_lock);
/* every registered struct clk is linked here for the debug dumps below */
static LIST_HEAD(clocks);
/*** helper functions ***/
/* Return the clock's name, or NULL for a NULL clk. */
inline const char *__clk_get_name(struct clk *clk)
{
	if (!clk)
		return NULL;
	return clk->name;
}
/* Return the hardware-specific part of the clock, or NULL for a NULL clk. */
inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	if (!clk)
		return NULL;
	return clk->hw;
}
/*
 * Return the number of possible parents of @clk.
 *
 * Fix: the original returned -EINVAL for a NULL clk, but the u8 return
 * type truncates that to 234 — indistinguishable from a real parent
 * count. A NULL clk has no parents, so report 0 instead.
 */
inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}
/* Return the currently selected parent clock, or NULL for a NULL clk. */
inline struct clk *__clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;
	return clk->parent;
}
/* Return the software enable count, or -EINVAL for a NULL clk. */
inline int __clk_get_enable_count(struct clk *clk)
{
	if (!clk)
		return -EINVAL;
	return clk->enable_count;
}
/*
 * Return the cached rate of @clk. -EINVAL (as unsigned long) for a NULL
 * clk, -ENODEV for a non-root clk that has no parent. Does not touch
 * hardware.
 */
static unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return -EINVAL;

	rate = clk->rate;

	/* root clocks are valid without a parent */
	if (clk->flags & CLK_IS_ROOT)
		return rate;

	/* a non-root clock without a parent is an orphan */
	if (!clk->parent)
		return -ENODEV;

	return rate;
}
/* Return the framework flags of @clk, or -EINVAL (as unsigned long) for NULL. */
inline unsigned long __clk_get_flags(struct clk *clk)
{
	if (!clk)
		return -EINVAL;
	return clk->flags;
}
/*
 * Query whether @clk is enabled. Asks the hardware when the clock
 * provides .is_enabled; otherwise falls back to the software enable
 * counter (.is_enabled is only mandatory for clocks that gate).
 * Returns -EINVAL for a NULL clk.
 */
static int __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return -EINVAL;

	if (clk->ops->is_enabled)
		return clk->ops->is_enabled(clk->hw);

	return clk->enable_count ? 1 : 0;
}
/*** clk api ***/
/*
 * Drop one enable reference on @clk. When the count reaches zero the
 * clock is gated in hardware and the release propagates up the parent
 * chain. Caller must hold enable_lock.
 */
static void __clk_disable(struct clk *clk)
{
	/* walk up the parent chain instead of recursing */
	while (clk) {
		if (WARN_ON(clk->enable_count == 0))
			return;

		/* other users remain: just drop our reference */
		if (--clk->enable_count > 0)
			return;

		if (clk->ops->disable)
			clk->ops->disable(clk->hw);

		clk = clk->parent;
	}
}
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * Must not sleep, unlike clk_unprepare: it is meant for gates that are
 * simple register writes. When a gate needs both a fast and a slow part,
 * clk_disable handles the fast part and must be called before
 * clk_unprepare. NULL clocks are silently ignored.
 */
void clk_disable(struct clk *clk)
{
	unsigned long irqflags;

	spin_lock_irqsave(&enable_lock, irqflags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, irqflags);
}
EXPORT_SYMBOL_GPL(clk_disable);
/*
 * Take one enable reference on @clk. On the first reference the whole
 * parent chain is enabled before this clock is ungated; on failure the
 * parent references taken here are released again. Caller must hold
 * enable_lock. Returns 0 on success or a negative errno.
 */
static int __clk_enable(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	/* already running: just bump the reference count */
	if (clk->enable_count > 0) {
		clk->enable_count++;
		return 0;
	}

	/* first user: bring up the parent chain before ungating */
	ret = __clk_enable(clk->parent);
	if (ret)
		return ret;

	if (clk->ops->enable) {
		ret = clk->ops->enable(clk->hw);
		if (ret) {
			/* undo the parent reference taken above */
			__clk_disable(clk->parent);
			return ret;
		}
	}

	clk->enable_count++;
	return 0;
}
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * Must not sleep, unlike clk_prepare: it is meant for gates that are
 * simple register writes. When an ungate needs both a fast and a slow
 * part, clk_prepare handles the slow part and must be called before
 * clk_enable. Returns 0 on success, a negative errno otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long irqflags;
	int ret;

	spin_lock_irqsave(&enable_lock, irqflags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, irqflags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);
/*
 * clk_is_enabled - locked wrapper around __clk_is_enabled.
 * Returns 1/0 for enabled/disabled, -EINVAL for a NULL clk.
 */
int clk_is_enabled(struct clk *clk)
{
	unsigned long irqflags;
	int enabled;

	spin_lock_irqsave(&enable_lock, irqflags);
	enabled = __clk_is_enabled(clk);
	spin_unlock_irqrestore(&enable_lock, irqflags);

	return enabled;
}
EXPORT_SYMBOL(clk_is_enabled);
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Returns the cached rate only; the hardware is not queried. A NULL clk
 * yields -EINVAL (as unsigned long).
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long cached;

	mutex_lock(&prepare_lock);
	cached = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return cached;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the target rate in Hz
 * @best_parent: optional out-parameter; receives the parent whose
 *               rounded rate came closest to @rate (only differs from
 *               clk->parent when CLK_AUTO_ROUND_PARENT is set)
 *
 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate.
 *
 * If the clock has several parents and CLK_AUTO_ROUND_PARENT is set,
 * every parent is tried and the one producing the smallest deviation
 * from @rate wins. clk->parent is temporarily repointed during the
 * scan (the .round_rate callbacks apparently read it) and restored
 * before returning — hence the need for prepare_lock.
 *
 * Returns the best achievable rate, clk->rate if the clock cannot
 * round, or -EINVAL (as unsigned long) for a NULL clk.
 */
static unsigned long __clk_round_rate(struct clk *clk, unsigned long rate, struct clk **best_parent)
{
	int i;
	unsigned long tmp_rate;
	unsigned long tmp_rate2;
	unsigned long best_rate;
	unsigned long diff_rate;
	struct clk *old_parent;
	struct clk *sel_parent;

	if (!clk)
		return -EINVAL;
	/* clocks that cannot round simply report their current rate */
	if (!clk->ops->round_rate)
		return clk->rate;

	sel_parent = clk->parent;
	if (clk->num_parents > 1 && (clk->flags & CLK_AUTO_ROUND_PARENT)) {
		old_parent = clk->parent;
		best_rate = 0;
		diff_rate = rate;	/* current smallest |rounded - rate| */
		for ( i=0; i < clk->num_parents; i++) {
			/* temporarily repoint parent so round_rate sees it */
			clk->parent = clk->parents[i];
			tmp_rate = clk->ops->round_rate(clk->hw, rate, NULL);
			/* tmp_rate2 = absolute deviation from the target */
			if (tmp_rate > rate)
				tmp_rate2 = tmp_rate - rate;
			else
				tmp_rate2 = rate - tmp_rate;
			if (tmp_rate2 == 0) {
				/* exact match: stop searching */
				best_rate = tmp_rate;
				sel_parent = clk->parent;
				break;
			} else if (diff_rate > tmp_rate2) {
				diff_rate = tmp_rate2;
				best_rate = tmp_rate; /*sel match clock*/
				sel_parent = clk->parent;
			} else if (best_rate == 0) {
				/* fallback so we never return 0 after a scan */
				best_rate = tmp_rate;
			}
		}
		/* restore the real parent after the scan */
		clk->parent = old_parent;
	}
	else
	{
		best_rate = clk->ops->round_rate(clk->hw, rate, NULL);
	}

	if(best_parent)
		*best_parent = sel_parent;
	return best_rate;
}
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Rounds @rate to the nearest rate the clock can actually produce.
 * Clocks without a .round_rate operation report their current rate.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long rounded;

	mutex_lock(&prepare_lock);
	rounded = __clk_round_rate(clk, rate, NULL);
	mutex_unlock(&prepare_lock);

	return rounded;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Returns the cached clk->parent; NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *p;

	mutex_lock(&prepare_lock);
	p = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return p;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
/**
 * __clk_recalc_rates
 * @clk: clock whose rate (and whose ancestors' rates) should be refreshed
 *
 * Walks UP the parent chain from @clk, recalculating each ancestor's
 * rate first and then deriving @clk's own rate from its parent's. If a
 * clk does not implement the .recalc_rate callback it simply inherits
 * its parent's rate.
 *
 * NOTE(review): despite the name's resemblance to the common-clock
 * helper, this version does not descend into child clocks and sends no
 * rate-change notifications.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk)
{
	unsigned long parent_rate = 0;

	if (clk->parent) {
		/* refresh ancestors bottom-up so parent->rate is current */
		__clk_recalc_rates(clk->parent);
		parent_rate = clk->parent->rate;
	} else {
		/* root or orphan clock: keep the cached rate */
		parent_rate = clk->rate;
	}

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;
}
/*
 * __clk_set_parent - reprogram the hardware mux of @clk to @parent.
 *
 * Resolves @parent to an index into clk->parent_names / clk->parents
 * (caching the struct clk pointer on a name match), then programs the
 * mux via .set_parent. While the switch happens an extra enable
 * reference is held on the new parent so an enabled clock never loses
 * its source; the matching reference on the old parent is dropped
 * afterwards. Caller must hold prepare_lock; does NOT update
 * clk->parent — the caller does that on success.
 *
 * Returns the .set_parent result, or -EINVAL if @parent is not a
 * possible parent or clk->parents is missing.
 */
static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	if (!clk->parents)
		return -EINVAL;

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents) {
				/* cache the pointer; clk_put keeps refcount balanced */
				clk->parents[i] = clk_get(NULL, parent->name);
				clk_put(clk->parents[i]);
			}
			break;
		}
	}

	/* loop fell through: @parent is not reachable from this mux */
	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* FIXME replace with clk_is_enabled(clk) someday */
	/* keep the new parent running before the mux flips */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return ret;
}
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parents @clk to @parent, then refreshes the cached rates along the
 * parent chain (also on failure, since the hardware may be in a partial
 * state). An explicit re-parent permanently cancels the
 * CLK_AUTO_ROUND_PARENT behaviour for this clock. Returns 0 on success,
 * a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;
	if (!clk->ops->set_parent || !clk->num_parents)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent != parent) {
		/* once a parent is chosen by hand, stop auto-selecting one */
		clk->flags &= ~CLK_AUTO_ROUND_PARENT;

		ret = __clk_set_parent(clk, parent);
		if (!ret)
			clk->parent = parent;
		__clk_recalc_rates(clk);
	}

	mutex_unlock(&prepare_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
/**
* clk_set_rate - specify a new rate for clk
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
* In the simplest case clk_set_rate will only change the rate of clk.
*
* If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
* will fail; only when the clk is disabled will it be able to change
* its rate.
*
* Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
* recursively propagate up to clk's parent; whether or not this happens
* depends on the outcome of clk's .round_rate implementation. If
* *parent_rate is 0 after calling .round_rate then upstream parent
* propagation is ignored. If *parent_rate comes back with a new rate
* for clk's parent then we propagate up to clk's parent and set it's
* rate. Upward propagation will continue until either a clk does not
* support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
* changes to clk's parent_rate. If there is a failure during upstream
* propagation then clk_set_rate will unwind and restore each clk's rate
* that had been successfully changed. Afterwards a rate change abort
* notification will be propagated downstream, starting from the clk
* that failed.
*
* At the end of all of the rate setting, clk_set_rate internally calls
* __clk_recalc_rates and propagates the rate changes downstream,
* starting from the highest clk whose rate was changed. This has the
* added benefit of propagating post-rate change notifiers.
*
* Note that while post-rate change and rate change abort notifications
* are guaranteed to be sent to a clk only once per call to
* clk_set_rate, pre-change notifications will be sent for every clk
* whose rate is changed. Stacking pre-change notifications is noisy
* for the drivers subscribed to them, but this allows drivers to react
* to intermediate clk rate changes up until the point where the final
* rate is achieved at the end of upstream propagation.
*
* Returns 0 on success, -EERROR otherwise.
*/
int clk_set_rate(struct clk *clk, unsigned long rate)
{
struct clk *best_parent = NULL;
/* prevent racing with updates to the clock topology */
mutex_lock(&prepare_lock);
/* bail early if nothing to do */
if (clk == NULL || rate == clk->rate)
goto out;
clk->new_rate = __clk_round_rate(clk, rate, &best_parent);
if(clk->num_parents && best_parent != NULL && best_parent != clk->parent) {
__clk_set_parent(clk, best_parent);
clk->parent = best_parent;
}
/* change the rates */
if (clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate);
if (clk->ops->recalc_rate)
__clk_recalc_rates(clk);
WARN(!clk->parent,"%s,clk %s 's parent is NULL.\n",__func__, clk->name);
mutex_unlock(&prepare_lock);
return 0;
out:
mutex_unlock(&prepare_lock);
return -ENOSYS;
}
EXPORT_SYMBOL(clk_set_rate);
/**
 * clk_set_auto_gate - set the clock auto gate
 * @clk: clock source
 * @enable: enable auto gate true or false
 *
 * Forwards to the clock's optional .set_auto_gate callback under
 * enable_lock; clocks without the callback silently succeed.
 *
 * Fix: guard against a NULL @clk / @clk->ops before dereferencing,
 * consistent with clk_set_parent's validation.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_auto_gate(struct clk *clk, bool enable)
{
	unsigned long flags;
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	spin_lock_irqsave(&enable_lock, flags);
	if (clk->ops->set_auto_gate)
		ret = clk->ops->set_auto_gate(clk->hw, enable);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_auto_gate);
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @name: clock name
* @ops: operations this clock supports
* @hw: link to hardware-specific clock data
* @parent_names: array of string names for all possible parents
* @num_parents: number of possible parents
* @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
* cannot be dereferenced by driver code but may be used in conjuction with the
* rest of the clock API.
*/
static int clk_register(struct clk *clk)
{
int i, index;
if(clk == NULL)
return -EINVAL;
if(clk->hw != NULL)
clk->hw->clk = clk;
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
WARN(!clk->parent_names[i],
"%s: invalid NULL in %s's .parent_names\n",
__func__, clk->name);
/*
* Allocate an array of struct clk *'s to avoid unnecessary string
* look-ups of clk's possible parents. This can fail for clocks passed
* in to clk_init during early boot; thus any access to clk->parents[]
* must always check for a NULL pointer and try to populate it if
* necessary.
*
* If clk->parents is not NULL we skip this entire block. This allows
* for clock drivers to statically initialize clk->parents.
*/
for (i = 0; i < clk->num_parents; i++){
clk->parents[i] = clk_get(NULL,clk->parent_names[i]);
clk_put(clk->parents[i]);
}
/*get clk's parent used*/
if(clk->num_parents) {
index = clk->ops->get_parent(clk->hw);
if(index < clk->num_parents)
clk->parent = clk->parents[index];
else
WARN(1,"clk %s get parent error.\n",clk->name);
}
/*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
* then rate is set to zero.
*/
__clk_recalc_rates(clk);
/*
* optional platform-specific magic
*
* The .init callback is not used by any of the basic clock types, but
* exists for weird hardware that must perform initialization magic.
* Please consider other ways of solving initialization problems before
* using this callback, as it's use is discouraged.
*/
if (clk->ops->init)
clk->ops->init(clk->hw);
return 0;
}
/**
 * clk_disable_unused - gate a clock that is running but has no users
 * @clk: candidate clock; NULL is ignored
 *
 * Skips clocks that hold enable references or carry CLK_IGNORE_UNUSED;
 * anything else found enabled in hardware is gated via .disable.
 */
static void clk_disable_unused(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&enable_lock, flags);

	/* leave in-use and explicitly exempted clocks alone */
	if (!clk->enable_count && !(clk->flags & CLK_IGNORE_UNUSED)) {
		if (__clk_is_enabled(clk) && clk->ops->disable) {
			clk->ops->disable(clk->hw);
			printk(KERN_DEBUG "clk %s: start disabled\n",clk->name);
		}
	}

	spin_unlock_irqrestore(&enable_lock, flags);
}
/**
 * clk_show - dump every registered clock's name, parent, enable count
 * and cached rate into a seq_file (debugfs "clocks" backend).
 */
static int clk_show(struct seq_file *s, void *v)
{
	struct clk *clk;
	const char *parent_name;

	seq_printf(s, "%-20s %-20s %-9s %-9s\n", "name","parent","enable","rate");

	mutex_lock(&prepare_lock);
	list_for_each_entry(clk, &clocks, list) {
		/* parentless clocks are shown as children of "root" */
		parent_name = clk->parent ? clk->parent->name : "root";
		seq_printf(s, "%-20s %-20s %-9u %-9lu\n", clk->name,
				parent_name, clk->enable_count, clk->rate);
	}
	mutex_unlock(&prepare_lock);

	return 0;
}
/**
 * clk_open - debugfs open hook; binds the file to the clk_show dumper.
 */
static int clk_open(struct inode *inode, struct file *file)
{
	void *priv = inode->i_private;

	return single_open(file, clk_show, priv);
}
/**
 * clock debug fs
 */
/* dentry for <debugfs>/clocks; stays NULL until clk_debugfs_init() runs */
struct dentry * clk_debugfs = NULL;
/* read-only seq_file operations backing the "clocks" debugfs entry */
static const struct file_operations clk_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = clk_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Create the <debugfs>/clocks file (root-readable clock dump). */
static void clk_debugfs_init(void)
{
	clk_debugfs = debugfs_create_file("clocks", S_IRUSR, NULL, NULL,
			&clk_debugfs_fops);
}
/**
* "/sys/zte/test/clk_info"
*/
static ssize_t clk_info_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
struct clk *clk;
s += sprintf(s, "%-20s %-20s %-9s %-9s\n", "name","parent","enable","rate");
mutex_lock(&prepare_lock);
list_for_each_entry(clk, &clocks, list) {
if(clk->parent)
s += sprintf(s, "%-20s %-20s %-9u %-9lu\n", clk->name, clk->parent->name, \
clk->enable_count, clk->rate);
else
s += sprintf(s, "%-20s %-20s %-9u %-9lu\n", clk->name, "root", \
clk->enable_count, clk->rate);
}
mutex_unlock(&prepare_lock);
return (s - buf);
}
/* sysfs write handler: writes to clk_info are accepted and ignored. */
static ssize_t clk_info_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	return n;
}
/* ZTE helper macro: declares clk_info_attr bound to clk_info_show/_store */
zte_attr(clk_info);
/* attributes exported under the zx test kobject; NULL-terminated */
static struct attribute *g[] = {
	&clk_info_attr.attr,
	NULL,
};
static struct attribute_group zx29_clk_attribute_group = {
	.attrs = g,
};
/*
 * zx_clk_test_init - export "/sys/zte/test/clk_info".
 *
 * Fix: the original captured sysfs_create_group()'s return value but
 * never checked it and printed success unconditionally. Report and
 * propagate the failure instead.
 */
int __init zx_clk_test_init(void)
{
	int ret;

	ret = sysfs_create_group(zx_test_kobj, &zx29_clk_attribute_group);
	if (ret) {
		pr_err("[DEBUG] create test clk sysfs interface failed (%d).\n",
				ret);
		return ret;
	}

	pr_info("[DEBUG] create test clk sysfs interface OK.\n");
	return 0;
}
/*
 * __zx29_clock_init - register a table of clocks.
 * @clock_tbl: clkdev lookup table whose entries carry the struct clks
 * @num_clocks: number of entries in @clock_tbl
 *
 * First pass publishes every clock through clkdev and the local debug
 * list; second pass wires each one into the tree via clk_register.
 */
static void __init __zx29_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks)
{
	struct clk *clk;
	unsigned idx;

	mutex_lock(&prepare_lock);

	/* pass 1: make every clock reachable via clk_get and debug dumps */
	for (idx = 0; idx < num_clocks; idx++) {
		clkdev_add(&clock_tbl[idx]);
		list_add_tail(&clock_tbl[idx].clk->list, &clocks);
	}

	/* pass 2: resolve parents and compute initial rates */
	for (idx = 0; idx < num_clocks; idx++) {
		clk = clock_tbl[idx].clk;
		clk_register(clk);
	}

	mutex_unlock(&prepare_lock);
}
/* Board entry point: register the static peripheral clock table. */
void __init zx29_clock_init(void)
{
	__zx29_clock_init(periph_clocks_lookups, periph_clocks_lookups_num);

	pr_info("[CLK] zx29 tsp clk init ok.\n");
}
/*
 * Several unused clocks may be active. Turn them off.
 *
 * Currently only logs each clock's rate and creates the debugfs entry;
 * the actual gating call is intentionally left disabled (zxp).
 */
static int __init zx29_disable_unused_clocks(void)
{
	struct clk *clk;

	mutex_lock(&prepare_lock);
	list_for_each_entry(clk, &clocks, list) {
		printk(KERN_DEBUG "clk %s: rate = %lu\n",clk->name,clk->rate);
		/* zxp: clk_disable_unused(clk); — kept disabled on purpose */
	}
	mutex_unlock(&prepare_lock);

	clk_debugfs_init();
	return 0;
}
late_initcall(zx29_disable_unused_clocks);