blob: e458e2b4b546d98b90f1e928cd0b224b176a9bc9 [file] [log] [blame]
yuezonghe824eb0c2024-06-27 02:32:26 -07001/*
2 * linux/arch/arm/mach-zx297520v2/clock.c
3 *
4 * Copyright (C) 2013 ZTE-TSP <geanfeng@zte.com.cn>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/fs.h>
15#include <linux/debugfs.h>
16#include <linux/seq_file.h>
17#include <linux/list.h>
18#include <linux/errno.h>
19#include <linux/err.h>
20#include <linux/spinlock.h>
21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/io.h>
24#include <linux/debugfs.h>
25
26#include <linux/clkdev.h>
27#include <mach/iomap.h>
28#include <linux/clk-private.h>
29
30#include <mach/board.h>
31#include <mach/debug.h>
32#include "clk.h"
33
/* Protects enable/disable/enable_count; taken with IRQs disabled. */
static DEFINE_SPINLOCK(enable_lock);
/* Serializes rate, parent and topology changes; holders may sleep. */
static DEFINE_MUTEX(prepare_lock);
/* All clocks registered via __zx29_clock_init, for the debug listings. */
static LIST_HEAD(clocks);
37
38/*** helper functions ***/
39
40inline const char *__clk_get_name(struct clk *clk)
41{
42 return !clk ? NULL : clk->name;
43}
44
45inline struct clk_hw *__clk_get_hw(struct clk *clk)
46{
47 return !clk ? NULL : clk->hw;
48}
49
50inline u8 __clk_get_num_parents(struct clk *clk)
51{
52 return !clk ? -EINVAL : clk->num_parents;
53}
54
55inline struct clk *__clk_get_parent(struct clk *clk)
56{
57 return !clk ? NULL : clk->parent;
58}
59
60inline int __clk_get_enable_count(struct clk *clk)
61{
62 return !clk ? -EINVAL : clk->enable_count;
63}
64
65static unsigned long __clk_get_rate(struct clk *clk)
66{
67 unsigned long ret;
68
69 if (!clk) {
70 ret = -EINVAL;
71 goto out;
72 }
73
74 ret = clk->rate;
75
76 if (clk->flags & CLK_IS_ROOT)
77 goto out;
78
79 if (!clk->parent)
80 ret = -ENODEV;
81
82out:
83 return ret;
84}
85
86inline unsigned long __clk_get_flags(struct clk *clk)
87{
88 return !clk ? -EINVAL : clk->flags;
89}
90
91static int __clk_is_enabled(struct clk *clk)
92{
93 int ret;
94
95 if (!clk)
96 return -EINVAL;
97
98 /*
99 * .is_enabled is only mandatory for clocks that gate
100 * fall back to software usage counter if .is_enabled is missing
101 */
102 if (!clk->ops->is_enabled) {
103 ret = clk->enable_count ? 1 : 0;
104 goto out;
105 }
106
107 ret = clk->ops->is_enabled(clk->hw);
108out:
109 return ret;
110}
111
112/*** clk api ***/
113
114static void __clk_disable(struct clk *clk)
115{
116 if (!clk)
117 return;
118
119 if (WARN_ON(clk->enable_count == 0))
120 return;
121
122 if (--clk->enable_count > 0)
123 return;
124
125 if (clk->ops->disable)
126 clk->ops->disable(clk->hw);
127
128 __clk_disable(clk->parent);
129}
130
131/**
132 * clk_disable - gate a clock
133 * @clk: the clk being gated
134 *
135 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
136 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
137 * clk if the operation is fast and will never sleep. One example is a
138 * SoC-internal clk which is controlled via simple register writes. In the
139 * complex case a clk gate operation may require a fast and a slow part. It is
140 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
141 * In fact clk_disable must be called before clk_unprepare.
142 */
143void clk_disable(struct clk *clk)
144{
145 unsigned long flags;
146
147 spin_lock_irqsave(&enable_lock, flags);
148 __clk_disable(clk);
149 spin_unlock_irqrestore(&enable_lock, flags);
150}
151EXPORT_SYMBOL_GPL(clk_disable);
152
153static int __clk_enable(struct clk *clk)
154{
155 int ret = 0;
156
157 if (!clk)
158 return 0;
159
160 if (clk->enable_count == 0) {
161 ret = __clk_enable(clk->parent);
162
163 if (ret)
164 return ret;
165
166 if (clk->ops->enable) {
167 ret = clk->ops->enable(clk->hw);
168 if (ret) {
169 __clk_disable(clk->parent);
170 return ret;
171 }
172 }
173 }
174
175 clk->enable_count++;
176 return 0;
177}
178
179/**
180 * clk_enable - ungate a clock
181 * @clk: the clk being ungated
182 *
183 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
184 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
185 * if the operation will never sleep. One example is a SoC-internal clk which
186 * is controlled via simple register writes. In the complex case a clk ungate
187 * operation may require a fast and a slow part. It is this reason that
188 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
189 * must be called before clk_enable. Returns 0 on success, -EERROR
190 * otherwise.
191 */
192int clk_enable(struct clk *clk)
193{
194 unsigned long flags;
195 int ret;
196
197 spin_lock_irqsave(&enable_lock, flags);
198 ret = __clk_enable(clk);
199 spin_unlock_irqrestore(&enable_lock, flags);
200
201 return ret;
202}
203EXPORT_SYMBOL(clk_enable);
204
205int clk_is_enabled(struct clk *clk)
206{
207 unsigned long flags;
208 int ret;
209
210 spin_lock_irqsave(&enable_lock, flags);
211 ret = __clk_is_enabled(clk);
212 spin_unlock_irqrestore(&enable_lock, flags);
213
214 return ret;
215}
216EXPORT_SYMBOL(clk_is_enabled);
217
218/**
219 * clk_get_rate - return the rate of clk
220 * @clk: the clk whose rate is being returned
221 *
222 * Simply returns the cached rate of the clk. Does not query the hardware. If
223 * clk is NULL then returns -EINVAL.
224 */
225unsigned long clk_get_rate(struct clk *clk)
226{
227 unsigned long rate;
228
229 mutex_lock(&prepare_lock);
230 rate = __clk_get_rate(clk);
231 mutex_unlock(&prepare_lock);
232
233 return rate;
234}
235EXPORT_SYMBOL_GPL(clk_get_rate);
236
237/**
238 * __clk_round_rate - round the given rate for a clk
239 * @clk: round the rate of this clock
240 *
241 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
242 */
243static unsigned long __clk_round_rate(struct clk *clk, unsigned long rate, struct clk **best_parent)
244{
245 int i;
246 unsigned long tmp_rate;
247 unsigned long tmp_rate2;
248 unsigned long best_rate;
249 unsigned long diff_rate;
250 struct clk *old_parent;
251 struct clk *sel_parent;
252
253 if (!clk)
254 return -EINVAL;
255
256 if (!clk->ops->round_rate)
257 return clk->rate;
258
259 sel_parent = clk->parent;
260
261 if (clk->num_parents > 1 && (clk->flags & CLK_AUTO_ROUND_PARENT)) {
262 old_parent = clk->parent;
263 best_rate = 0;
264 diff_rate = rate;
265
266 for ( i=0; i < clk->num_parents; i++) {
267 clk->parent = clk->parents[i];
268 tmp_rate = clk->ops->round_rate(clk->hw, rate, NULL);
269
270 if (tmp_rate > rate)
271 tmp_rate2 = tmp_rate - rate;
272 else
273 tmp_rate2 = rate - tmp_rate;
274
275 if (tmp_rate2 == 0) {
276 best_rate = tmp_rate;
277 sel_parent = clk->parent;
278 break;
279 } else if (diff_rate > tmp_rate2) {
280 diff_rate = tmp_rate2;
281 best_rate = tmp_rate; /*sel match clock*/
282 sel_parent = clk->parent;
283 } else if (best_rate == 0) {
284 best_rate = tmp_rate;
285 }
286 }
287 clk->parent = old_parent;
288 }
289 else
290 {
291 best_rate = clk->ops->round_rate(clk->hw, rate, NULL);
292 }
293
294 if(best_parent)
295 *best_parent = sel_parent;
296 return best_rate;
297}
298
299/**
300 * clk_round_rate - round the given rate for a clk
301 * @clk: the clk for which we are rounding a rate
302 * @rate: the rate which is to be rounded
303 *
304 * Takes in a rate as input and rounds it to a rate that the clk can actually
305 * use which is then returned. If clk doesn't support round_rate operation
306 * then the parent rate is returned.
307 */
308long clk_round_rate(struct clk *clk, unsigned long rate)
309{
310 unsigned long ret;
311
312 mutex_lock(&prepare_lock);
313 ret = __clk_round_rate(clk, rate, NULL);
314 mutex_unlock(&prepare_lock);
315
316 return ret;
317}
318EXPORT_SYMBOL_GPL(clk_round_rate);
319
320
321/**
322 * clk_get_parent - return the parent of a clk
323 * @clk: the clk whose parent gets returned
324 *
325 * Simply returns clk->parent. Returns NULL if clk is NULL.
326 */
327struct clk *clk_get_parent(struct clk *clk)
328{
329 struct clk *parent;
330
331 mutex_lock(&prepare_lock);
332 parent = __clk_get_parent(clk);
333 mutex_unlock(&prepare_lock);
334
335 return parent;
336}
337EXPORT_SYMBOL_GPL(clk_get_parent);
338
339/**
340 * __clk_recalc_rates
341 * @clk: first clk in the subtree
342 * @msg: notification type (see include/linux/clk.h)
343 *
344 * Walks the subtree of clks starting with clk and recalculates rates as it
345 * goes. Note that if a clk does not implement the .recalc_rate callback then
346 * it is assumed that the clock will take on the rate of it's parent.
347 *
348 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
349 * if necessary.
350 *
351 * Caller must hold prepare_lock.
352 */
353static void __clk_recalc_rates(struct clk *clk)
354{
355 unsigned long parent_rate = 0;
356
357 if (clk->parent) {
358 __clk_recalc_rates(clk->parent);
359 parent_rate = clk->parent->rate;
360 } else {
361 parent_rate = clk->rate;
362 }
363
364 if (clk->ops->recalc_rate)
365 clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
366 else
367 clk->rate = parent_rate;
368
369}
370
/*
 * Switch @clk's hardware input to @parent.
 *
 * Locates @parent in the possible-parents list (by cached pointer, or by
 * name with the cache refreshed on a hit), keeps the enable reference
 * balanced across the switch (new parent enabled before, old parent
 * released after), and programs the mux via .set_parent.
 *
 * Returns the .set_parent result, or -EINVAL when @clk has no parents
 * cache or @parent is not a possible parent. Does NOT update clk->parent;
 * the caller is responsible for that on success.
 *
 * Caller must hold prepare_lock; .set_parent must be non-NULL (callers
 * check this).
 */
static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	if (!clk->parents)
		return -EINVAL;

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 * NOTE(review): the inner "clk->parents" tests are redundant —
	 * the NULL case already returned above.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents) {
				/* cache the pointer; clk_put drops the
				 * reference clk_get just took */
				clk->parents[i] = clk_get(NULL, parent->name);
				clk_put(clk->parents[i]);
			}
			break;
		}
	}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* if clk is running, keep it fed: enable new parent first */
	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return ret;
}
424
425/**
426 * clk_set_parent - switch the parent of a mux clk
427 * @clk: the mux clk whose input we are switching
428 * @parent: the new input to clk
429 *
430 * Re-parent clk to use parent as it's new input source. If clk has the
431 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
432 * operation to succeed. After successfully changing clk's parent
433 * clk_set_parent will update the clk topology, sysfs topology and
434 * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
435 * success, -EERROR otherwise.
436 */
437int clk_set_parent(struct clk *clk, struct clk *parent)
438{
439 int ret = 0;
440
441 if (!clk || !clk->ops)
442 return -EINVAL;
443
444 if (!clk->ops->set_parent || !clk->num_parents)
445 return -ENOSYS;
446
447 /* prevent racing with updates to the clock topology */
448 mutex_lock(&prepare_lock);
449
450 if (clk->parent == parent)
451 goto out;
452
453 if(clk->flags & CLK_AUTO_ROUND_PARENT) {
454 clk->flags &= ~CLK_AUTO_ROUND_PARENT;/*once set parent, cancel round parent*/
455 }
456
457 ret = __clk_set_parent(clk, parent);
458
459 if (ret) {
460 __clk_recalc_rates(clk);
461 goto out;
462 }
463
464 clk->parent = parent;
465
466 __clk_recalc_rates(clk);
467
468out:
469 mutex_unlock(&prepare_lock);
470
471 return ret;
472}
473EXPORT_SYMBOL_GPL(clk_set_parent);
474/**
475 * clk_set_rate - specify a new rate for clk
476 * @clk: the clk whose rate is being changed
477 * @rate: the new rate for clk
478 *
479 * In the simplest case clk_set_rate will only change the rate of clk.
480 *
481 * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
482 * will fail; only when the clk is disabled will it be able to change
483 * its rate.
484 *
485 * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
486 * recursively propagate up to clk's parent; whether or not this happens
487 * depends on the outcome of clk's .round_rate implementation. If
488 * *parent_rate is 0 after calling .round_rate then upstream parent
489 * propagation is ignored. If *parent_rate comes back with a new rate
490 * for clk's parent then we propagate up to clk's parent and set it's
491 * rate. Upward propagation will continue until either a clk does not
492 * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
493 * changes to clk's parent_rate. If there is a failure during upstream
494 * propagation then clk_set_rate will unwind and restore each clk's rate
495 * that had been successfully changed. Afterwards a rate change abort
496 * notification will be propagated downstream, starting from the clk
497 * that failed.
498 *
499 * At the end of all of the rate setting, clk_set_rate internally calls
500 * __clk_recalc_rates and propagates the rate changes downstream,
501 * starting from the highest clk whose rate was changed. This has the
502 * added benefit of propagating post-rate change notifiers.
503 *
504 * Note that while post-rate change and rate change abort notifications
505 * are guaranteed to be sent to a clk only once per call to
506 * clk_set_rate, pre-change notifications will be sent for every clk
507 * whose rate is changed. Stacking pre-change notifications is noisy
508 * for the drivers subscribed to them, but this allows drivers to react
509 * to intermediate clk rate changes up until the point where the final
510 * rate is achieved at the end of upstream propagation.
511 *
512 * Returns 0 on success, -EERROR otherwise.
513 */
514int clk_set_rate(struct clk *clk, unsigned long rate)
515{
516 struct clk *best_parent = NULL;
517
518 /* prevent racing with updates to the clock topology */
519 mutex_lock(&prepare_lock);
520
521 /* bail early if nothing to do */
522 if (clk == NULL || rate == clk->rate)
523 goto out;
524
525 clk->new_rate = __clk_round_rate(clk, rate, &best_parent);
526
527 if(clk->num_parents && best_parent != NULL && best_parent != clk->parent) {
528 __clk_set_parent(clk, best_parent);
529 clk->parent = best_parent;
530 }
531
532 /* change the rates */
533 if (clk->ops->set_rate)
534 clk->ops->set_rate(clk->hw, clk->new_rate);
535
536 if (clk->ops->recalc_rate)
537 __clk_recalc_rates(clk);
538
539 WARN(!clk->parent,"%s,clk %s 's parent is NULL.\n",__func__, clk->name);
540
541 mutex_unlock(&prepare_lock);
542
543 return 0;
544out:
545 mutex_unlock(&prepare_lock);
546
547 return -ENOSYS;
548}
549EXPORT_SYMBOL(clk_set_rate);
550/**
551 * clk_set_auto_gate - set the clock auto gate
552 * @clk: clock source
553 * @enable: enable auto gate true or false
554 *
555 * Returns success (0) or negative errno.
556 */
557int clk_set_auto_gate(struct clk *clk, bool enable)
558{
559 unsigned long flags;
560 int ret = 0;
561
562 spin_lock_irqsave(&enable_lock, flags);
563 if (clk->ops->set_auto_gate)
564 ret = clk->ops->set_auto_gate(clk->hw, enable);
565 spin_unlock_irqrestore(&enable_lock, flags);
566
567 return ret;
568}
569EXPORT_SYMBOL_GPL(clk_set_auto_gate);
570
571/**
572 * clk_register - allocate a new clock, register it and return an opaque cookie
573 * @dev: device that is registering this clock
574 * @name: clock name
575 * @ops: operations this clock supports
576 * @hw: link to hardware-specific clock data
577 * @parent_names: array of string names for all possible parents
578 * @num_parents: number of possible parents
579 * @flags: framework-level hints and quirks
580 *
581 * clk_register is the primary interface for populating the clock tree with new
582 * clock nodes. It returns a pointer to the newly allocated struct clk which
583 * cannot be dereferenced by driver code but may be used in conjuction with the
584 * rest of the clock API.
585 */
586static int clk_register(struct clk *clk)
587{
588 int i, index;
589
590 if(clk == NULL)
591 return -EINVAL;
592
593 if(clk->hw != NULL)
594 clk->hw->clk = clk;
595
596 /* throw a WARN if any entries in parent_names are NULL */
597 for (i = 0; i < clk->num_parents; i++)
598 WARN(!clk->parent_names[i],
599 "%s: invalid NULL in %s's .parent_names\n",
600 __func__, clk->name);
601
602 /*
603 * Allocate an array of struct clk *'s to avoid unnecessary string
604 * look-ups of clk's possible parents. This can fail for clocks passed
605 * in to clk_init during early boot; thus any access to clk->parents[]
606 * must always check for a NULL pointer and try to populate it if
607 * necessary.
608 *
609 * If clk->parents is not NULL we skip this entire block. This allows
610 * for clock drivers to statically initialize clk->parents.
611 */
612 for (i = 0; i < clk->num_parents; i++){
613 clk->parents[i] = clk_get(NULL,clk->parent_names[i]);
614 clk_put(clk->parents[i]);
615 }
616
617 /*get clk's parent used*/
618 if(clk->num_parents) {
619 index = clk->ops->get_parent(clk->hw);
620 if(index < clk->num_parents)
621 clk->parent = clk->parents[index];
622 else
623 WARN(1,"clk %s get parent error.\n",clk->name);
624 }
625
626 /*
627 * Set clk's rate. The preferred method is to use .recalc_rate. For
628 * simple clocks and lazy developers the default fallback is to use the
629 * parent's rate. If a clock doesn't have a parent (or is orphaned)
630 * then rate is set to zero.
631 */
632 __clk_recalc_rates(clk);
633
634 /*
635 * optional platform-specific magic
636 *
637 * The .init callback is not used by any of the basic clock types, but
638 * exists for weird hardware that must perform initialization magic.
639 * Please consider other ways of solving initialization problems before
640 * using this callback, as it's use is discouraged.
641 */
642 if (clk->ops->init)
643 clk->ops->init(clk->hw);
644
645 return 0;
646}
647/**
648 * clk_disable_unused - disable unused clk
649 */
650static void clk_disable_unused(struct clk *clk)
651{
652 unsigned long flags;
653
654 if (!clk)
655 goto out;
656
657 spin_lock_irqsave(&enable_lock, flags);
658
659 if (clk->enable_count)
660 goto unlock_out;
661
662 if (clk->flags & CLK_IGNORE_UNUSED)
663 goto unlock_out;
664
665 if (__clk_is_enabled(clk) && clk->ops->disable) {
666 clk->ops->disable(clk->hw);
667 printk(KERN_DEBUG "clk %s: start disabled\n",clk->name);
668 }
669
670unlock_out:
671 spin_unlock_irqrestore(&enable_lock, flags);
672
673out:
674 return;
675}
676/**
677 * clk_show - print clock debug info
678 */
679static int clk_show(struct seq_file *s, void *v)
680{
681 struct clk *clk;
682
683 seq_printf(s, "%-20s %-20s %-9s %-9s\n", "name","parent","enable","rate");
684 mutex_lock(&prepare_lock);
685 list_for_each_entry(clk, &clocks, list) {
686 if(clk->parent)
687 seq_printf(s, "%-20s %-20s %-9u %-9lu\n", clk->name, clk->parent->name, \
688 clk->enable_count, clk->rate);
689 else
690 seq_printf(s, "%-20s %-20s %-9u %-9lu\n", clk->name, "root", \
691 clk->enable_count, clk->rate);
692 }
693 mutex_unlock(&prepare_lock);
694 return 0;
695}
696
697/**
698 * clk_open
699 */
700static int clk_open(struct inode *inode, struct file *file)
701{
702 return single_open(file, clk_show, inode->i_private);
703}
704
705/**
706 * clock debug fs
707 */
708struct dentry * clk_debugfs = NULL;
709static const struct file_operations clk_debugfs_fops = {
710 .owner = THIS_MODULE,
711 .open = clk_open,
712 .read = seq_read,
713 .llseek = seq_lseek,
714 .release = single_release,
715};
716static void clk_debugfs_init(void)
717{
718 clk_debugfs = debugfs_create_file("clocks", S_IRUSR, NULL,NULL,
719 &clk_debugfs_fops);
720 return ;
721}
722
723/**
724 * "/sys/zte/test/clk_info"
725 */
726static ssize_t clk_info_show(struct kobject *kobj, struct kobj_attribute *attr,
727 char *buf)
728{
729 char *s = buf;
730 struct clk *clk;
731
732 s += sprintf(s, "%-20s %-20s %-9s %-9s\n", "name","parent","enable","rate");
733 mutex_lock(&prepare_lock);
734 list_for_each_entry(clk, &clocks, list) {
735 if(clk->parent)
736 s += sprintf(s, "%-20s %-20s %-9u %-9lu\n", clk->name, clk->parent->name, \
737 clk->enable_count, clk->rate);
738 else
739 s += sprintf(s, "%-20s %-20s %-9u %-9lu\n", clk->name, "root", \
740 clk->enable_count, clk->rate);
741 }
742 mutex_unlock(&prepare_lock);
743
744 return (s - buf);
745}
746
747static ssize_t clk_info_store(struct kobject *kobj, struct kobj_attribute *attr,
748 const char *buf, size_t n)
749{
750
751 return (n);
752}
753
/* Declares clk_info_attr wired to clk_info_show/clk_info_store —
 * presumably a platform macro from mach/board.h or mach/debug.h; TODO
 * confirm its definition. */
zte_attr(clk_info);

/* Attribute table exposed under the zx test kobject. */
static struct attribute *g[] = {
	&clk_info_attr.attr,
	NULL,
};

static struct attribute_group zx29_clk_attribute_group = {
	.attrs = g,
};
764int __init zx_clk_test_init(void)
765{
766 int ret;
767
768 ret = sysfs_create_group(zx_test_kobj, &zx29_clk_attribute_group);
769
770 pr_info("[DEBUG] create test clk sysfs interface OK.\n");
771
772 return 0;
773}
774
775/* zx29 clocks init*/
776
777static void __init __zx29_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks)
778{
779 unsigned n;
780 struct clk *clk;
781
782 mutex_lock(&prepare_lock);
783 /*register to devlist*/
784 for (n = 0; n < num_clocks; n++) {
785 clkdev_add(&clock_tbl[n]);
786 list_add_tail(&clock_tbl[n].clk->list, &clocks);
787 }
788 /*register to clktree*/
789 for (n = 0; n < num_clocks; n++) {
790 clk = clock_tbl[n].clk;
791 clk_register(clk);
792 }
793 mutex_unlock(&prepare_lock);
794
795 return ;
796}
797
/*
 * Board entry point: register the peripheral clock table declared in
 * clk.h / board code.
 */
void __init zx29_clock_init(void)
{
	__zx29_clock_init(periph_clocks_lookups, periph_clocks_lookups_num);

	pr_info("[CLK] zx29 tsp clk init ok.\n");
}
804
805/*
806 * Several unused clocks may be active. Turn them off.
807 */
808static int __init zx29_disable_unused_clocks(void)
809{
810 struct clk *clk;
811
812 mutex_lock(&prepare_lock);
813 list_for_each_entry(clk, &clocks, list) {
814 printk(KERN_DEBUG "clk %s: rate = %lu\n",clk->name,clk->rate);
815//zxp clk_disable_unused(clk);
816 }
817 mutex_unlock(&prepare_lock);
818
819 clk_debugfs_init();
820
821 return 0;
822}
823late_initcall(zx29_disable_unused_clocks);