/*
 * Marvell PXA family clocks
 *
 * Copyright (C) 2014 Robert Jarzmik
 *
 * Common clock code for PXA clocks ("CKEN" type clocks + DT)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"

#define KHz 1000
#define MHz (1000 * 1000)

#define MDREFR_K0DB4	(1 << 29)	/* SDCLK0 Divide by 4 Control/Status */
#define MDREFR_K2FREE	(1 << 25)	/* SDRAM Free-Running Control */
#define MDREFR_K1FREE	(1 << 24)	/* SDRAM Free-Running Control */
#define MDREFR_K0FREE	(1 << 23)	/* SDRAM Free-Running Control */
#define MDREFR_SLFRSH	(1 << 22)	/* SDRAM Self-Refresh Control/Status */
#define MDREFR_APD	(1 << 20)	/* SDRAM/SSRAM Auto-Power-Down Enable */
#define MDREFR_K2DB2	(1 << 19)	/* SDCLK2 Divide by 2 Control/Status */
#define MDREFR_K2RUN	(1 << 18)	/* SDCLK2 Run Control/Status */
#define MDREFR_K1DB2	(1 << 17)	/* SDCLK1 Divide by 2 Control/Status */
#define MDREFR_K1RUN	(1 << 16)	/* SDCLK1 Run Control/Status */
#define MDREFR_E1PIN	(1 << 15)	/* SDCKE1 Level Control/Status */
#define MDREFR_K0DB2	(1 << 14)	/* SDCLK0 Divide by 2 Control/Status */
#define MDREFR_K0RUN	(1 << 13)	/* SDCLK0 Run Control/Status */
#define MDREFR_E0PIN	(1 << 12)	/* SDCKE0 Level Control/Status */
#define MDREFR_DB2_MASK	(MDREFR_K2DB2 | MDREFR_K1DB2)
#define MDREFR_DRI_MASK	0xFFF

static DEFINE_SPINLOCK(pxa_clk_lock);

static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
	.clks = pxa_clocks,
	.clk_num = CLK_MAX,
};

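/*
 * struct pxa_clk - PXA CKEN-style clock
 * @hw: clk_hw used for both the rate and mux parts of the composite clock
 * @lp: fixed factor applied while the SoC is in low-power mode
 * @hp: fixed factor applied otherwise
 * @gate: CKEN enable/disable gate
 * @is_in_low_power: reports whether the SoC is currently in low-power mode;
 *		     when NULL the @lp factor is always used
 */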
struct pxa_clk {
	struct clk_hw hw;
	struct clk_fixed_factor lp;
	struct clk_fixed_factor hp;
	struct clk_gate gate;
	bool (*is_in_low_power)(void);
};

#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)

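/*
 * The output rate of a CKEN clock is its parent rate scaled by either the
 * low-power or the high-power fixed factor, chosen according to
 * is_in_low_power(). The temporary __clk_hw_set_clk() lets the generic
 * clk_fixed_factor_ops do the actual mult/div computation.
 */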
static unsigned long cken_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);
	struct clk_fixed_factor *fix;

	if (!pclk->is_in_low_power || pclk->is_in_low_power())
		fix = &pclk->lp;
	else
		fix = &pclk->hp;
	__clk_hw_set_clk(&fix->hw, hw);
	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
}

static struct clk_ops cken_rate_ops = {
	.recalc_rate = cken_recalc_rate,
};

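/*
 * Parent selection mirrors the rate selection above: index 0 is the
 * low-power parent, index 1 the high-power one. Without an
 * is_in_low_power() callback the clock always reports parent 0.
 */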
static u8 cken_get_parent(struct clk_hw *hw)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);

	if (!pclk->is_in_low_power)
		return 0;
	return pclk->is_in_low_power() ? 0 : 1;
}

static struct clk_ops cken_mux_ops = {
	.get_parent = cken_get_parent,
	.set_parent = dummy_clk_set_parent,
};

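/*
 * Store the clock in the onecell table used by the DT provider (unless the
 * id is CLK_NONE) and register a clkdev lookup for legacy clk_get() users.
 */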
void __init clkdev_pxa_register(int ckid, const char *con_id,
				const char *dev_id, struct clk *clk)
{
	if (!IS_ERR(clk) && (ckid != CLK_NONE))
		pxa_clocks[ckid] = clk;
	if (!IS_ERR(clk))
		clk_register_clkdev(clk, con_id, dev_id);
}

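/*
 * Each CKEN descriptor is turned into a composite clock: a two-parent mux
 * (low-power/high-power), a rate stage backed by the fixed factors above,
 * and a CKEN register gate protected by pxa_clk_lock.
 */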
int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
{
	int i;
	struct pxa_clk *pxa_clk;
	struct clk *clk;

	for (i = 0; i < nb_clks; i++) {
		pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
		pxa_clk->is_in_low_power = clks[i].is_in_low_power;
		pxa_clk->lp = clks[i].lp;
		pxa_clk->hp = clks[i].hp;
		pxa_clk->gate = clks[i].gate;
		pxa_clk->gate.lock = &pxa_clk_lock;
		clk = clk_register_composite(NULL, clks[i].name,
					     clks[i].parent_names, 2,
					     &pxa_clk->hw, &cken_mux_ops,
					     &pxa_clk->hw, &cken_rate_ops,
					     &pxa_clk->gate.hw, &clk_gate_ops,
					     clks[i].flags);
		clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
				    clks[i].dev_id, clk);
	}
	return 0;
}

void __init clk_pxa_dt_common_init(struct device_node *np)
{
	of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}

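/*
 * Switch the CPU core in or out of turbo mode by rewriting the CLKCFG
 * coprocessor register (cp14, c6) with the frequency change sequence (FCS)
 * bit set.
 */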
void pxa2xx_core_turbo_switch(bool on)
{
	unsigned long flags;
	unsigned int unused, clkcfg;

	local_irq_save(flags);

	asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
	clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
	if (on)
		clkcfg |= CLKCFG_TURBO;
	clkcfg |= CLKCFG_FCS;

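	/*
	 * The branch/".align 5" dance places the mcr on a 32-byte boundary,
	 * keeping the CLKCFG write and its surrounding instructions within a
	 * single cache line while the core clock is being reconfigured.
	 */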
	asm volatile(
	"	b	2f\n"
	"	.align	5\n"
	"1:	mcr	p14, 0, %1, c6, c0, 0\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
	: "=&r" (unused) : "r" (clkcfg));

	local_irq_restore(flags);
}

void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
			u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
			void __iomem *cccr)
{
	unsigned int clkcfg = freq->clkcfg;
	unsigned int unused, preset_mdrefr, postset_mdrefr;
	unsigned long flags;

	local_irq_save(flags);

	/* Calculate the next MDREFR.  If we're slowing down the SDRAM clock
	 * we need to preset the smaller DRI before the change.  If we're
	 * speeding up we need to set the larger DRI value after the change.
	 */
	preset_mdrefr = postset_mdrefr = readl(mdrefr);
	if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
		preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
		preset_mdrefr |= mdrefr_dri(freq->membus_khz);
	}
	postset_mdrefr =
		(postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(freq->membus_khz);

	/* If we're dividing the memory clock by two for the SDRAM clock, this
	 * must be set prior to the change.  Clearing the divide must be done
	 * after the change.
	 */
	if (freq->div2) {
		preset_mdrefr  |= MDREFR_DB2_MASK;
		postset_mdrefr |= MDREFR_DB2_MASK;
	} else {
		postset_mdrefr &= ~MDREFR_DB2_MASK;
	}

	/* Set the new CCCR and prepare CLKCFG */
	writel(freq->cccr, cccr);

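	/*
	 * The first ldr touches MDREFR before the switch; the cache-line
	 * aligned sequence then writes the preset MDREFR, triggers the
	 * frequency change through CLKCFG, and writes the post-change MDREFR.
	 */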
	asm volatile(
	"	ldr	r4, [%1]\n"
	"	b	2f\n"
	"	.align	5\n"
	"1:	str	%3, [%1]		/* preset the MDREFR */\n"
	"	mcr	p14, 0, %2, c6, c0, 0	/* set CLKCFG[FCS] */\n"
	"	str	%4, [%1]		/* postset the MDREFR */\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
	: "=&r" (unused)
	: "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
	  "r" (postset_mdrefr)
	: "r4", "r5");

	local_irq_restore(flags);
}

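/*
 * Pick the operating point whose CPLL rate matches the request exactly if
 * possible; otherwise fall back to the closest rate below the request
 * within [min_rate, max_rate], then to the closest rate above it.
 */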
int pxa2xx_determine_rate(struct clk_rate_request *req,
			  struct pxa2xx_freq *freqs, int nb_freqs)
{
	int i, closest_below = -1, closest_above = -1;
	unsigned long rate;

	for (i = 0; i < nb_freqs; i++) {
		rate = freqs[i].cpll;
		if (rate == req->rate)
			break;
		if (rate < req->min_rate)
			continue;
		if (rate > req->max_rate)
			continue;
		if (rate <= req->rate)
			closest_below = i;
		if ((rate >= req->rate) && (closest_above == -1))
			closest_above = i;
	}

	req->best_parent_hw = NULL;

	if (i < nb_freqs) {
		rate = req->rate;
	} else if (closest_below >= 0) {
		rate = freqs[closest_below].cpll;
	} else if (closest_above >= 0) {
		rate = freqs[closest_above].cpll;
	} else {
		pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
		return -EINVAL;
	}

	pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
	req->rate = rate;

	return 0;
}