/*
 * Copyright 2014 Google, Inc
 * Author: Alexandru M Stan <amstan@chromium.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "clk.h"
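
/*
 * struct rockchip_mmc_clock - phase clock for one MMC drive/sample line
 * @hw:			handle between the common clk framework and this clock
 * @reg:		register containing the phase control field
 * @id:			numerical clock id (not referenced in this file)
 * @shift:		bit offset of this clock's phase field within @reg
 * @cached_phase:	phase saved across a parent rate change
 * @clk_rate_change_nb:	notifier used to restore @cached_phase
 */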
struct rockchip_mmc_clock {
	struct clk_hw		hw;
	void __iomem		*reg;
	int			id;
	int			shift;
	int			cached_phase;
	struct notifier_block	clk_rate_change_nb;
};

#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)

#define RK3288_MMC_CLKGEN_DIV 2

static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate / RK3288_MMC_CLKGEN_DIV;
}

#define ROCKCHIP_MMC_DELAY_SEL		BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK	0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET	2
#define ROCKCHIP_MMC_DELAYNUM_MASK	(0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
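
/*
 * Layout of the phase control word, as implied by the masks above: bits
 * [1:0] select the coarse phase in 90-degree steps, bits [9:2] hold the
 * number of fine delay elements, and bit 10 enables the fine delay chain.
 */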

#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
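
/*
 * Worked example of the assumption above (illustrative): a requested 45
 * degrees is converted into a number of nominally-60ps elements, so if each
 * element is really 44ps the realized phase is 45 * 44 / 60 = 33 degrees,
 * and if it is really 77ps it is 45 * 77 / 60 ~= 57.8 degrees.
 */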

static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_get_rate(hw->clk);
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* See the comment for rockchip_mmc_set_phase below */
	if (!rate)
		return -EINVAL;

	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;

	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 10000 */
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
					36 * (rate / 1000000);
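
		/*
		 * Derivation (informal): one delay element is
		 * ROCKCHIP_MMC_DELAY_ELEMENT_PSEC ps and one clock period
		 * (PSECS_PER_SEC / rate, in ps) is 360 degrees, so each
		 * element is worth 360 * ELEMENT_PSEC * rate / PSECS_PER_SEC
		 * degrees. "factor" is that value scaled by 10000 to keep
		 * integer precision; the scaling is divided back out in the
		 * DIV_ROUND_CLOSEST() below.
		 */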

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 10000);
	}

	return degrees % 360;
}

static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_get_rate(hw->clk);
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the MMC
	 * host to the card, and it expects the phase clock to inherit its
	 * rate from its parent, namely the MMC host's output clock
	 * provider. However, things may go wrong if
	 * (1) it is orphaned, or
	 * (2) it is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the problem we
	 * face most often and which makes unstable mmc tuning results hard
	 * for people to debug.
	 */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ non-monotonic
	 * though, so we should be OK. Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 *   1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 *   .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if delay is actually 77ps):
	 *   1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90.0
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
			(rate / 1000) * 36 *
			(ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));
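
	/*
	 * Informal check of the expression above: it is an overflow-safe
	 * rewrite of remainder / 360 * (PSECS_PER_SEC / rate) /
	 * ROCKCHIP_MMC_DELAY_ELEMENT_PSEC, i.e. the requested fraction of a
	 * clock period expressed as a count of nominal 60ps delay elements.
	 */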

	delay_num = (u8) min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;
	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
	       mmc_clock->reg);

	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
		clk_hw_get_name(hw), degrees, delay_num,
		mmc_clock->reg, raw_value >> (mmc_clock->shift),
		rockchip_mmc_get_phase(hw)
	);

	return 0;
}

static const struct clk_ops rockchip_mmc_clk_ops = {
	.recalc_rate	= rockchip_mmc_recalc,
	.get_phase	= rockchip_mmc_get_phase,
	.set_phase	= rockchip_mmc_set_phase,
};

#define to_rockchip_mmc_clock(x) \
	container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)

static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
	struct clk_notifier_data *ndata = data;

	/*
	 * rockchip_mmc_clk is mostly used by mmc controllers to sample
	 * the input data, which expects a fixed phase after the tuning
	 * process. However, if the clock rate is changed, the phase becomes
	 * stale and may break the data sampling, so we try to restore the
	 * phase here for that case, except when
	 * (1) cached_phase is invalid, because we inevitably cached it when
	 * the clock provider was reparented from orphan to its real parent
	 * in the first place. Otherwise we may mess up the initialization of
	 * MMC cards, since we only set the default sample phase and drive
	 * phase later on.
	 * (2) the new rate is higher than the old one. The mmc driver sets
	 * max-frequency to match the board's capability and we can't go
	 * over its head, otherwise the tests smoke out the issue.
	 */
	if (ndata->old_rate <= ndata->new_rate)
		return NOTIFY_DONE;

	if (event == PRE_RATE_CHANGE)
		mmc_clock->cached_phase =
			rockchip_mmc_get_phase(&mmc_clock->hw);
	else if (mmc_clock->cached_phase != -EINVAL &&
		 event == POST_RATE_CHANGE)
		rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);

	return NOTIFY_DONE;
}

struct clk *rockchip_clk_register_mmc(const char *name,
				const char *const *parent_names, u8 num_parents,
				void __iomem *reg, int shift)
{
	struct clk_init_data init;
	struct rockchip_mmc_clock *mmc_clock;
	struct clk *clk;
	int ret;

	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
	if (!mmc_clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;

	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;

	clk = clk_register(NULL, &mmc_clock->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_register;
	}

	mmc_clock->clk_rate_change_nb.notifier_call =
		&rockchip_mmc_clk_rate_notify;
	ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
	if (ret)
		goto err_notifier;

	return clk;
err_notifier:
	clk_unregister(clk);
err_register:
	kfree(mmc_clock);
	return ERR_PTR(ret);
}
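
/*
 * Usage sketch (illustrative, not part of this file): a per-SoC Rockchip
 * clock driver would typically register a phase clock against the MMC
 * controller's output clock, e.g. something like
 *
 *	static const char *const sclk_sdmmc_p[] = { "sclk_sdmmc" };
 *
 *	clk = rockchip_clk_register_mmc("sdmmc_sample", sclk_sdmmc_p,
 *					ARRAY_SIZE(sclk_sdmmc_p),
 *					grf_base + 0x200, 0);
 *
 * The clock names, register base and offset above are hypothetical; in
 * practice these branches are normally described via MMC() entries in the
 * SoC's clock-branch tables rather than by calling this helper directly.
 */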