// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-mediatek.h"

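/**
 * ufs_mtk_parse_dt - parse optional MediaTek properties of the host node
 * @host: MediaTek UFS host instance
 *
 * Currently only reads "mediatek,refclk_ctrl" to select the reference
 * clock control flow, falling back to REF_CLK_SW_MODE when the property
 * is absent. A minimal sketch of a matching device-tree fragment (the
 * unit address is hypothetical; the compatible string and the property
 * name come from this driver):
 *
 *	ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		mediatek,refclk_ctrl = <2>;	(REF_CLK_HW_MODE)
 *	};
 */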
static void ufs_mtk_parse_dt(struct ufs_mtk_host *host)
{
	struct ufs_hba *hba = host->hba;
	struct device *dev = hba->dev;
	int ret;

	/*
	 * Parse the reference clock control setting:
	 * SW mode: 0 (use an external function to control ref-clk)
	 * Half-HW mode: 1 (use the ufshci register to control ref-clk,
	 *		    but it cannot be turned off)
	 * HW mode: 2 (use the ufshci register to control ref-clk)
	 */
	ret = of_property_read_u32(dev->of_node, "mediatek,refclk_ctrl",
				   &host->refclk_ctrl);
	if (ret) {
		dev_dbg(hba->dev,
			"%s: failed to read mediatek,refclk_ctrl, ret=%d\n",
			__func__, ret);
		host->refclk_ctrl = REF_CLK_SW_MODE;
	}
}

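/**
 * ufs_mtk_cfg_unipro_cg - enable or disable UniPro clock gating
 * @hba: host controller instance
 * @enable: true to enable clock gating, false to disable it
 *
 * When enabling, sets the RX symbol, system, and TX clock gating bits in
 * VS_SAVEPOWERCONTROL and clears the forced TX symbol clock request in
 * VS_DEBUGCLOCKENABLE; when disabling, reverts both settings.
 */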
void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

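/**
 * ufs_mtk_bind_mphy - acquire the M-PHY bound to this host
 * @hba: host controller instance
 *
 * Looks up the first PHY referenced by the host's device node. On any
 * failure, host->mphy is reset to NULL.
 *
 * Returns -EPROBE_DEFER if the PHY driver has not probed yet, another
 * negative error code on lookup failure, and zero on success.
 */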
int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, defer probing by returning -EPROBE_DEFER.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
	}

	if (err)
		host->mphy = NULL;

	return err;
}

/**
 * ufs_mtk_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, otherwise disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (!on)
			ret = phy_power_off(host->mphy);
		break;
	case POST_CHANGE:
		if (on)
			ret = phy_power_on(host->mphy);
		break;
	}

	return ret;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on
 * phy power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host;
	struct device *dev = hba->dev;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clocks(true) in ufshcd_hba_init(), thus
	 * the phy clock setup there is skipped.
	 *
	 * Enable the phy clocks specifically here.
	 */
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	ufs_mtk_parse_dt(host);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

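/**
 * ufs_mtk_pre_pwr_change - clamp the negotiated power mode to host limits
 * @hba: host controller instance
 * @dev_max_params: maximum capabilities reported by the device
 * @dev_req_params: resulting parameters to request from the device
 *
 * Fills in the MediaTek host capability limits and lets
 * ufshcd_get_pwr_dev_param() compute the common denominator between the
 * host and the device.
 *
 * Returns 0 on success, non-zero if no agreeable settings were found.
 */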
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_dev_params host_cap;
	int ret;

	host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
	host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
	host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
	host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
	host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
	host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
	host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
	host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
	host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
	host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
	host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
	host_cap.desired_working_mode = UFS_MTK_LIMIT_DESIRED_MODE;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret)
		pr_info("%s: failed to determine capabilities\n", __func__);

	return ret;
}

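/**
 * ufs_mtk_pwr_change_notify - power mode change notifier
 * @hba: host controller instance
 * @stage: PRE_CHANGE or POST_CHANGE
 * @dev_max_params: maximum capabilities reported by the device
 * @dev_req_params: parameters to request from the device
 *
 * Only the PRE_CHANGE stage does any work; POST_CHANGE is a no-op.
 *
 * Returns 0 on success, non-zero on failure.
 */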
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

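/**
 * ufs_mtk_pre_link - host setup before link startup
 * @hba: host controller instance
 *
 * Clears the deep-stall bit in VS_SAVEPOWERCONTROL.
 *
 * Returns 0 on success, non-zero on DME access failure.
 */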
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

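/**
 * ufs_mtk_post_link - host setup after link startup
 * @hba: host controller instance
 *
 * Disables the device-side local TX LCC and enables UniPro clock gating.
 *
 * Always returns 0.
 */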
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* disable device LCC */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);

	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	return 0;
}

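/**
 * ufs_mtk_link_startup_notify - link startup notifier
 * @hba: host controller instance
 * @stage: PRE_CHANGE or POST_CHANGE
 *
 * Dispatches to ufs_mtk_pre_link() or ufs_mtk_post_link().
 *
 * Returns 0 on success, non-zero on failure.
 */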
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

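/**
 * ufs_mtk_refclk_ctrl - request or release the UFS reference clock
 * @hba: host controller instance
 * @on: true to request the clock from SPM, false to release it
 *
 * A no-op in SW mode. In Half-HW mode the xoufs SPM request is simply
 * released, as the clock itself cannot be switched off. In HW mode the
 * request bit is toggled and the function polls for up to
 * REF_CLK_CTRL_TOUT_MS until the ack bit follows it.
 *
 * Returns 0 on success, -EIO if the ack times out.
 */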
static int ufs_mtk_refclk_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long timeout;
	u32 value;

	/* Only the HW and Half-HW flows apply; the SW flow skips this */
	if (host->refclk_ctrl == REF_CLK_SW_MODE)
		goto out;

	/*
	 * Half-HW mode cannot turn off ref-clk; only release the xoufs
	 * SPM request.
	 */
	if (host->refclk_ctrl == REF_CLK_HALF_HW_MODE) {
		ufshcd_writel(hba, XOUFS_RELEASE, REG_UFS_ADDR_XOUFS_ST);
		goto out;
	}

	/*
	 * REG_UFS_ADDR_XOUFS_ST[0] is xoufs_req_s
	 * REG_UFS_ADDR_XOUFS_ST[1] is xoufs_ack_s
	 * xoufs_req_s is used to request the XOUFS clock from SPM.
	 * SW sets xoufs_req_s to trigger a clock request for XOUFS, then
	 * checks that xoufs_ack_s is set, meaning the clock is available.
	 * SW clears xoufs_req_s to trigger a clock release for XOUFS, then
	 * checks that xoufs_ack_s is cleared, meaning the clock is off.
	 */
	if (on)
		ufshcd_writel(hba, XOUFS_REQUEST, REG_UFS_ADDR_XOUFS_ST);
	else
		ufshcd_writel(hba, XOUFS_RELEASE, REG_UFS_ADDR_XOUFS_ST);

	/* Wait for the ack */
	timeout = jiffies + msecs_to_jiffies(REF_CLK_CTRL_TOUT_MS);
	do {
		value = ufshcd_readl(hba, REG_UFS_ADDR_XOUFS_ST);

		/* Bit[1] (ack) shall follow Bit[0] (req) */
		if (((value & XOUFS_ACK) >> 1) == (value & XOUFS_REQUEST))
			goto out;

		/* sleep for 100 to 200 us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/* Timed out waiting for the ack */
	dev_err(hba->dev, "ref-clk ack failed, value = 0x%x\n", value);
	return -EIO;

out:
	return 0;
}

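/**
 * ufs_mtk_suspend - vendor hook for host suspend
 * @hba: host controller instance
 * @pm_op: runtime or system PM operation
 *
 * If the link is in hibern8, powers off the M-PHY and releases the
 * reference clock.
 *
 * Always returns 0.
 */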
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (ufshcd_is_link_hibern8(hba)) {
		phy_power_off(host->mphy);
		ufs_mtk_refclk_ctrl(hba, false);
	}

	return 0;
}

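/**
 * ufs_mtk_resume - vendor hook for host resume
 * @hba: host controller instance
 * @pm_op: runtime or system PM operation
 *
 * If the link is in hibern8, re-requests the reference clock and powers
 * the M-PHY back on, mirroring ufs_mtk_suspend() in reverse order.
 *
 * Always returns 0.
 */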
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (ufshcd_is_link_hibern8(hba)) {
		ufs_mtk_refclk_ctrl(hba, true);
		phy_power_on(host->mphy);
	}

	return 0;
}

/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Returns zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0.
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		/* .owner is set by platform_driver_register() */
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

module_platform_driver(ufs_mtk_pltform);

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");