/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
14
15#include <linux/time.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/phy/phy.h>
19#include <linux/phy/phy-qcom-ufs.h>
20
21#include "ufshcd.h"
22#include "ufshcd-pltfrm.h"
23#include "unipro.h"
24#include "ufs-qcom.h"
25#include "ufshci.h"
26#include "ufs_quirks.h"
27#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
28 (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
29
30enum {
31 TSTBUS_UAWM,
32 TSTBUS_UARM,
33 TSTBUS_TXUC,
34 TSTBUS_RXUC,
35 TSTBUS_DFC,
36 TSTBUS_TRLUT,
37 TSTBUS_TMRLUT,
38 TSTBUS_OCSC,
39 TSTBUS_UTP_HCI,
40 TSTBUS_COMBINED,
41 TSTBUS_WRAPPER,
42 TSTBUS_UNIPRO,
43 TSTBUS_MAX,
44};
45
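/*
 * One entry per UFS host instance, indexed by the platform device id
 * (populated in ufs_qcom_init()).
 */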
46static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
47
48static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
49static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
50static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
51 u32 clk_cycles);
52
53static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
54 const char *prefix, void *priv)
55{
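	/*
	 * Callers pass 'len' as a count of 32-bit registers;
	 * ufshcd_dump_regs() takes a length in bytes, hence the multiply by 4.
	 */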
56 ufshcd_dump_regs(hba, offset, len * 4, prefix);
57}
58
59static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
60{
61 int err = 0;
62
63 err = ufshcd_dme_get(hba,
64 UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
65 if (err)
66 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
67 __func__, err);
68
69 return err;
70}
71
72static int ufs_qcom_host_clk_get(struct device *dev,
73 const char *name, struct clk **clk_out)
74{
75 struct clk *clk;
76 int err = 0;
77
78 clk = devm_clk_get(dev, name);
79 if (IS_ERR(clk)) {
80 err = PTR_ERR(clk);
81 dev_err(dev, "%s: failed to get %s err %d",
82 __func__, name, err);
83 } else {
84 *clk_out = clk;
85 }
86
87 return err;
88}
89
90static int ufs_qcom_host_clk_enable(struct device *dev,
91 const char *name, struct clk *clk)
92{
93 int err = 0;
94
95 err = clk_prepare_enable(clk);
96 if (err)
97 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
98
99 return err;
100}
101
102static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
103{
104 if (!host->is_lane_clks_enabled)
105 return;
106
107 if (host->hba->lanes_per_direction > 1)
108 clk_disable_unprepare(host->tx_l1_sync_clk);
109 clk_disable_unprepare(host->tx_l0_sync_clk);
110 if (host->hba->lanes_per_direction > 1)
111 clk_disable_unprepare(host->rx_l1_sync_clk);
112 clk_disable_unprepare(host->rx_l0_sync_clk);
113
114 host->is_lane_clks_enabled = false;
115}
116
117static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
118{
119 int err = 0;
120 struct device *dev = host->hba->dev;
121
122 if (host->is_lane_clks_enabled)
123 return 0;
124
125 err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
126 host->rx_l0_sync_clk);
127 if (err)
128 goto out;
129
130 err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
131 host->tx_l0_sync_clk);
132 if (err)
133 goto disable_rx_l0;
134
135 if (host->hba->lanes_per_direction > 1) {
136 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
137 host->rx_l1_sync_clk);
138 if (err)
139 goto disable_tx_l0;
140
141 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
142 host->tx_l1_sync_clk);
143 if (err)
144 goto disable_rx_l1;
145 }
146
147 host->is_lane_clks_enabled = true;
148 goto out;
149
150disable_rx_l1:
151 if (host->hba->lanes_per_direction > 1)
152 clk_disable_unprepare(host->rx_l1_sync_clk);
153disable_tx_l0:
154 clk_disable_unprepare(host->tx_l0_sync_clk);
155disable_rx_l0:
156 clk_disable_unprepare(host->rx_l0_sync_clk);
157out:
158 return err;
159}
160
161static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
162{
163 int err = 0;
164 struct device *dev = host->hba->dev;
165
166 err = ufs_qcom_host_clk_get(dev,
167 "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
168 if (err)
169 goto out;
170
171 err = ufs_qcom_host_clk_get(dev,
172 "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
173 if (err)
174 goto out;
175
176 /* In case of single lane per direction, don't read lane1 clocks */
177 if (host->hba->lanes_per_direction > 1) {
178 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
179 &host->rx_l1_sync_clk);
180 if (err)
181 goto out;
182
183 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
184 &host->tx_l1_sync_clk);
185 }
186out:
187 return err;
188}
189
190static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
191{
192 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
193 struct phy *phy = host->generic_phy;
194 u32 tx_lanes;
195 int err = 0;
196
197 err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
198 if (err)
199 goto out;
200
201 err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
202 if (err)
203 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
204 __func__);
205
206out:
207 return err;
208}
209
210static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
211{
212 int err;
213 u32 tx_fsm_val = 0;
214 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
215
216 do {
217 err = ufshcd_dme_get(hba,
218 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
219 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
220 &tx_fsm_val);
221 if (err || tx_fsm_val == TX_FSM_HIBERN8)
222 break;
223
224 /* sleep for max. 200us */
225 usleep_range(100, 200);
226 } while (time_before(jiffies, timeout));
227
	/*
	 * We might have scheduled out for a long time during polling, so
	 * check the state again.
	 */
232 if (time_after(jiffies, timeout))
233 err = ufshcd_dme_get(hba,
234 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
235 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
236 &tx_fsm_val);
237
238 if (err) {
239 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
240 __func__, err);
241 } else if (tx_fsm_val != TX_FSM_HIBERN8) {
242 err = tx_fsm_val;
243 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
244 __func__, err);
245 }
246
247 return err;
248}
249
250static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
251{
252 ufshcd_rmwl(host->hba, QUNIPRO_SEL,
253 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
254 REG_UFS_CFG1);
255 /* make sure above configuration is applied before we return */
256 mb();
257}
258
259static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
260{
261 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
262 struct phy *phy = host->generic_phy;
263 int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);
266
267 if (is_rate_B)
268 phy_set_mode(phy, PHY_MODE_UFS_HS_B);
269
270 /* Assert PHY reset and apply PHY calibration values */
271 ufs_qcom_assert_reset(hba);
272 /* provide 1ms delay to let the reset pulse propagate */
273 usleep_range(1000, 1100);
274
275 /* phy initialization - calibrate the phy */
276 ret = phy_init(phy);
277 if (ret) {
278 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
279 __func__, ret);
280 goto out;
281 }
282
283 /* De-assert PHY reset and start serdes */
284 ufs_qcom_deassert_reset(hba);
285
286 /*
287 * after reset deassertion, phy will need all ref clocks,
288 * voltage, current to settle down before starting serdes.
289 */
290 usleep_range(1000, 1100);
291
292 /* power on phy - start serdes and phy's power and clocks */
293 ret = phy_power_on(phy);
294 if (ret) {
295 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
296 __func__, ret);
297 goto out_disable_phy;
298 }
299
300 ufs_qcom_select_unipro_mode(host);
301
302 return 0;
303
304out_disable_phy:
305 ufs_qcom_assert_reset(hba);
306 phy_exit(phy);
307out:
308 return ret;
309}
310
/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
319static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
320{
321 ufshcd_writel(hba,
322 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
323 REG_UFS_CFG2);
324
325 /* Ensure that HW clock gating is enabled before next operations */
326 mb();
327}
328
329static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
330 enum ufs_notify_change_status status)
331{
332 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
333 int err = 0;
334
335 switch (status) {
336 case PRE_CHANGE:
337 ufs_qcom_power_up_sequence(hba);
338 /*
339 * The PHY PLL output is the source of tx/rx lane symbol
340 * clocks, hence, enable the lane clocks only after PHY
341 * is initialized.
342 */
343 err = ufs_qcom_enable_lane_clks(host);
344 break;
345 case POST_CHANGE:
346 /* check if UFS PHY moved from DISABLED to HIBERN8 */
347 err = ufs_qcom_check_hibern8(hba);
348 ufs_qcom_enable_hw_clk_gating(hba);
349
350 break;
351 default:
352 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
353 err = -EINVAL;
354 break;
355 }
356 return err;
357}
358
/**
 * ufs_qcom_cfg_timers - configure UFS QCOM timer registers
 * @hba: host controller instance
 * @gear: current operating gear
 * @hs: current power mode
 * @rate: current operating rate (A or B)
 * @update_link_startup_timer: indicates whether REG_UFS_PA_LINK_STARTUP_TIMER
 * should also be programmed
 *
 * Returns zero for success and non-zero in case of a failure
 */
362static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
363 u32 hs, u32 rate, bool update_link_startup_timer)
364{
365 int ret = 0;
366 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
367 struct ufs_clk_info *clki;
368 u32 core_clk_period_in_ns;
369 u32 tx_clk_cycles_per_us = 0;
370 unsigned long core_clk_rate = 0;
371 u32 core_clk_cycles_per_us = 0;
372
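	/*
	 * {gear, cycles} pairs: the second column is the value programmed as
	 * TX symbol clock cycles per microsecond into
	 * REG_UFS_TX_SYMBOL_CLK_NS_US below.
	 */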
373 static u32 pwm_fr_table[][2] = {
374 {UFS_PWM_G1, 0x1},
375 {UFS_PWM_G2, 0x1},
376 {UFS_PWM_G3, 0x1},
377 {UFS_PWM_G4, 0x1},
378 };
379
380 static u32 hs_fr_table_rA[][2] = {
381 {UFS_HS_G1, 0x1F},
382 {UFS_HS_G2, 0x3e},
383 {UFS_HS_G3, 0x7D},
384 };
385
386 static u32 hs_fr_table_rB[][2] = {
387 {UFS_HS_G1, 0x24},
388 {UFS_HS_G2, 0x49},
389 {UFS_HS_G3, 0x92},
390 };
391
	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation logic.
	 */
399 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
400 goto out;
401
402 if (gear == 0) {
403 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
404 goto out_error;
405 }
406
407 list_for_each_entry(clki, &hba->clk_list_head, list) {
408 if (!strcmp(clki->name, "core_clk"))
409 core_clk_rate = clk_get_rate(clki->clk);
410 }
411
412 /* If frequency is smaller than 1MHz, set to 1MHz */
413 if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
414 core_clk_rate = DEFAULT_CLK_RATE_HZ;
415
416 core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
417 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
418 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
419 /*
420 * make sure above write gets applied before we return from
421 * this function.
422 */
423 mb();
424 }
425
426 if (ufs_qcom_cap_qunipro(host))
427 goto out;
428
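	/*
	 * Compute the core clock period in ns and position it in its
	 * CLK_NS_REG field so it can be OR'ed with the TX cycle count below.
	 */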
429 core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
430 core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
431 core_clk_period_in_ns &= MASK_CLK_NS_REG;
432
433 switch (hs) {
434 case FASTAUTO_MODE:
435 case FAST_MODE:
436 if (rate == PA_HS_MODE_A) {
437 if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
438 dev_err(hba->dev,
439 "%s: index %d exceeds table size %zu\n",
440 __func__, gear,
441 ARRAY_SIZE(hs_fr_table_rA));
442 goto out_error;
443 }
444 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
445 } else if (rate == PA_HS_MODE_B) {
446 if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
447 dev_err(hba->dev,
448 "%s: index %d exceeds table size %zu\n",
449 __func__, gear,
450 ARRAY_SIZE(hs_fr_table_rB));
451 goto out_error;
452 }
453 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
454 } else {
455 dev_err(hba->dev, "%s: invalid rate = %d\n",
456 __func__, rate);
457 goto out_error;
458 }
459 break;
460 case SLOWAUTO_MODE:
461 case SLOW_MODE:
462 if (gear > ARRAY_SIZE(pwm_fr_table)) {
463 dev_err(hba->dev,
464 "%s: index %d exceeds table size %zu\n",
465 __func__, gear,
466 ARRAY_SIZE(pwm_fr_table));
467 goto out_error;
468 }
469 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
470 break;
471 case UNCHANGED:
472 default:
473 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
474 goto out_error;
475 }
476
477 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
478 (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register's two fields shall be written at once */
480 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
481 REG_UFS_TX_SYMBOL_CLK_NS_US);
482 /*
483 * make sure above write gets applied before we return from
484 * this function.
485 */
486 mb();
487 }
488
489 if (update_link_startup_timer) {
490 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
491 REG_UFS_PA_LINK_STARTUP_TIMER);
492 /*
493 * make sure that this configuration is applied before
494 * we return
495 */
496 mb();
497 }
498 goto out;
499
500out_error:
501 ret = -EINVAL;
502out:
503 return ret;
504}
505
506static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
507 enum ufs_notify_change_status status)
508{
509 int err = 0;
510 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
511
512 switch (status) {
513 case PRE_CHANGE:
514 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
515 0, true)) {
516 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
517 __func__);
518 err = -EINVAL;
519 goto out;
520 }
521
522 if (ufs_qcom_cap_qunipro(host))
523 /*
524 * set unipro core clock cycles to 150 & clear clock
525 * divider
526 */
527 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
528 150);
529
		/*
		 * Some UFS devices (and possibly the host) have issues if LCC
		 * is enabled. So we are setting PA_Local_TX_LCC_Enable to 0
		 * before link startup, which will make sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 */
537 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
538 err = ufshcd_dme_set(hba,
539 UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
540 0);
541
542 break;
543 case POST_CHANGE:
544 ufs_qcom_link_startup_post_change(hba);
545 break;
546 default:
547 break;
548 }
549
550out:
551 return err;
552}
553
554static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
555{
556 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
557 struct phy *phy = host->generic_phy;
558 int ret = 0;
559
560 if (ufs_qcom_is_link_off(hba)) {
561 /*
562 * Disable the tx/rx lane symbol clocks before PHY is
563 * powered down as the PLL source should be disabled
564 * after downstream clocks are disabled.
565 */
566 ufs_qcom_disable_lane_clks(host);
567 phy_power_off(phy);
568
569 /* Assert PHY soft reset */
570 ufs_qcom_assert_reset(hba);
571 goto out;
572 }
573
574 /*
575 * If UniPro link is not active, PHY ref_clk, main PHY analog power
576 * rail and low noise analog power rail for PLL can be switched off.
577 */
578 if (!ufs_qcom_is_link_active(hba)) {
579 ufs_qcom_disable_lane_clks(host);
580 phy_power_off(phy);
581 }
582
583out:
584 return ret;
585}
586
587static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
588{
589 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
590 struct phy *phy = host->generic_phy;
591 int err;
592
593 err = phy_power_on(phy);
594 if (err) {
595 dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
596 __func__, err);
597 goto out;
598 }
599
600 err = ufs_qcom_enable_lane_clks(host);
601 if (err)
602 goto out;
603
604 hba->is_sys_suspended = false;
605
606out:
607 return err;
608}
609
610#ifdef CONFIG_MSM_BUS_SCALING
611static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
612 const char *speed_mode)
613{
614 struct device *dev = host->hba->dev;
615 struct device_node *np = dev->of_node;
616 int err;
617 const char *key = "qcom,bus-vector-names";
618
619 if (!speed_mode) {
620 err = -EINVAL;
621 goto out;
622 }
623
624 if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
625 err = of_property_match_string(np, key, "MAX");
626 else
627 err = of_property_match_string(np, key, speed_mode);
628
629out:
630 if (err < 0)
631 dev_err(dev, "%s: Invalid %s mode %d\n",
632 __func__, speed_mode, err);
633 return err;
634}
635
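/*
 * Compose a bus vote name such as "HS_RB_G3_L2", "PWM_G1_L1" or "MIN",
 * matching one of the entries listed in the "qcom,bus-vector-names" DT
 * property looked up by ufs_qcom_get_bus_vote().
 */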
636static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
637{
638 int gear = max_t(u32, p->gear_rx, p->gear_tx);
639 int lanes = max_t(u32, p->lane_rx, p->lane_tx);
640 int pwr;
641
642 /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
643 if (!gear)
644 gear = 1;
645
646 if (!lanes)
647 lanes = 1;
648
649 if (!p->pwr_rx && !p->pwr_tx) {
650 pwr = SLOWAUTO_MODE;
651 snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
652 } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
653 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
654 pwr = FAST_MODE;
655 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
656 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
657 } else {
658 pwr = SLOW_MODE;
659 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
660 "PWM", gear, lanes);
661 }
662}
663
664static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
665{
666 int err = 0;
667
668 if (vote != host->bus_vote.curr_vote) {
669 err = msm_bus_scale_client_update_request(
670 host->bus_vote.client_handle, vote);
671 if (err) {
672 dev_err(host->hba->dev,
673 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
674 __func__, host->bus_vote.client_handle,
675 vote, err);
676 goto out;
677 }
678
679 host->bus_vote.curr_vote = vote;
680 }
681out:
682 return err;
683}
684
685static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
686{
687 int vote;
688 int err = 0;
689 char mode[BUS_VECTOR_NAME_LEN];
690
691 ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
692
693 vote = ufs_qcom_get_bus_vote(host, mode);
694 if (vote >= 0)
695 err = ufs_qcom_set_bus_vote(host, vote);
696 else
697 err = vote;
698
699 if (err)
700 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
701 else
702 host->bus_vote.saved_vote = vote;
703 return err;
704}
705
706static ssize_t
707show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
708 char *buf)
709{
710 struct ufs_hba *hba = dev_get_drvdata(dev);
711 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
712
713 return snprintf(buf, PAGE_SIZE, "%u\n",
714 host->bus_vote.is_max_bw_needed);
715}
716
717static ssize_t
718store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
719 const char *buf, size_t count)
720{
721 struct ufs_hba *hba = dev_get_drvdata(dev);
722 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
723 uint32_t value;
724
725 if (!kstrtou32(buf, 0, &value)) {
726 host->bus_vote.is_max_bw_needed = !!value;
727 ufs_qcom_update_bus_bw_vote(host);
728 }
729
730 return count;
731}
732
733static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
734{
735 int err;
736 struct msm_bus_scale_pdata *bus_pdata;
737 struct device *dev = host->hba->dev;
738 struct platform_device *pdev = to_platform_device(dev);
739 struct device_node *np = dev->of_node;
740
741 bus_pdata = msm_bus_cl_get_pdata(pdev);
742 if (!bus_pdata) {
743 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
744 err = -ENODATA;
745 goto out;
746 }
747
748 err = of_property_count_strings(np, "qcom,bus-vector-names");
749 if (err < 0 || err != bus_pdata->num_usecases) {
750 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
751 __func__, err);
752 goto out;
753 }
754
755 host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
756 if (!host->bus_vote.client_handle) {
757 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
758 __func__);
759 err = -EFAULT;
760 goto out;
761 }
762
763 /* cache the vote index for minimum and maximum bandwidth */
764 host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
765 host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
766
767 host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
768 host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
769 sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
770 host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
771 host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
772 err = device_create_file(dev, &host->bus_vote.max_bus_bw);
773out:
774 return err;
775}
776#else /* CONFIG_MSM_BUS_SCALING */
777static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
778{
779 return 0;
780}
781
782static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
783{
784 return 0;
785}
786
787static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
788{
789 return 0;
790}
791#endif /* CONFIG_MSM_BUS_SCALING */
792
793static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
794{
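	/*
	 * The XOR ensures the register is only touched when the requested
	 * state differs from the cached is_dev_ref_clk_enabled state.
	 */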
795 if (host->dev_ref_clk_ctrl_mmio &&
796 (enable ^ host->is_dev_ref_clk_enabled)) {
797 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
798
799 if (enable)
800 temp |= host->dev_ref_clk_en_mask;
801 else
802 temp &= ~host->dev_ref_clk_en_mask;
803
804 /*
805 * If we are here to disable this clock it might be immediately
806 * after entering into hibern8 in which case we need to make
807 * sure that device ref_clk is active at least 1us after the
808 * hibern8 enter.
809 */
810 if (!enable)
811 udelay(1);
812
813 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
814
815 /* ensure that ref_clk is enabled/disabled before we return */
816 wmb();
817
818 /*
819 * If we call hibern8 exit after this, we need to make sure that
820 * device ref_clk is stable for at least 1us before the hibern8
821 * exit command.
822 */
823 if (enable)
824 udelay(1);
825
826 host->is_dev_ref_clk_enabled = enable;
827 }
828}
829
830static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
831 enum ufs_notify_change_status status,
832 struct ufs_pa_layer_attr *dev_max_params,
833 struct ufs_pa_layer_attr *dev_req_params)
834{
835 u32 val;
836 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
837 struct phy *phy = host->generic_phy;
838 struct ufs_dev_params ufs_qcom_cap;
839 int ret = 0;
840 int res = 0;
841
842 if (!dev_req_params) {
843 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
844 ret = -EINVAL;
845 goto out;
846 }
847
848 switch (status) {
849 case PRE_CHANGE:
850 ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
851 ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
852 ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
853 ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
854 ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
855 ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
856 ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
857 ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
858 ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
859 ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
860 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
861 ufs_qcom_cap.desired_working_mode =
862 UFS_QCOM_LIMIT_DESIRED_MODE;
863
864 if (host->hw_ver.major == 0x1) {
865 /*
866 * HS-G3 operations may not reliably work on legacy QCOM
867 * UFS host controller hardware even though capability
868 * exchange during link startup phase may end up
869 * negotiating maximum supported gear as G3.
870 * Hence downgrade the maximum supported gear to HS-G2.
871 */
872 if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
873 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
874 if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
875 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
876 }
877
878 ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
879 dev_max_params,
880 dev_req_params);
881 if (ret) {
882 pr_err("%s: failed to determine capabilities\n",
883 __func__);
884 goto out;
885 }
886
887 /* enable the device ref clock before changing to HS mode */
888 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
889 ufshcd_is_hs_mode(dev_req_params))
890 ufs_qcom_dev_ref_clk_ctrl(host, true);
891 break;
892 case POST_CHANGE:
893 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
894 dev_req_params->pwr_rx,
895 dev_req_params->hs_rate, false)) {
896 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
897 __func__);
898 /*
899 * we return error code at the end of the routine,
900 * but continue to configure UFS_PHY_TX_LANE_ENABLE
901 * and bus voting as usual
902 */
903 ret = -EINVAL;
904 }
905
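		/*
		 * ~(MAX_U32 << n) sets the n least significant bits, i.e. one
		 * enable bit per active TX lane for the PHY.
		 */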
906 val = ~(MAX_U32 << dev_req_params->lane_tx);
907 res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
908 if (res) {
909 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
910 __func__, res);
911 ret = res;
912 }
913
914 /* cache the power mode parameters to use internally */
915 memcpy(&host->dev_req_params,
916 dev_req_params, sizeof(*dev_req_params));
917 ufs_qcom_update_bus_bw_vote(host);
918
919 /* disable the device ref clock if entered PWM mode */
920 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
921 !ufshcd_is_hs_mode(dev_req_params))
922 ufs_qcom_dev_ref_clk_ctrl(host, false);
923 break;
924 default:
925 ret = -EINVAL;
926 break;
927 }
928out:
929 return ret;
930}
931
932static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
933{
934 int err;
935 u32 pa_vs_config_reg1;
936
937 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
938 &pa_vs_config_reg1);
939 if (err)
940 goto out;
941
942 /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
943 err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
944 (pa_vs_config_reg1 | (1 << 12)));
945
946out:
947 return err;
948}
949
950static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
951{
952 int err = 0;
953
954 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
955 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
956
957 return err;
958}
959
960static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
961{
962 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
963
964 if (host->hw_ver.major == 0x1)
965 return UFSHCI_VERSION_11;
966 else
967 return UFSHCI_VERSION_20;
968}
969
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller might have some non-standard behaviours
 * (quirks) beyond what is specified by the UFSHCI specification. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
979static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
980{
981 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
982
983 if (host->hw_ver.major == 0x01) {
984 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
985 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
986 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
987
988 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
989 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
990
991 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
992 }
993
994 if (host->hw_ver.major == 0x2) {
995 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
996
997 if (!ufs_qcom_cap_qunipro(host))
998 /* Legacy UniPro mode still need following quirks */
999 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1000 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
1001 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
1002 }
1003
1004 /*
1005 * Inline crypto is currently broken with ufs-qcom at least because the
1006 * device tree doesn't include the crypto registers. There are likely
1007 * to be other issues that will need to be addressed too.
1008 */
1009 hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO;
1010}
1011
1012static void ufs_qcom_set_caps(struct ufs_hba *hba)
1013{
1014 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1015
1016 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1017 hba->caps |= UFSHCD_CAP_CLK_SCALING;
1018 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1019
1020 if (host->hw_ver.major >= 0x2) {
1021 host->caps = UFS_QCOM_CAP_QUNIPRO |
1022 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
1023 }
1024}
1025
1026/**
 * ufs_qcom_setup_clocks - enable/disable clocks
1028 * @hba: host controller instance
1029 * @on: If true, enable clocks else disable them.
1030 * @status: PRE_CHANGE or POST_CHANGE notify
1031 *
1032 * Returns 0 on success, non-zero on failure.
1033 */
1034static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1035 enum ufs_notify_change_status status)
1036{
1037 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1038 int err;
1039 int vote = 0;
1040
	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * ufs_qcom_setup_clocks() will be called again from
	 * ufs_qcom_init() once initialization is done.
	 */
1046 if (!host)
1047 return 0;
1048
1049 if (on && (status == POST_CHANGE)) {
1050 phy_power_on(host->generic_phy);
1051
1052 /* enable the device ref clock for HS mode*/
1053 if (ufshcd_is_hs_mode(&hba->pwr_info))
1054 ufs_qcom_dev_ref_clk_ctrl(host, true);
1055 vote = host->bus_vote.saved_vote;
1056 if (vote == host->bus_vote.min_bw_vote)
1057 ufs_qcom_update_bus_bw_vote(host);
1058
1059 } else if (!on && (status == PRE_CHANGE)) {
1060 if (!ufs_qcom_is_link_active(hba)) {
1061 /* disable device ref_clk */
1062 ufs_qcom_dev_ref_clk_ctrl(host, false);
1063
1064 /* powering off PHY during aggressive clk gating */
1065 phy_power_off(host->generic_phy);
1066 }
1067
1068 vote = host->bus_vote.min_bw_vote;
1069 }
1070
1071 err = ufs_qcom_set_bus_vote(host, vote);
1072 if (err)
1073 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1074 __func__, err);
1075
1076 return err;
1077}
1078
1079#define ANDROID_BOOT_DEV_MAX 30
1080static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
1081
1082#ifndef MODULE
1083static int __init get_android_boot_dev(char *str)
1084{
1085 strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
1086 return 1;
1087}
1088__setup("androidboot.bootdevice=", get_android_boot_dev);
1089#endif
1090
1091/**
1092 * ufs_qcom_init - bind phy with controller
1093 * @hba: host controller instance
1094 *
1095 * Binds PHY with controller and powers up PHY enabling clocks
1096 * and regulators.
1097 *
1098 * Returns -EPROBE_DEFER if binding fails, returns negative error
1099 * on phy power up failure and returns zero on success.
1100 */
1101static int ufs_qcom_init(struct ufs_hba *hba)
1102{
1103 int err;
1104 struct device *dev = hba->dev;
1105 struct platform_device *pdev = to_platform_device(dev);
1106 struct ufs_qcom_host *host;
1107 struct resource *res;
1108
1109 if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
1110 return -ENODEV;
1111
1112 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1113 if (!host) {
1114 err = -ENOMEM;
1115 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
1116 goto out;
1117 }
1118
1119 /* Make a two way bind between the qcom host and the hba */
1120 host->hba = hba;
1121 ufshcd_set_variant(hba, host);
1122
	/*
	 * Voting/de-voting the device ref_clk source is time consuming, hence
	 * skip de-voting it during aggressive clock gating. This clock
	 * will still be gated off during runtime suspend.
	 */
1128 host->generic_phy = devm_phy_get(dev, "ufsphy");
1129
1130 if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case we would like to return the EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
			__func__, err);
1138 goto out_variant_clear;
1139 } else if (IS_ERR(host->generic_phy)) {
1140 err = PTR_ERR(host->generic_phy);
1141 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
1142 goto out_variant_clear;
1143 }
1144
1145 err = ufs_qcom_bus_register(host);
1146 if (err)
1147 goto out_variant_clear;
1148
1149 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1150 &host->hw_ver.minor, &host->hw_ver.step);
1151
1152 /*
1153 * for newer controllers, device reference clock control bit has
1154 * moved inside UFS controller register address space itself.
1155 */
1156 if (host->hw_ver.major >= 0x02) {
1157 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1158 host->dev_ref_clk_en_mask = BIT(26);
1159 } else {
1160 /* "dev_ref_clk_ctrl_mem" is optional resource */
1161 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1162 if (res) {
1163 host->dev_ref_clk_ctrl_mmio =
1164 devm_ioremap_resource(dev, res);
1165 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
1166 dev_warn(dev,
1167 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
1168 __func__,
1169 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
1170 host->dev_ref_clk_ctrl_mmio = NULL;
1171 }
1172 host->dev_ref_clk_en_mask = BIT(5);
1173 }
1174 }
1175
1176 /* update phy revision information before calling phy_init() */
1177 ufs_qcom_phy_save_controller_version(host->generic_phy,
1178 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
1179
1180 err = ufs_qcom_init_lane_clks(host);
1181 if (err)
1182 goto out_variant_clear;
1183
1184 ufs_qcom_set_caps(hba);
1185 ufs_qcom_advertise_quirks(hba);
1186
1187 ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1188
1189 if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1190 ufs_qcom_hosts[hba->dev->id] = host;
1191
1192 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
1193 ufs_qcom_get_default_testbus_cfg(host);
1194 err = ufs_qcom_testbus_config(host);
1195 if (err) {
1196 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1197 __func__, err);
1198 err = 0;
1199 }
1200
1201 goto out;
1202
1203out_variant_clear:
1204 ufshcd_set_variant(hba, NULL);
1205out:
1206 return err;
1207}
1208
1209static void ufs_qcom_exit(struct ufs_hba *hba)
1210{
1211 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1212
1213 ufs_qcom_disable_lane_clks(host);
1214 phy_power_off(host->generic_phy);
1215 phy_exit(host->generic_phy);
1216}
1217
1218static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
1219 u32 clk_cycles)
1220{
1221 int err;
1222 u32 core_clk_ctrl_reg;
1223
1224 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
1225 return -EINVAL;
1226
1227 err = ufshcd_dme_get(hba,
1228 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1229 &core_clk_ctrl_reg);
1230 if (err)
1231 goto out;
1232
1233 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
1234 core_clk_ctrl_reg |= clk_cycles;
1235
1236 /* Clear CORE_CLK_DIV_EN */
1237 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1238
1239 err = ufshcd_dme_set(hba,
1240 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1241 core_clk_ctrl_reg);
1242out:
1243 return err;
1244}
1245
1246static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1247{
1248 /* nothing to do as of now */
1249 return 0;
1250}
1251
1252static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1253{
1254 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1255
1256 if (!ufs_qcom_cap_qunipro(host))
1257 return 0;
1258
1259 /* set unipro core clock cycles to 150 and clear clock divider */
1260 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
1261}
1262
1263static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1264{
1265 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1266 int err;
1267 u32 core_clk_ctrl_reg;
1268
1269 if (!ufs_qcom_cap_qunipro(host))
1270 return 0;
1271
1272 err = ufshcd_dme_get(hba,
1273 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1274 &core_clk_ctrl_reg);
1275
1276 /* make sure CORE_CLK_DIV_EN is cleared */
1277 if (!err &&
1278 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1279 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1280 err = ufshcd_dme_set(hba,
1281 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1282 core_clk_ctrl_reg);
1283 }
1284
1285 return err;
1286}
1287
1288static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1289{
1290 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1291
1292 if (!ufs_qcom_cap_qunipro(host))
1293 return 0;
1294
1295 /* set unipro core clock cycles to 75 and clear clock divider */
1296 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
1297}
1298
1299static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1300 bool scale_up, enum ufs_notify_change_status status)
1301{
1302 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1303 struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
1304 int err = 0;
1305
1306 if (status == PRE_CHANGE) {
1307 if (scale_up)
1308 err = ufs_qcom_clk_scale_up_pre_change(hba);
1309 else
1310 err = ufs_qcom_clk_scale_down_pre_change(hba);
1311 } else {
1312 if (scale_up)
1313 err = ufs_qcom_clk_scale_up_post_change(hba);
1314 else
1315 err = ufs_qcom_clk_scale_down_post_change(hba);
1316
1317 if (err || !dev_req_params)
1318 goto out;
1319
1320 ufs_qcom_cfg_timers(hba,
1321 dev_req_params->gear_rx,
1322 dev_req_params->pwr_rx,
1323 dev_req_params->hs_rate,
1324 false);
1325 ufs_qcom_update_bus_bw_vote(host);
1326 }
1327
1328out:
1329 return err;
1330}
1331
1332static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1333 void *priv, void (*print_fn)(struct ufs_hba *hba,
1334 int offset, int num_regs, const char *str, void *priv))
1335{
1336 u32 reg;
1337 struct ufs_qcom_host *host;
1338
1339 if (unlikely(!hba)) {
1340 pr_err("%s: hba is NULL\n", __func__);
1341 return;
1342 }
1343 if (unlikely(!print_fn)) {
1344 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
1345 return;
1346 }
1347
1348 host = ufshcd_get_variant(hba);
1349 if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
1350 return;
1351
1352 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1353 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
1354
1355 reg = ufshcd_readl(hba, REG_UFS_CFG1);
1356 reg |= UTP_DBG_RAMS_EN;
1357 ufshcd_writel(hba, reg, REG_UFS_CFG1);
1358
1359 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1360 print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
1361
1362 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1363 print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
1364
1365 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1366 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
1367
1368 /* clear bit 17 - UTP_DBG_RAMS_EN */
1369 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1370
1371 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1372 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
1373
1374 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1375 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
1376
1377 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1378 print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
1379
1380 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1381 print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
1382
1383 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1384 print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
1385
1386 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1387 print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
1388
1389 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1390 print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
1391}
1392
1393static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1394{
1395 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
1396 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1397 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1398 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1399 } else {
1400 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
1401 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
1402 }
1403}
1404
1405static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1406{
1407 /* provide a legal default configuration */
1408 host->testbus.select_major = TSTBUS_UNIPRO;
1409 host->testbus.select_minor = 37;
1410}
1411
1412static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1413{
1414 if (host->testbus.select_major >= TSTBUS_MAX) {
1415 dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
1417 __func__, host->testbus.select_major);
1418 return false;
1419 }
1420
1421 return true;
1422}
1423
1424int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1425{
1426 int reg;
1427 int offset;
1428 u32 mask = TEST_BUS_SUB_SEL_MASK;
1429
1430 if (!host)
1431 return -EINVAL;
1432
1433 if (!ufs_qcom_testbus_cfg_is_ok(host))
1434 return -EPERM;
1435
1436 switch (host->testbus.select_major) {
1437 case TSTBUS_UAWM:
1438 reg = UFS_TEST_BUS_CTRL_0;
1439 offset = 24;
1440 break;
1441 case TSTBUS_UARM:
1442 reg = UFS_TEST_BUS_CTRL_0;
1443 offset = 16;
1444 break;
1445 case TSTBUS_TXUC:
1446 reg = UFS_TEST_BUS_CTRL_0;
1447 offset = 8;
1448 break;
1449 case TSTBUS_RXUC:
1450 reg = UFS_TEST_BUS_CTRL_0;
1451 offset = 0;
1452 break;
1453 case TSTBUS_DFC:
1454 reg = UFS_TEST_BUS_CTRL_1;
1455 offset = 24;
1456 break;
1457 case TSTBUS_TRLUT:
1458 reg = UFS_TEST_BUS_CTRL_1;
1459 offset = 16;
1460 break;
1461 case TSTBUS_TMRLUT:
1462 reg = UFS_TEST_BUS_CTRL_1;
1463 offset = 8;
1464 break;
1465 case TSTBUS_OCSC:
1466 reg = UFS_TEST_BUS_CTRL_1;
1467 offset = 0;
1468 break;
1469 case TSTBUS_WRAPPER:
1470 reg = UFS_TEST_BUS_CTRL_2;
1471 offset = 16;
1472 break;
1473 case TSTBUS_COMBINED:
1474 reg = UFS_TEST_BUS_CTRL_2;
1475 offset = 8;
1476 break;
1477 case TSTBUS_UTP_HCI:
1478 reg = UFS_TEST_BUS_CTRL_2;
1479 offset = 0;
1480 break;
1481 case TSTBUS_UNIPRO:
1482 reg = UFS_UNIPRO_CFG;
1483 offset = 20;
1484 mask = 0xFFF;
1485 break;
1486 /*
1487 * No need for a default case, since
1488 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1489 * is legal
1490 */
1491 }
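	/*
	 * At this point 'reg' and 'offset' identify the test bus control
	 * register and the bit position of the minor selector field for the
	 * chosen module; shift the mask to match.
	 */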
1492 mask <<= offset;
1493
1494 pm_runtime_get_sync(host->hba->dev);
1495 ufshcd_hold(host->hba, false);
1496 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1497 (u32)host->testbus.select_major << 19,
1498 REG_UFS_CFG1);
1499 ufshcd_rmwl(host->hba, mask,
1500 (u32)host->testbus.select_minor << offset,
1501 reg);
1502 ufs_qcom_enable_test_bus(host);
1503 /*
1504 * Make sure the test bus configuration is
1505 * committed before returning.
1506 */
1507 mb();
1508 ufshcd_release(host->hba);
1509 pm_runtime_put_sync(host->hba->dev);
1510
1511 return 0;
1512}
1513
1514static void ufs_qcom_testbus_read(struct ufs_hba *hba)
1515{
1516 ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
1517}
1518
1519static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
1520{
1521 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1522 u32 *testbus = NULL;
1523 int i, nminor = 256, testbus_len = nminor * sizeof(u32);
1524
1525 testbus = kmalloc(testbus_len, GFP_KERNEL);
1526 if (!testbus)
1527 return;
1528
1529 host->testbus.select_major = TSTBUS_UNIPRO;
1530 for (i = 0; i < nminor; i++) {
1531 host->testbus.select_minor = i;
1532 ufs_qcom_testbus_config(host);
1533 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
1534 }
1535 print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
1536 16, 4, testbus, testbus_len, false);
1537 kfree(testbus);
1538}
1539
1540static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1541{
1542 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
1543 "HCI Vendor Specific Registers ");
1544
1545 /* sleep a bit intermittently as we are dumping too much data */
1546 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
1547 usleep_range(1000, 1100);
1548 ufs_qcom_testbus_read(hba);
1549 usleep_range(1000, 1100);
1550 ufs_qcom_print_unipro_testbus(hba);
1551 usleep_range(1000, 1100);
1552}
1553
1554/**
1555 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1556 *
1557 * The variant operations configure the necessary controller and PHY
1558 * handshake during initialization.
1559 */
1560static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1561 .name = "qcom",
1562 .init = ufs_qcom_init,
1563 .exit = ufs_qcom_exit,
1564 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
1565 .clk_scale_notify = ufs_qcom_clk_scale_notify,
1566 .setup_clocks = ufs_qcom_setup_clocks,
1567 .hce_enable_notify = ufs_qcom_hce_enable_notify,
1568 .link_startup_notify = ufs_qcom_link_startup_notify,
1569 .pwr_change_notify = ufs_qcom_pwr_change_notify,
1570 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
1571 .suspend = ufs_qcom_suspend,
1572 .resume = ufs_qcom_resume,
1573 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
1574};
1575
1576/**
1577 * ufs_qcom_probe - probe routine of the driver
1578 * @pdev: pointer to Platform device handle
1579 *
1580 * Return zero for success and non-zero for failure
1581 */
1582static int ufs_qcom_probe(struct platform_device *pdev)
1583{
1584 int err;
1585 struct device *dev = &pdev->dev;
1586
1587 /* Perform generic probe */
1588 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1589 if (err)
1590 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1591
1592 return err;
1593}
1594
1595/**
1596 * ufs_qcom_remove - set driver_data of the device to NULL
1597 * @pdev: pointer to platform device handle
1598 *
1599 * Always returns 0
1600 */
1601static int ufs_qcom_remove(struct platform_device *pdev)
1602{
1603 struct ufs_hba *hba = platform_get_drvdata(pdev);
1604
1605 pm_runtime_get_sync(&(pdev)->dev);
1606 ufshcd_remove(hba);
1607 return 0;
1608}
1609
1610static const struct of_device_id ufs_qcom_of_match[] = {
1611 { .compatible = "qcom,ufshc"},
1612 {},
1613};
1614MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1615
1616static const struct dev_pm_ops ufs_qcom_pm_ops = {
1617 .suspend = ufshcd_pltfrm_suspend,
1618 .resume = ufshcd_pltfrm_resume,
1619 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
1620 .runtime_resume = ufshcd_pltfrm_runtime_resume,
1621 .runtime_idle = ufshcd_pltfrm_runtime_idle,
1622};
1623
1624static struct platform_driver ufs_qcom_pltform = {
1625 .probe = ufs_qcom_probe,
1626 .remove = ufs_qcom_remove,
1627 .shutdown = ufshcd_pltfrm_shutdown,
1628 .driver = {
1629 .name = "ufshcd-qcom",
1630 .pm = &ufs_qcom_pm_ops,
1631 .of_match_table = of_match_ptr(ufs_qcom_of_match),
1632 },
1633};
1634module_platform_driver(ufs_qcom_pltform);
1635
1636MODULE_LICENSE("GPL v2");