// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */
5
6#include <linux/delay.h>
7#include <linux/dma-mapping.h>
8#include <linux/err.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/iopoll.h>
12#include <linux/platform_device.h>
13#include <linux/clk.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/pinctrl/consumer.h>
18#include <linux/regulator/consumer.h>
19#include <linux/reset.h>
20#include <linux/mmc/card.h>
21#include <linux/mmc/host.h>
22#include <linux/mmc/mmc.h>
23#include <linux/mmc/slot-gpio.h>
24#include <linux/gpio/consumer.h>
25#include <linux/ktime.h>
26
27#include "sdhci-cqhci.h"
28#include "sdhci-pltfm.h"
29#include "cqhci.h"
30
/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)

#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)

#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8

#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200

#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)

#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)

/* Hardware tuning control and status */
#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
/* TUN_ITER field encodings selecting 128 or 256 tuning iterations */
#define TRIES_128					2
#define TRIES_256					4
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7

#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
/* Each tuning status word reports one pass/fail bit per tap */
#define TUNING_WORD_BIT_SIZE				32

/* Pad drive-strength auto-calibration */
#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
#define SDHCI_AUTO_CAL_START				BIT(31)
#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff

#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000

#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)

/* Per-SoC quirk flags carried in sdhci_tegra_soc_data.nvquirks */
#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
#define NVQUIRK_ENABLE_SDR50				BIT(3)
#define NVQUIRK_ENABLE_SDR104				BIT(4)
#define NVQUIRK_ENABLE_DDR50				BIT(5)
#define NVQUIRK_HAS_PADCALIB				BIT(6)
#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)

/*
 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 * SDMMC hardware data timeout.
 */
#define NVQUIRK_HAS_TMCLK				BIT(10)

/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
112
/* Static per-SoC configuration selected by the compatible string. */
struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;	/* platform data for sdhci-pltfm */
	u64 dma_mask;				/* DMA addressing capability */
	u32 nvquirks;				/* NVQUIRK_* flags for this SoC */
	u8 min_tap_delay;			/* min tap delay (used in tuning math) */
	u8 max_tap_delay;			/* max tap delay (used in tuning math) */
};
120
/*
 * Magic pull up and pull down pad calibration offsets.
 * Parsed from "nvidia,pad-autocal-*" device properties; the *_timeout
 * variants are the fixed drive strengths used when auto-calibration
 * times out.
 */
struct sdhci_tegra_autocal_offsets {
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};
136
/* Per-host private data, stored in the sdhci_pltfm private area. */
struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	struct clk *tmclk;		/* separate timeout clock (NVQUIRK_HAS_TMCLK) */
	bool ddr_signaling;		/* true while in DDR50/52 timing */
	bool pad_calib_required;	/* run pad autocal on next clock change */
	bool pad_control_available;	/* pinctrl pad-voltage states present */

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;		/* timestamp of last pad auto-calibration */

	u32 default_tap;		/* tap value used outside tunable modes */
	u32 default_trim;		/* outbound trim programmed on reset */
	u32 dqs_trim;			/* HS400 DQS trim value */
	bool enable_hwcq;		/* hardware command queueing requested */
	unsigned long curr_clk_rate;	/* last rate requested from the clock */
	u8 tuned_tap_delay;		/* tap value derived from HW tuning */
};
162
163static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
164{
165 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
166 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
167 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
168
169 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
170 (reg == SDHCI_HOST_VERSION))) {
171 /* Erratum: Version register is invalid in HW. */
172 return SDHCI_SPEC_200;
173 }
174
175 return readw(host->ioaddr + reg);
176}
177
178static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
179{
180 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
181
182 switch (reg) {
183 case SDHCI_TRANSFER_MODE:
184 /*
185 * Postpone this write, we must do it together with a
186 * command write that is down below.
187 */
188 pltfm_host->xfer_mode_shadow = val;
189 return;
190 case SDHCI_COMMAND:
191 writel((val << 16) | pltfm_host->xfer_mode_shadow,
192 host->ioaddr + SDHCI_TRANSFER_MODE);
193 return;
194 }
195
196 writew(val, host->ioaddr + reg);
197}
198
199static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
200{
201 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
202 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
203 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
204
205 /* Seems like we're getting spurious timeout and crc errors, so
206 * disable signalling of them. In case of real errors software
207 * timers should take care of eventually detecting them.
208 */
209 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
210 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
211
212 writel(val, host->ioaddr + reg);
213
214 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
215 (reg == SDHCI_INT_ENABLE))) {
216 /* Erratum: Must enable block gap interrupt detection */
217 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
218 if (val & SDHCI_INT_CARD_INT)
219 gap_ctrl |= 0x8;
220 else
221 gap_ctrl &= ~0x8;
222 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
223 }
224}
225
226static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
227{
228 bool status;
229 u32 reg;
230
231 reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
232 status = !!(reg & SDHCI_CLOCK_CARD_EN);
233
234 if (status == enable)
235 return status;
236
237 if (enable)
238 reg |= SDHCI_CLOCK_CARD_EN;
239 else
240 reg &= ~SDHCI_CLOCK_CARD_EN;
241
242 sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
243
244 return status;
245}
246
247static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
248{
249 bool is_tuning_cmd = 0;
250 bool clk_enabled;
251 u8 cmd;
252
253 if (reg == SDHCI_COMMAND) {
254 cmd = SDHCI_GET_CMD(val);
255 is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
256 cmd == MMC_SEND_TUNING_BLOCK_HS200;
257 }
258
259 if (is_tuning_cmd)
260 clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
261
262 writew(val, host->ioaddr + reg);
263
264 if (is_tuning_cmd) {
265 udelay(1);
266 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
267 tegra_sdhci_configure_card_clk(host, clk_enabled);
268 }
269}
270
/*
 * Read-only (write-protect) state via GPIO only.
 */
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}
280
281static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
282{
283 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
284 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
285 int has_1v8, has_3v3;
286
287 /*
288 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
289 * voltage configuration in order to perform voltage switching. This
290 * means that valid pinctrl info is required on SDHCI instances capable
291 * of performing voltage switching. Whether or not an SDHCI instance is
292 * capable of voltage switching is determined based on the regulator.
293 */
294
295 if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
296 return true;
297
298 if (IS_ERR(host->mmc->supply.vqmmc))
299 return false;
300
301 has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
302 1700000, 1950000);
303
304 has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
305 2700000, 3600000);
306
307 if (has_1v8 == 1 && has_3v3 == 1)
308 return tegra_host->pad_control_available;
309
310 /* Fixed voltage, no pad control required. */
311 return true;
312}
313
314static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
315{
316 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
317 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
318 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
319 bool card_clk_enabled = false;
320 u32 reg;
321
322 /*
323 * Touching the tap values is a bit tricky on some SoC generations.
324 * The quirk enables a workaround for a glitch that sometimes occurs if
325 * the tap values are changed.
326 */
327
328 if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
329 card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
330
331 reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
332 reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
333 reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
334 sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
335
336 if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
337 card_clk_enabled) {
338 udelay(1);
339 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
340 tegra_sdhci_configure_card_clk(host, card_clk_enabled);
341 }
342}
343
/*
 * Reset hook.  After a full controller reset the vendor registers revert
 * to their power-on values, so reprogram the tap/trim values and
 * re-advertise the UHS capabilities appropriate for this SoC.
 */
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_and_cqhci_reset(host, mask);

	/* Partial (CMD/DATA) resets do not disturb the vendor registers. */
	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	/* Start from a clean slate: no UHS modes advertised. */
	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	/* Only advertise UHS when voltage switching can actually work. */
	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		/* Restore the pad VREF selection and request recalibration. */
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}
400
401static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
402{
403 u32 val;
404
405 /*
406 * Enable or disable the additional I/O pad used by the drive strength
407 * calibration process.
408 */
409 val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
410
411 if (enable)
412 val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
413 else
414 val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
415
416 sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
417
418 if (enable)
419 usleep_range(1, 2);
420}
421
422static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
423 u16 pdpu)
424{
425 u32 reg;
426
427 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
428 reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
429 reg |= pdpu;
430 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
431}
432
/*
 * Configure the SDMMC pads for @voltage.
 *
 * @state_drvupdn == true selects the dual-voltage pad *voltage* state via
 * pinctrl; false programs fixed pad *drive strengths*, either via dedicated
 * pinctrl drive states or, when those are absent, via the timeout offsets
 * parsed from the device tree.  Returns 0 or a negative pinctrl error.
 */
static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
			&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				/* Fall back to DT-provided timeout offsets */
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
							pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			/* Write fixed drive strengths into the comp pad ctrl */
			reg = sdhci_readl(host,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}
502
/*
 * Run the pad drive-strength auto-calibration sequence: program the
 * per-timing pull offsets, gate the card clock, power the calibration
 * pad, start calibration and poll for completion.  On timeout, disable
 * auto-calibration and fall back to fixed drive strengths.
 */
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	/* Pick the offset pair matching the current timing/voltage. */
	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic cal and use fixed Drive Strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}
564
565static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
566{
567 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
568 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
569 struct sdhci_tegra_autocal_offsets *autocal =
570 &tegra_host->autocal_offsets;
571 int err;
572
573 err = device_property_read_u32(host->mmc->parent,
574 "nvidia,pad-autocal-pull-up-offset-3v3",
575 &autocal->pull_up_3v3);
576 if (err)
577 autocal->pull_up_3v3 = 0;
578
579 err = device_property_read_u32(host->mmc->parent,
580 "nvidia,pad-autocal-pull-down-offset-3v3",
581 &autocal->pull_down_3v3);
582 if (err)
583 autocal->pull_down_3v3 = 0;
584
585 err = device_property_read_u32(host->mmc->parent,
586 "nvidia,pad-autocal-pull-up-offset-1v8",
587 &autocal->pull_up_1v8);
588 if (err)
589 autocal->pull_up_1v8 = 0;
590
591 err = device_property_read_u32(host->mmc->parent,
592 "nvidia,pad-autocal-pull-down-offset-1v8",
593 &autocal->pull_down_1v8);
594 if (err)
595 autocal->pull_down_1v8 = 0;
596
597 err = device_property_read_u32(host->mmc->parent,
598 "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
599 &autocal->pull_up_3v3_timeout);
600 if (err) {
601 if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
602 (tegra_host->pinctrl_state_3v3_drv == NULL))
603 pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
604 mmc_hostname(host->mmc));
605 autocal->pull_up_3v3_timeout = 0;
606 }
607
608 err = device_property_read_u32(host->mmc->parent,
609 "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
610 &autocal->pull_down_3v3_timeout);
611 if (err) {
612 if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
613 (tegra_host->pinctrl_state_3v3_drv == NULL))
614 pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
615 mmc_hostname(host->mmc));
616 autocal->pull_down_3v3_timeout = 0;
617 }
618
619 err = device_property_read_u32(host->mmc->parent,
620 "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
621 &autocal->pull_up_1v8_timeout);
622 if (err) {
623 if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
624 (tegra_host->pinctrl_state_1v8_drv == NULL))
625 pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
626 mmc_hostname(host->mmc));
627 autocal->pull_up_1v8_timeout = 0;
628 }
629
630 err = device_property_read_u32(host->mmc->parent,
631 "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
632 &autocal->pull_down_1v8_timeout);
633 if (err) {
634 if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
635 (tegra_host->pinctrl_state_1v8_drv == NULL))
636 pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
637 mmc_hostname(host->mmc));
638 autocal->pull_down_1v8_timeout = 0;
639 }
640
641 err = device_property_read_u32(host->mmc->parent,
642 "nvidia,pad-autocal-pull-up-offset-sdr104",
643 &autocal->pull_up_sdr104);
644 if (err)
645 autocal->pull_up_sdr104 = autocal->pull_up_1v8;
646
647 err = device_property_read_u32(host->mmc->parent,
648 "nvidia,pad-autocal-pull-down-offset-sdr104",
649 &autocal->pull_down_sdr104);
650 if (err)
651 autocal->pull_down_sdr104 = autocal->pull_down_1v8;
652
653 err = device_property_read_u32(host->mmc->parent,
654 "nvidia,pad-autocal-pull-up-offset-hs400",
655 &autocal->pull_up_hs400);
656 if (err)
657 autocal->pull_up_hs400 = autocal->pull_up_1v8;
658
659 err = device_property_read_u32(host->mmc->parent,
660 "nvidia,pad-autocal-pull-down-offset-hs400",
661 &autocal->pull_down_hs400);
662 if (err)
663 autocal->pull_down_hs400 = autocal->pull_down_1v8;
664}
665
666static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
667{
668 struct sdhci_host *host = mmc_priv(mmc);
669 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
670 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
671 ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
672
673 /* 100 ms calibration interval is specified in the TRM */
674 if (ktime_to_ms(since_calib) > 100) {
675 tegra_sdhci_pad_autocalib(host);
676 tegra_host->last_calib = ktime_get();
677 }
678
679 sdhci_request(mmc, mrq);
680}
681
682static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
683{
684 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
685 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
686 int err;
687
688 err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
689 &tegra_host->default_tap);
690 if (err)
691 tegra_host->default_tap = 0;
692
693 err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
694 &tegra_host->default_trim);
695 if (err)
696 tegra_host->default_trim = 0;
697
698 err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
699 &tegra_host->dqs_trim);
700 if (err)
701 tegra_host->dqs_trim = 0x11;
702}
703
704static void tegra_sdhci_parse_dt(struct sdhci_host *host)
705{
706 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
707 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
708
709 if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
710 tegra_host->enable_hwcq = true;
711 else
712 tegra_host->enable_hwcq = false;
713
714 tegra_sdhci_parse_pad_autocal_dt(host);
715 tegra_sdhci_parse_tap_and_trim(host);
716}
717
/*
 * Clock hook: program the upstream clock to the requested rate and keep
 * host->max_clk consistent with it so the SDHCI divider computation in
 * sdhci_set_clock() produces the right divider, especially for DDR modes.
 */
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	/* Clock gating only; nothing vendor-specific to do. */
	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divided the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	tegra_host->curr_clk_rate = host_clk;
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	/* Re-run pad calibration if a full reset invalidated it. */
	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}
754
/*
 * Enable or disable HS400 enhanced strobe in the vendor system software
 * control register, bumping the interface clock when enabling it.
 */
static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe) {
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
		/*
		 * When CMD13 is sent from mmc_select_hs400es() after
		 * switching to HS400ES mode, the bus is operating at
		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
		 * controller CAR clock and the interface clock are rate matched.
		 */
		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
	} else {
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	}

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}
780
/*
 * Report the highest rate the upstream clock can actually provide, rather
 * than relying on the SDHCI capability registers.
 */
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_round_rate(pltfm_host->clk, UINT_MAX);
}
787
788static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
789{
790 u32 val;
791
792 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
793 val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
794 val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
795 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
796}
797
798static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
799{
800 u32 reg;
801 int err;
802
803 reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
804 reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
805 sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
806
807 /* 1 ms sleep, 5 ms timeout */
808 err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
809 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
810 1000, 5000);
811 if (err)
812 dev_err(mmc_dev(host->mmc),
813 "HS400 delay line calibration timed out\n");
814}
815
/*
 * Scan the per-tap pass/fail bitmap produced by hardware tuning and pick
 * a tap in the middle of the first "clean" passing window — one that is
 * neither a bubble (narrower than @thd_low) nor a merged window (wider
 * than or equal to @thd_up).  If only a single edge was seen, place the
 * tap at @fixed_tap away from that edge.  The result, if any, is stored
 * in tegra_host->tuned_tap_delay; otherwise the HW-tuned tap is kept.
 */
static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;	/* saw a failing tap yet */
	bool start_pass = false;	/* saw fail->pass transition */
	bool end_pass = false;		/* saw pass->fail transition */
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract good valid passing window by
	 * filtering out un-wanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		/* Select which 32-tap status word to read. */
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				/* Rising edge: a passing window starts here. */
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				/* Falling edge: the passing window ends. */
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}
898
/*
 * After hardware tuning completes, decide whether the HW-selected tap is
 * trustworthy.  Using the SoC's min/max tap delay and the current clock
 * period, compute thresholds for detecting merged/bubble windows; when
 * the reported window is partial or merged, run software tap correction.
 * Finally program the chosen tap value.
 */
static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use incase if no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		/* Convert the clock period into pico-seconds. */
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		/* Best/worst case number of taps spanning one clock period. */
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and Lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when HW tuning result contains single edge
		 * and tap is set at fixed tap delay relative to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		/* Window boundaries reported by the tuning hardware. */
		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * partial window includes edges of the tuning range.
		 * merged window includes more taps so window width is higher
		 * than upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}
956
957static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
958{
959 struct sdhci_host *host = mmc_priv(mmc);
960 int err;
961
962 err = sdhci_execute_tuning(mmc, opcode);
963 if (!err && !host->tuning_err)
964 tegra_sdhci_post_tuning(host);
965
966 return err;
967}
968
/*
 * Program signaling for the requested timing: select the tuning iteration
 * count, configure the vendor tuning registers, run pad calibration and
 * apply either the previously tuned tap or the default tap.  HS400
 * additionally gets its DQS trim and a DLL calibration.
 */
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes. */
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_HS400:
		set_dqs_trim = true;
		do_hs400_dll_cal = true;
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		tegra_host->ddr_signaling = true;
		set_default_tap = true;
		break;
	default:
		set_default_tap = true;
		break;
	}

	/* Program iteration count; start tap 0 and multiplier 1. */
	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);

	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;

	sdhci_set_uhs_signaling(host, timing);

	tegra_sdhci_pad_autocalib(host);

	/* Reuse the tuned tap where one exists and the mode allows it. */
	if (tegra_host->tuned_tap_delay && !set_default_tap)
		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
	else
		tegra_sdhci_set_tap(host, tegra_host->default_tap);

	if (set_dqs_trim)
		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);

	if (do_hs400_dll_cal)
		tegra_sdhci_hs400_dll_cal(host);
}
1031
1032static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1033{
1034 unsigned int min, max;
1035
1036 /*
1037 * Start search for minimum tap value at 10, as smaller values are
1038 * may wrongly be reported as working but fail at higher speeds,
1039 * according to the TRM.
1040 */
1041 min = 10;
1042 while (min < 255) {
1043 tegra_sdhci_set_tap(host, min);
1044 if (!mmc_send_tuning(host->mmc, opcode, NULL))
1045 break;
1046 min++;
1047 }
1048
1049 /* Find the maximum tap value that still passes. */
1050 max = min + 1;
1051 while (max < 255) {
1052 tegra_sdhci_set_tap(host, max);
1053 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1054 max--;
1055 break;
1056 }
1057 max++;
1058 }
1059
1060 /* The TRM states the ideal tap value is at 75% in the passing range. */
1061 tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1062
1063 return mmc_send_tuning(host->mmc, opcode, NULL);
1064}
1065
1066static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1067 struct mmc_ios *ios)
1068{
1069 struct sdhci_host *host = mmc_priv(mmc);
1070 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1071 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1072 int ret = 0;
1073
1074 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1075 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1076 if (ret < 0)
1077 return ret;
1078 ret = sdhci_start_signal_voltage_switch(mmc, ios);
1079 } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1080 ret = sdhci_start_signal_voltage_switch(mmc, ios);
1081 if (ret < 0)
1082 return ret;
1083 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1084 }
1085
1086 if (tegra_host->pad_calib_required)
1087 tegra_sdhci_pad_autocalib(host);
1088
1089 return ret;
1090}
1091
1092static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1093 struct sdhci_tegra *tegra_host)
1094{
1095 tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1096 if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1097 dev_dbg(dev, "No pinctrl info, err: %ld\n",
1098 PTR_ERR(tegra_host->pinctrl_sdmmc));
1099 return -1;
1100 }
1101
1102 tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1103 tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1104 if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1105 if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1106 tegra_host->pinctrl_state_1v8_drv = NULL;
1107 }
1108
1109 tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1110 tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1111 if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1112 if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1113 tegra_host->pinctrl_state_3v3_drv = NULL;
1114 }
1115
1116 tegra_host->pinctrl_state_3v3 =
1117 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1118 if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1119 dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1120 PTR_ERR(tegra_host->pinctrl_state_3v3));
1121 return -1;
1122 }
1123
1124 tegra_host->pinctrl_state_1v8 =
1125 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1126 if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1127 dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1128 PTR_ERR(tegra_host->pinctrl_state_1v8));
1129 return -1;
1130 }
1131
1132 tegra_host->pad_control_available = true;
1133
1134 return 0;
1135}
1136
1137static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1138{
1139 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1140 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1141 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1142
1143 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1144 tegra_host->pad_calib_required = true;
1145}
1146
/*
 * cqhci_host_ops.write_l hook: intercepts the CQE unhalt write to run the
 * Tegra-specific resume sequence; all other writes pass straight through.
 */
static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	u8 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
	 * to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to block size register when
	 * CQE is unhalted. So handling CQE resume sequence here to configure
	 * SDHCI block registers prior to exiting CQE halt state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		/* Re-program SDHCI block registers while still halted. */
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		/* Poll up to 50us for the HALT bit to clear. */
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quick, but incase if Tegra CQE
		 * doesn't resume retry unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}
1183
1184static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1185 struct mmc_request *mrq, u64 *data)
1186{
1187 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1188 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1189 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1190
1191 if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1192 mrq->cmd->flags & MMC_RSP_R1B)
1193 *data |= CQHCI_CMD_TIMING(1);
1194}
1195
/*
 * cqhci_host_ops.enable hook: works around Tegra's register-access
 * restriction before delegating to the generic sdhci_cqe_enable(), and
 * applies the CBC=0 workaround for CMD CRC errors.
 */
static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 val;

	/*
	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
	 * register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_cqe_enable(mmc);
		/* Restore the original CQE enable state. */
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * Recommended fix to set CBC to 0 allowing send status command only
	 * when data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}
1228
/* cqhci_host_ops.dumpregs hook: dump the underlying SDHCI registers. */
static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_dumpregs(host);
}
1233
1234static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1235{
1236 int cmd_error = 0;
1237 int data_error = 0;
1238
1239 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1240 return intmask;
1241
1242 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1243
1244 return 0;
1245}
1246
/* CQHCI callbacks implementing the Tegra-specific CQE workarounds above. */
static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l    = tegra_cqhci_writel,
	.enable	= sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
};
1254
1255static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1256{
1257 struct sdhci_pltfm_host *platform = sdhci_priv(host);
1258 struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1259 const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1260 struct device *dev = mmc_dev(host->mmc);
1261
1262 if (soc->dma_mask)
1263 return dma_set_mask_and_coherent(dev, soc->dma_mask);
1264
1265 return 0;
1266}
1267
/* Vendor ops shared by Tegra20/Tegra30 (no .write_w override needed). */
static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops  = &tegra_sdhci_ops,
};

/* Tegra20: 32-bit DMA addressing. */
static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
1297
static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
		    * though no command operation was in progress."
		    *
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops  = &tegra_sdhci_ops,
};

/* Tegra30: first generation with pad calibration and SDR50/SDR104. */
static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_PADCALIB,
};
1325
/* Tegra114+ ops: adds a .write_w override on top of the common set. */
static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
};
1356
static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};

/* Tegra124: same ops/quirks as Tegra114 but 34-bit DMA addressing. */
static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
};
1372
/*
 * Tegra210 ops: uses the dedicated tegra210 16-bit write handler and the
 * hardware tuning path (no .platform_execute_tuning).
 */
static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra210_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra210_sdhci_ops,
};

/* Tegra210: pad control, separate timeout clock (TMCLK), tuned tap range. */
static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};
1409
/* Tegra186 ops: adds the CQHCI .irq hook for command-queue interrupts. */
static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra186_sdhci_ops,
};

/* Tegra186: 40-bit DMA, DCMD R1b timing quirk for CQHCI. */
static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};
1447
/* Tegra194: reuses the Tegra186 pdata, but 39-bit DMA and its own taps. */
static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};
1460
/* OF match table, newest SoC first; .data selects the per-SoC config. */
static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1472
/*
 * Register the SDHCI host. When hardware command queueing is enabled
 * (tegra_host->enable_hwcq), split host registration into setup/add so the
 * CQHCI engine can be initialized in between.
 *
 * Returns 0 on success or a negative error code.
 */
static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	/* Simple path: no command queueing requested. */
	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(host->mmc->parent,
				sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* CQE registers live at a fixed offset inside the SDHCI window. */
	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	/* Undo sdhci_setup_host() on any failure after it succeeded. */
	sdhci_cleanup_host(host);
	return ret;
}
1520
1521static int sdhci_tegra_probe(struct platform_device *pdev)
1522{
1523 const struct of_device_id *match;
1524 const struct sdhci_tegra_soc_data *soc_data;
1525 struct sdhci_host *host;
1526 struct sdhci_pltfm_host *pltfm_host;
1527 struct sdhci_tegra *tegra_host;
1528 struct clk *clk;
1529 int rc;
1530
1531 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1532 if (!match)
1533 return -EINVAL;
1534 soc_data = match->data;
1535
1536 host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1537 if (IS_ERR(host))
1538 return PTR_ERR(host);
1539 pltfm_host = sdhci_priv(host);
1540
1541 tegra_host = sdhci_pltfm_priv(pltfm_host);
1542 tegra_host->ddr_signaling = false;
1543 tegra_host->pad_calib_required = false;
1544 tegra_host->pad_control_available = false;
1545 tegra_host->soc_data = soc_data;
1546
1547 if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1548 rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1549 if (rc == 0)
1550 host->mmc_host_ops.start_signal_voltage_switch =
1551 sdhci_tegra_start_signal_voltage_switch;
1552 }
1553
1554 /* Hook to periodically rerun pad calibration */
1555 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1556 host->mmc_host_ops.request = tegra_sdhci_request;
1557
1558 host->mmc_host_ops.hs400_enhanced_strobe =
1559 tegra_sdhci_hs400_enhanced_strobe;
1560
1561 if (!host->ops->platform_execute_tuning)
1562 host->mmc_host_ops.execute_tuning =
1563 tegra_sdhci_execute_hw_tuning;
1564
1565 rc = mmc_of_parse(host->mmc);
1566 if (rc)
1567 goto err_parse_dt;
1568
1569 if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1570 host->mmc->caps |= MMC_CAP_1_8V_DDR;
1571
1572 /* R1B responses is required to properly manage HW busy detection. */
1573 host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
1574
1575 tegra_sdhci_parse_dt(host);
1576
1577 tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1578 GPIOD_OUT_HIGH);
1579 if (IS_ERR(tegra_host->power_gpio)) {
1580 rc = PTR_ERR(tegra_host->power_gpio);
1581 goto err_power_req;
1582 }
1583
1584 /*
1585 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
1586 * timeout clock and SW can choose TMCLK or SDCLK for hardware
1587 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
1588 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
1589 *
1590 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
1591 * 12Mhz TMCLK which is advertised in host capability register.
1592 * With TMCLK of 12Mhz provides maximum data timeout period that can
1593 * be achieved is 11s better than using SDCLK for data timeout.
1594 *
1595 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
1596 * supporting separate TMCLK.
1597 */
1598
1599 if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1600 clk = devm_clk_get(&pdev->dev, "tmclk");
1601 if (IS_ERR(clk)) {
1602 rc = PTR_ERR(clk);
1603 if (rc == -EPROBE_DEFER)
1604 goto err_power_req;
1605
1606 dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1607 clk = NULL;
1608 }
1609
1610 clk_set_rate(clk, 12000000);
1611 rc = clk_prepare_enable(clk);
1612 if (rc) {
1613 dev_err(&pdev->dev,
1614 "failed to enable tmclk: %d\n", rc);
1615 goto err_power_req;
1616 }
1617
1618 tegra_host->tmclk = clk;
1619 }
1620
1621 clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1622 if (IS_ERR(clk)) {
1623 rc = PTR_ERR(clk);
1624
1625 if (rc != -EPROBE_DEFER)
1626 dev_err(&pdev->dev, "failed to get clock: %d\n", rc);
1627
1628 goto err_clk_get;
1629 }
1630 clk_prepare_enable(clk);
1631 pltfm_host->clk = clk;
1632
1633 tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1634 "sdhci");
1635 if (IS_ERR(tegra_host->rst)) {
1636 rc = PTR_ERR(tegra_host->rst);
1637 dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1638 goto err_rst_get;
1639 }
1640
1641 rc = reset_control_assert(tegra_host->rst);
1642 if (rc)
1643 goto err_rst_get;
1644
1645 usleep_range(2000, 4000);
1646
1647 rc = reset_control_deassert(tegra_host->rst);
1648 if (rc)
1649 goto err_rst_get;
1650
1651 usleep_range(2000, 4000);
1652
1653 rc = sdhci_tegra_add_host(host);
1654 if (rc)
1655 goto err_add_host;
1656
1657 return 0;
1658
1659err_add_host:
1660 reset_control_assert(tegra_host->rst);
1661err_rst_get:
1662 clk_disable_unprepare(pltfm_host->clk);
1663err_clk_get:
1664 clk_disable_unprepare(tegra_host->tmclk);
1665err_power_req:
1666err_parse_dt:
1667 sdhci_pltfm_free(pdev);
1668 return rc;
1669}
1670
/*
 * Platform remove: unregister the host, hold the controller in reset,
 * and release both clocks (reverse of probe).
 */
static int sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);
	clk_disable_unprepare(pltfm_host->clk);
	clk_disable_unprepare(tegra_host->tmclk);

	sdhci_pltfm_free(pdev);

	return 0;
}
1688
1689#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the CQE engine first (if present), then the
 * SDHCI host, then gate the module clock. CQE is resumed again if the
 * host suspend fails so the device is left in a consistent state.
 */
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		/* Roll back the CQE suspend so the host stays usable. */
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}
1711
/*
 * System resume: mirror image of suspend — ungate the clock, resume the
 * SDHCI host, then the CQE engine. Each failure unwinds the prior steps.
 */
static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
1740#endif
1741
/* System sleep PM ops only; no runtime PM callbacks are registered. */
static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver		= {
		.name	= "sdhci-tegra",
		.of_match_table = sdhci_tegra_dt_match,
		.pm	= &sdhci_tegra_dev_pm_ops,
	},
	.probe		= sdhci_tegra_probe,
	.remove		= sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);
1756
1757MODULE_DESCRIPTION("SDHCI driver for Tegra");
1758MODULE_AUTHOR("Google, Inc.");
1759MODULE_LICENSE("GPL v2");