blob: 48765208e2953774b92de445daa7929e056f9b34 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Freescale eSDHC controller driver.
4 *
5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc.
7 *
8 * Authors: Xiaobo Xie <X.Xie@freescale.com>
9 * Anton Vorontsov <avorontsov@ru.mvista.com>
10 */
11
12#include <linux/err.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/delay.h>
17#include <linux/module.h>
18#include <linux/sys_soc.h>
19#include <linux/clk.h>
20#include <linux/ktime.h>
21#include <linux/dma-mapping.h>
22#include <linux/mmc/host.h>
23#include <linux/mmc/mmc.h>
24#include "sdhci-pltfm.h"
25#include "sdhci-esdhc.h"
26
27#define VENDOR_V_22 0x12
28#define VENDOR_V_23 0x13
29
30#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
31
/*
 * Per-SoC maximum-clock fixup table, indexed by MMC timing mode.
 * A zero entry in max_clk[] means "no cap" for that timing (see
 * esdhc_of_set_clock()).
 */
struct esdhc_clk_fixup {
	/* Default max clock (Hz) used for SD cards in legacy timing */
	const unsigned int sd_dflt_max_clk;
	/* Per-timing-mode clock caps (Hz); 0 = no cap for that mode */
	const unsigned int max_clk[MMC_TIMING_NUM];
};

/* LS1021A: cap MMC/SD high-speed modes at 46.5 MHz */
static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

/* LS1046A: cap SDR104/HS200 at 167 MHz */
static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

/* LS1012A: cap SDR104/HS200 at 125 MHz */
static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

/* P1010: conservative caps for legacy and high-speed modes */
static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
61
/*
 * Device-tree match table. For SoCs with clock restrictions, .data points
 * at the corresponding esdhc_clk_fixup table; entries without .data get no
 * clock fixups.
 */
static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
73
/* Driver private data, stored in the sdhci_pltfm_host private area. */
struct sdhci_esdhc {
	u8 vendor_ver;		/* controller vendor version (from HOSTVER) */
	u8 spec_ver;		/* SDHCI spec version reported by controller */
	/* HOSTVER reports wrong vendor/spec version (T4240-R1.0-R2.0) */
	bool quirk_incorrect_hostver;
	/* only a limited set of clock divisions allowed in HS400 */
	bool quirk_limited_clk_division;
	/* DLL pulse detection unreliable; clear PD_PULSE_STRETCH_SEL */
	bool quirk_unreliable_pulse_detection;
	/* tuning erratum: always use fixed SW-tuning window */
	bool quirk_tuning_erratum_type1;
	/* tuning erratum: pick SW-tuning window from TBSTAT */
	bool quirk_tuning_erratum_type2;
	/* Command Inhibit (data) bit unreliable; mask it on reads */
	bool quirk_ignore_data_inhibit;
	/* delay before data reset so in-flight DMA can finish */
	bool quirk_delay_before_data_reset;
	/* spurious transfer-complete on multi-block writes */
	bool quirk_trans_complete_erratum;
	bool in_sw_tuning;	/* software tuning sequence in progress */
	unsigned int peripheral_clock;	/* Hz; 0 if not provided */
	const struct esdhc_clk_fixup *clk_fixup;	/* may be NULL */
	u32 div_ratio;		/* last programmed pre_div * div */
};
90
/**
 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
 *		       to make it compatible with SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
			     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 ret;

	/*
	 * The ADMA flag bit in eSDHC is not compatible with the standard
	 * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
	 * is supported by eSDHC.
	 * On many FSL eSDHC controllers the reset value of the
	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them can't support
	 * ADMA; only those with vendor version greater than 2.2/0x12
	 * support ADMA.
	 */
	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
		if (esdhc->vendor_ver > VENDOR_V_22) {
			ret = value | SDHCI_CAN_DO_ADMA2;
			return ret;
		}
	}

	/*
	 * The DAT[3:0] line signal levels and the CMD line signal level are
	 * not compatible with standard SDHC register. The line signal levels
	 * DAT[7:0] are at bits 31:24 and the command line signal level is at
	 * bit 23. All other bits are the same as in the standard SDHC
	 * register.
	 */
	if (spec_reg == SDHCI_PRESENT_STATE) {
		ret = value & 0x000fffff;
		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
		ret |= (value << 1) & SDHCI_CMD_LVL;

		/*
		 * Some controllers have unreliable Data Line Active
		 * bit for commands with busy signal. This affects
		 * Command Inhibit (data) bit. Just ignore it since
		 * MMC core driver has already polled card status
		 * with CMD13 after any command with busy signal.
		 */
		if (esdhc->quirk_ignore_data_inhibit)
			ret &= ~SDHCI_DATA_INHIBIT;
		return ret;
	}

	/*
	 * DTS properties of mmc host are used to enable each speed mode
	 * according to soc and board capability. So clean up
	 * SDR50/SDR104/DDR50 support bits here.
	 */
	if (spec_reg == SDHCI_CAPABILITIES_1) {
		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
				SDHCI_SUPPORT_DDR50);
		return ret;
	}

	/* All other registers are compatible as-is. */
	ret = value;
	return ret;
}
166
167static u16 esdhc_readw_fixup(struct sdhci_host *host,
168 int spec_reg, u32 value)
169{
170 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
171 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
172 u16 ret;
173 int shift = (spec_reg & 0x2) * 8;
174
175 if (spec_reg == SDHCI_HOST_VERSION)
176 ret = value & 0xffff;
177 else
178 ret = (value >> shift) & 0xffff;
179 /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
180 * vendor version and spec version information.
181 */
182 if ((spec_reg == SDHCI_HOST_VERSION) &&
183 (esdhc->quirk_incorrect_hostver))
184 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
185 return ret;
186}
187
188static u8 esdhc_readb_fixup(struct sdhci_host *host,
189 int spec_reg, u32 value)
190{
191 u8 ret;
192 u8 dma_bits;
193 int shift = (spec_reg & 0x3) * 8;
194
195 ret = (value >> shift) & 0xff;
196
197 /*
198 * "DMA select" locates at offset 0x28 in SD specification, but on
199 * P5020 or P3041, it locates at 0x29.
200 */
201 if (spec_reg == SDHCI_HOST_CONTROL) {
202 /* DMA select is 22,23 bits in Protocol Control Register */
203 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
204 /* fixup the result */
205 ret &= ~SDHCI_CTRL_DMA_MASK;
206 ret |= dma_bits;
207 }
208 return ret;
209}
210
211/**
212 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
213 * written into eSDHC register.
214 *
215 * @host: pointer to sdhci_host
216 * @spec_reg: SD spec register address
217 * @value: 8/16/32bit SD spec register value that would be written
218 * @old_value: 32bit eSDHC register value on spec_reg address
219 *
220 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
221 * registers are 32 bits. There are differences in register size, register
222 * address, register function, bit position and function between eSDHC spec
223 * and SD spec.
224 *
225 * Return a fixed up register value
226 */
227static u32 esdhc_writel_fixup(struct sdhci_host *host,
228 int spec_reg, u32 value, u32 old_value)
229{
230 u32 ret;
231
232 /*
233 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
234 * when SYSCTL[RSTD] is set for some special operations.
235 * No any impact on other operation.
236 */
237 if (spec_reg == SDHCI_INT_ENABLE)
238 ret = value | SDHCI_INT_BLK_GAP;
239 else
240 ret = value;
241
242 return ret;
243}
244
245static u32 esdhc_writew_fixup(struct sdhci_host *host,
246 int spec_reg, u16 value, u32 old_value)
247{
248 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
249 int shift = (spec_reg & 0x2) * 8;
250 u32 ret;
251
252 switch (spec_reg) {
253 case SDHCI_TRANSFER_MODE:
254 /*
255 * Postpone this write, we must do it together with a
256 * command write that is down below. Return old value.
257 */
258 pltfm_host->xfer_mode_shadow = value;
259 return old_value;
260 case SDHCI_COMMAND:
261 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
262 return ret;
263 }
264
265 ret = old_value & (~(0xffff << shift));
266 ret |= (value << shift);
267
268 if (spec_reg == SDHCI_BLOCK_SIZE) {
269 /*
270 * Two last DMA bits are reserved, and first one is used for
271 * non-standard blksz of 4096 bytes that we don't support
272 * yet. So clear the DMA boundary bits.
273 */
274 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
275 }
276 return ret;
277}
278
/*
 * Merge an 8-bit SD-spec register write into the containing 32-bit eSDHC
 * word, handling the non-standard power-control and host-control layouts.
 */
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
			      int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" location is offset 0x28 in SD specification, but on
	 * P5020 or P3041, it's located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If host control register is not standard, exit
		 * this function
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is 22,23 bits in Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		/* Place the requested DMA-select bits at 22,23. */
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		/* Rebuild the low byte, keeping the old in-place DMA bits. */
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	/* Default: merge the byte into its slot within the 32-bit word. */
	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}
320
321static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
322{
323 u32 ret;
324 u32 value;
325
326 if (reg == SDHCI_CAPABILITIES_1)
327 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
328 else
329 value = ioread32be(host->ioaddr + reg);
330
331 ret = esdhc_readl_fixup(host, reg, value);
332
333 return ret;
334}
335
336static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
337{
338 u32 ret;
339 u32 value;
340
341 if (reg == SDHCI_CAPABILITIES_1)
342 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
343 else
344 value = ioread32(host->ioaddr + reg);
345
346 ret = esdhc_readl_fixup(host, reg, value);
347
348 return ret;
349}
350
351static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
352{
353 u16 ret;
354 u32 value;
355 int base = reg & ~0x3;
356
357 value = ioread32be(host->ioaddr + base);
358 ret = esdhc_readw_fixup(host, reg, value);
359 return ret;
360}
361
362static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
363{
364 u16 ret;
365 u32 value;
366 int base = reg & ~0x3;
367
368 value = ioread32(host->ioaddr + base);
369 ret = esdhc_readw_fixup(host, reg, value);
370 return ret;
371}
372
373static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
374{
375 u8 ret;
376 u32 value;
377 int base = reg & ~0x3;
378
379 value = ioread32be(host->ioaddr + base);
380 ret = esdhc_readb_fixup(host, reg, value);
381 return ret;
382}
383
384static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
385{
386 u8 ret;
387 u32 value;
388 int base = reg & ~0x3;
389
390 value = ioread32(host->ioaddr + base);
391 ret = esdhc_readb_fixup(host, reg, value);
392 return ret;
393}
394
395static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
396{
397 u32 value;
398
399 value = esdhc_writel_fixup(host, reg, val, 0);
400 iowrite32be(value, host->ioaddr + reg);
401}
402
403static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
404{
405 u32 value;
406
407 value = esdhc_writel_fixup(host, reg, val, 0);
408 iowrite32(value, host->ioaddr + reg);
409}
410
/*
 * 16-bit big-endian write: read-modify-write the containing 32-bit word.
 * Also implements the SW-tuning sequencing requirement on SYSCTL2.
 */
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	/* Transfer-mode writes are shadowed by the fixup, not issued. */
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}
436
/*
 * 16-bit little-endian write: read-modify-write the containing 32-bit word.
 * Also implements the SW-tuning sequencing requirement on SYSCTL2.
 */
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	/* Transfer-mode writes are shadowed by the fixup, not issued. */
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}
462
463static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
464{
465 int base = reg & ~0x3;
466 u32 value;
467 u32 ret;
468
469 value = ioread32be(host->ioaddr + base);
470 ret = esdhc_writeb_fixup(host, reg, val, value);
471 iowrite32be(ret, host->ioaddr + base);
472}
473
474static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
475{
476 int base = reg & ~0x3;
477 u32 value;
478 u32 ret;
479
480 value = ioread32(host->ioaddr + base);
481 ret = esdhc_writeb_fixup(host, reg, val, value);
482 iowrite32(ret, host->ioaddr + base);
483}
484
/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
 * and Block Gap Event(IRQSTAT[BGE]) are also set.
 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
 * and re-issue the entire read transaction from beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	/* Only vendor version 2.3 with TC+BGE both raised is affected. */
	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	/* Clear the bogus ADMA error and resume the transfer manually. */
	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		 SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
517
/*
 * Enable DMA for the controller: set the DMA mask where the SoC needs a
 * non-default one, and configure DMA snooping from the DT coherency flag.
 * Returns 0 on success or a negative errno from dma_set_mask_and_coherent().
 */
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
	int ret;
	u32 value;
	struct device *dev = mmc_dev(host->mmc);

	/* LS1043A/LS1046A use a 40-bit DMA mask. */
	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		if (ret)
			return ret;
	}

	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

	/* Snoop only when the device's DMA is cache-coherent per the DT. */
	if (of_dma_is_coherent(dev->of_node))
		value |= ESDHC_DMA_SNOOP;
	else
		value &= ~ESDHC_DMA_SNOOP;

	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
	return 0;
}
541
542static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
543{
544 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
545 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
546
547 if (esdhc->peripheral_clock)
548 return esdhc->peripheral_clock;
549 else
550 return pltfm_host->clock;
551}
552
553static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
554{
555 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
556 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
557 unsigned int clock;
558
559 if (esdhc->peripheral_clock)
560 clock = esdhc->peripheral_clock;
561 else
562 clock = pltfm_host->clock;
563 return clock / 256 / 16;
564}
565
/*
 * Gate or ungate the SD clock (SYSCTL[SDCLKEN]) and, when ungating, poll
 * PRSSTAT for clock-stable for up to 20 ms.
 */
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	u32 val;
	ktime_t timeout;

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= ESDHC_CLOCK_SDCLKEN;
	else
		val &= ~ESDHC_CLOCK_SDCLKEN;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	val = ESDHC_CLOCK_STABLE;
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		udelay(10);
	}
}
596
/*
 * Program the SD clock to the requested frequency.
 *
 * Applies per-SoC clock caps from the clk_fixup table, computes the
 * prescaler/divisor pair, handles the limited-clock-division quirk for
 * HS400, performs the extra HS400 DLL setup sequence, then re-enables the
 * SD clock after polling for clock-stable (max 20 ms).
 */
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int pre_div = 1;
	int div = 1;
	int division;
	ktime_t timeout;
	long fixup = 0;
	u32 temp;

	host->mmc->actual_clock = 0;

	/* clock == 0 means "gate the SD clock". */
	if (clock == 0) {
		esdhc_clock_enable(host, false);
		return;
	}

	/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Pick the applicable clock cap: SD-card legacy default, or the
	 * per-timing-mode cap from the fixup table (0 = no cap).
	 */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (fixup && clock > fixup)
		clock = fixup;

	/* Gate all clocks and clear the divider field before reprogramming. */
	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
		  ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/* Find the smallest pre_div (power of two) and div that reach
	 * the target frequency without exceeding it.
	 */
	while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock && div < 16)
		div++;

	/* Quirk: in HS400 only pre_div=4 with div 1..3 is reliable. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		division = pre_div * div;
		if (division <= 4) {
			pre_div = 4;
			div = 1;
		} else if (division <= 8) {
			pre_div = 4;
			div = 2;
		} else if (division <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
	}

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->max_clk / pre_div / div);
	host->mmc->actual_clock = host->max_clk / pre_div / div;
	esdhc->div_ratio = pre_div * div;
	/* Convert to the register encodings: pre_div/2 and div-1. */
	pre_div >>= 1;
	div--;

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		 | (div << ESDHC_DIVIDER_SHIFT)
		 | (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/* Extra DLL configuration sequence for HS400 at full speed. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		/* Flush the async FIFO with the clock gated again. */
		esdhc_clock_enable(host, false);
		temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		temp |= ESDHC_FLUSH_ASYNC_FIFO;
		sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
	}

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			return;
		}
		udelay(10);
	}

	/* Finally ungate the SD clock. */
	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= ESDHC_CLOCK_SDCLKEN;
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
}
713
714static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
715{
716 u32 ctrl;
717
718 ctrl = sdhci_readl(host, ESDHC_PROCTL);
719 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
720 switch (width) {
721 case MMC_BUS_WIDTH_8:
722 ctrl |= ESDHC_CTRL_8BITBUS;
723 break;
724
725 case MMC_BUS_WIDTH_4:
726 ctrl |= ESDHC_CTRL_4BITBUS;
727 break;
728
729 default:
730 break;
731 }
732
733 sdhci_writel(host, ctrl, ESDHC_PROCTL);
734}
735
/*
 * Controller reset with eSDHC-specific pre/post fixups: optional delay
 * before a data reset, bus-width save/restore for old controllers, and
 * tuning-block / DLL cleanup after a full reset.
 */
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add delay to make sure all the DMA transfers are finished
	 * for quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleaned manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		/* Disable the tuning block after a full reset. */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}
799
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is a
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR 0x408
#define SDHCIOVSELCR_TGLEN 0x80000000	/* toggle enable */
#define SDHCIOVSELCR_VSELVAL 0x60000000	/* voltage-select value field */
#define SDHCIOVSELCR_SDHC_VS 0x00000001	/* SDHC voltage-select bit */
817
/*
 * Switch the IO signal voltage between 3.3 V and 1.8 V.
 *
 * For 1.8 V, platforms listed in scfg_device_ids additionally need the
 * SCFG SDHCIOVSELCR toggle sequence around the PROCTL[VOLT_SEL] write.
 * Always returns 0; unknown voltages are silently accepted.
 */
static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		of_node_put(scfg_node);
		if (scfg_base) {
			/* Arm the toggle with the voltage-select value... */
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			mdelay(5);

			/* ...then latch the SDHC voltage-select bit. */
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);
			iounmap(scfg_base);
		} else {
			/* No SCFG on this platform: plain PROCTL switch. */
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}
870
871static struct soc_device_attribute soc_tuning_erratum_type1[] = {
872 { .family = "QorIQ T1023", .revision = "1.0", },
873 { .family = "QorIQ T1040", .revision = "1.0", },
874 { .family = "QorIQ T2080", .revision = "1.0", },
875 { .family = "QorIQ LS1021A", .revision = "1.0", },
876 { },
877};
878
879static struct soc_device_attribute soc_tuning_erratum_type2[] = {
880 { .family = "QorIQ LS1012A", .revision = "1.0", },
881 { .family = "QorIQ LS1043A", .revision = "1.*", },
882 { .family = "QorIQ LS1046A", .revision = "1.0", },
883 { .family = "QorIQ LS1080A", .revision = "1.0", },
884 { .family = "QorIQ LS2080A", .revision = "1.0", },
885 { .family = "QorIQ LA1575A", .revision = "1.0", },
886 { },
887};
888
/*
 * Enable or disable the tuning block (TBCTL[TB_EN]). The clock must be
 * gated and the async FIFO flushed around the change; the register-write
 * order here matters.
 */
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
	u32 val;

	esdhc_clock_enable(host, false);

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	val = sdhci_readl(host, ESDHC_TBCTL);
	if (enable)
		val |= ESDHC_TB_EN;
	else
		val &= ~ESDHC_TB_EN;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc_clock_enable(host, true);
}
908
/*
 * Compute the software-tuning window (start/end pointers, in units of
 * div_ratio) for the erratum workarounds. Type-1 parts always use a
 * fixed window; otherwise the window is derived from TBSTAT readings.
 */
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 tbstat_15_8, tbstat_7_0;
	u32 val;

	/* Type-1 erratum: fixed window, no TBSTAT probing needed. */
	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
	 * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */
	tbstat_7_0 = val & 0xff;
	tbstat_15_8 = (val >> 8) & 0xff;

	if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}
962
/*
 * Run the tuning procedure in software-tuning mode: program the window
 * pointers into TBPTR, select TB_MODE_SW, then invoke the core tuning
 * loop with in_sw_tuning set (which triggers the SMPCLKSEL sequencing in
 * the writew accessors). Returns the sdhci_execute_tuning() result.
 */
static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;
	int ret;

	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
	      ESDHC_WNDW_STRT_PTR_MASK;
	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
	sdhci_writel(host, val, ESDHC_TBPTR);

	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~ESDHC_TB_MODE_MASK;
	val |= ESDHC_TB_MODE_SW;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc->in_sw_tuning = true;
	ret = sdhci_execute_tuning(mmc, opcode);
	esdhc->in_sw_tuning = false;
	return ret;
}
989
/*
 * Full tuning entry point: run hardware tuning, and on erratum-affected
 * parts fall back to software tuning and then to a reduced-clock retry.
 * On success in HS400 tuning, enable flow control (FLW_CTL_BG).
 */
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/* For tuning mode, the sd clock divisor value
	 * must be larger than 3 according to reference manual.
	 * NOTE(review): assumes peripheral_clock was set during probe;
	 * if it is 0 this would request a 0 clock — confirm with probe path.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	/*
	 * The eSDHC controller takes the data timeout value into account
	 * during tuning. If the SD card is too slow sending the response, the
	 * timer will expire and a "Buffer Read Ready" interrupt without data
	 * is triggered. This leads to tuning errors.
	 *
	 * Just set the timeout to the maximum value because the core will
	 * already take care of it in sdhci_send_tuning().
	 */
	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/* If HW tuning fails and triggers erratum,
		 * try workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		/* Tuning failed: run without the tuning block. */
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		/* HS400 needs background flow control after tuning. */
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}
1088
1089static void esdhc_set_uhs_signaling(struct sdhci_host *host,
1090 unsigned int timing)
1091{
1092 if (timing == MMC_TIMING_MMC_HS400)
1093 esdhc_tuning_block_enable(host, true);
1094 else
1095 sdhci_set_uhs_signaling(host, timing);
1096}
1097
1098static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1099{
1100 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1101 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1102 u32 command;
1103
1104 if (esdhc->quirk_trans_complete_erratum) {
1105 command = SDHCI_GET_CMD(sdhci_readw(host,
1106 SDHCI_COMMAND));
1107 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1108 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1109 intmask & SDHCI_INT_DATA_END) {
1110 intmask &= ~SDHCI_INT_DATA_END;
1111 sdhci_writel(host, SDHCI_INT_DATA_END,
1112 SDHCI_INT_STATUS);
1113 }
1114 }
1115 return intmask;
1116}
1117
#ifdef CONFIG_PM_SLEEP
/* Copy of SDHCI_HOST_CONTROL saved at suspend, rewritten on resume. */
static u32 esdhc_proctl;
1120static int esdhc_of_suspend(struct device *dev)
1121{
1122 struct sdhci_host *host = dev_get_drvdata(dev);
1123
1124 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
1125
1126 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1127 mmc_retune_needed(host->mmc);
1128
1129 return sdhci_suspend_host(host);
1130}
1131
1132static int esdhc_of_resume(struct device *dev)
1133{
1134 struct sdhci_host *host = dev_get_drvdata(dev);
1135 int ret = sdhci_resume_host(host);
1136
1137 if (ret == 0) {
1138 /* Isn't this already done by sdhci_resume_host() ? --rmk */
1139 esdhc_of_enable_dma(host);
1140 sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1141 }
1142 return ret;
1143}
1144#endif
1145
/* System sleep PM ops; the callbacks only exist under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			 esdhc_of_suspend,
			 esdhc_of_resume);
1149
/* SDHCI callbacks for big-endian eSDHC register layouts. */
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1167
/* SDHCI callbacks for little-endian eSDHC register layouts. */
static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1185
/*
 * Platform data for big-endian controllers; card detection is marked
 * broken only on PPC builds (individual compatibles may clear it later).
 */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};
1195
/* Platform data for little-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_pdata.ops == NULL ? NULL : &sdhci_esdhc_le_ops,
};
1202
/* SoC revisions whose host-version register reports the wrong value. */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};
1208
/* SoC revisions that only support a limited set of SDHC clock divisors. */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};
1215
/* SoC revisions with unreliable pulse-width detection. */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};
1222
1223static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
1224{
1225 const struct of_device_id *match;
1226 struct sdhci_pltfm_host *pltfm_host;
1227 struct sdhci_esdhc *esdhc;
1228 struct device_node *np;
1229 struct clk *clk;
1230 u32 val;
1231 u16 host_ver;
1232
1233 pltfm_host = sdhci_priv(host);
1234 esdhc = sdhci_pltfm_priv(pltfm_host);
1235
1236 host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
1237 esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
1238 SDHCI_VENDOR_VER_SHIFT;
1239 esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
1240 if (soc_device_match(soc_incorrect_hostver))
1241 esdhc->quirk_incorrect_hostver = true;
1242 else
1243 esdhc->quirk_incorrect_hostver = false;
1244
1245 if (soc_device_match(soc_fixup_sdhc_clkdivs))
1246 esdhc->quirk_limited_clk_division = true;
1247 else
1248 esdhc->quirk_limited_clk_division = false;
1249
1250 if (soc_device_match(soc_unreliable_pulse_detection))
1251 esdhc->quirk_unreliable_pulse_detection = true;
1252 else
1253 esdhc->quirk_unreliable_pulse_detection = false;
1254
1255 match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
1256 if (match)
1257 esdhc->clk_fixup = match->data;
1258 np = pdev->dev.of_node;
1259
1260 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1261 esdhc->quirk_delay_before_data_reset = true;
1262 esdhc->quirk_trans_complete_erratum = true;
1263 }
1264
1265 clk = of_clk_get(np, 0);
1266 if (!IS_ERR(clk)) {
1267 /*
1268 * esdhc->peripheral_clock would be assigned with a value
1269 * which is eSDHC base clock when use periperal clock.
1270 * For some platforms, the clock value got by common clk
1271 * API is peripheral clock while the eSDHC base clock is
1272 * 1/2 peripheral clock.
1273 */
1274 if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1275 of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
1276 esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1277 else
1278 esdhc->peripheral_clock = clk_get_rate(clk);
1279
1280 clk_put(clk);
1281 }
1282
1283 if (esdhc->peripheral_clock) {
1284 esdhc_clock_enable(host, false);
1285 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
1286 val |= ESDHC_PERIPHERAL_CLK_SEL;
1287 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
1288 esdhc_clock_enable(host, true);
1289 }
1290}
1291
1292static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1293{
1294 esdhc_tuning_block_enable(mmc_priv(mmc), false);
1295 return 0;
1296}
1297
1298static int sdhci_esdhc_probe(struct platform_device *pdev)
1299{
1300 struct sdhci_host *host;
1301 struct device_node *np;
1302 struct sdhci_pltfm_host *pltfm_host;
1303 struct sdhci_esdhc *esdhc;
1304 int ret;
1305
1306 np = pdev->dev.of_node;
1307
1308 if (of_property_read_bool(np, "little-endian"))
1309 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1310 sizeof(struct sdhci_esdhc));
1311 else
1312 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1313 sizeof(struct sdhci_esdhc));
1314
1315 if (IS_ERR(host))
1316 return PTR_ERR(host);
1317
1318 host->mmc_host_ops.start_signal_voltage_switch =
1319 esdhc_signal_voltage_switch;
1320 host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1321 host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1322 host->tuning_delay = 1;
1323
1324 esdhc_init(pdev, host);
1325
1326 sdhci_get_of_property(pdev);
1327
1328 pltfm_host = sdhci_priv(host);
1329 esdhc = sdhci_pltfm_priv(pltfm_host);
1330 if (soc_device_match(soc_tuning_erratum_type1))
1331 esdhc->quirk_tuning_erratum_type1 = true;
1332 else
1333 esdhc->quirk_tuning_erratum_type1 = false;
1334
1335 if (soc_device_match(soc_tuning_erratum_type2))
1336 esdhc->quirk_tuning_erratum_type2 = true;
1337 else
1338 esdhc->quirk_tuning_erratum_type2 = false;
1339
1340 if (esdhc->vendor_ver == VENDOR_V_22)
1341 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1342
1343 if (esdhc->vendor_ver > VENDOR_V_22)
1344 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1345
1346 if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1347 host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1348 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1349 }
1350
1351 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1352 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1353 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1354 of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1355 of_device_is_compatible(np, "fsl,t1040-esdhc"))
1356 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1357
1358 if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1359 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1360
1361 esdhc->quirk_ignore_data_inhibit = false;
1362 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1363 /*
1364 * Freescale messed up with P2020 as it has a non-standard
1365 * host control register
1366 */
1367 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1368 esdhc->quirk_ignore_data_inhibit = true;
1369 }
1370
1371 /* call to generic mmc_of_parse to support additional capabilities */
1372 ret = mmc_of_parse(host->mmc);
1373 if (ret)
1374 goto err;
1375
1376 mmc_of_parse_voltage(np, &host->ocr_mask);
1377
1378 ret = sdhci_add_host(host);
1379 if (ret)
1380 goto err;
1381
1382 return 0;
1383 err:
1384 sdhci_pltfm_free(pdev);
1385 return ret;
1386}
1387
/* Platform driver glue; matched via the sdhci_esdhc_of_match table. */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};
1397
/* Standard module registration boilerplate. */
module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");