/*
 * Copyright (C) 2010 Marvell International Ltd.
 *		Zhangfei Gao <zhangfei.gao@marvell.com>
 *		Kevin Wang <dwang4@marvell.com>
 *		Mingwei Wang <mwwang@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk/mmp.h>
#include <linux/crc32.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/asr_dvfs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/pinctrl/consumer.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/asr_sdhci.h>

#include "sdhci.h"
#include "sdhci-pltfm.h"

#define ASR_RPM_DELAY_MS	50

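/*
 * ASR vendor-specific registers. These sit above the standard SDHCI
 * register map (offsets 0x100 and up) and control internal clock
 * gating, pad clock generation, MMC mode bits and the RX/TX delay
 * lines used for timing tuning.
 */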
#define SDHC_OP_CTRL			0x104
#define SDHC_OP_EXT_REG			0x108
#define INT_CLK_GATE_MASK		(0x3 << 8)
#define OVRRD_CLK_OEN			0x0800
#define FORCE_CLK_ON			0x1000

#define SDHC_LEGACY_CTRL_REG		0x10C
#define GEN_PAD_CLK_ON			(0x1 << 6)

#define SDHC_LEGACY_CEATA_REG		0x110
#define SDHC_MMC_CTRL_REG		0x114
#define MISC_INT_EN			0x0002
#define MISC_INT			0x0004
#define ENHANCE_STROBE_EN		0x0100
#define MMC_HS400			0x0200
#define MMC_HS200			0x0400
#define MMC_CARD_MODE			0x1000

#define SDHC_RX_CFG_REG			0x118
#define RX_SDCLK_SEL0_MASK		0x3
#define RX_SDCLK_SEL0_SHIFT		0
#define RX_SDCLK_SEL1_MASK		0x3
#define RX_SDCLK_SEL1_SHIFT		2
#define RX_SDCLK_SEL1_PAD		0x0
#define RX_SDCLK_SEL1_DDLL		0x01
#define RX_SDCLK_SEL1_INTERNAL		0x02

#define SDHC_TX_CFG_REG			0x11C
#define TX_DLINE_SRC_SEL		(0x1 << 29)
#define TX_INT_CLK_SEL			(0x1 << 30)
#define TX_MUX_SEL			(0x1 << 31)

#define SDHC_HWTUNE_CFG_REG		0x120
#define SDHC_HWTUNE_CFG2_REG		0x124
#define SDHC_ROUNDTRIP_TIMING_REG	0x128
#define WRDATA_WAIT_CYCLES_MASK		0xF
#define WRDATA_WAIT_CYCLES_SHIFT	16

#define SDHC_GPIO_CFG_REG		0x12C

#define SDHC_DLINE_CTRL_REG		0x130
#define DLINE_PU			0x01
#define RX_DLINE_CODE_MASK		0xFF
#define RX_DLINE_CODE_SHIFT		0x10
#define TX_DLINE_CODE_MASK		0xFF
#define TX_DLINE_CODE_SHIFT		0x18

#define SDHC_DLINE_CFG_REG		0x134
#define RX_DLINE_REG_MASK		0xFF
#define RX_DLINE_REG_SHIFT		0x00
#define RX_DLINE_RSTB_MASK		0x1
#define RX_DLINE_RSTB_SHIFT		7
#define RX_DLINE_GAIN_MASK		0x1
#define RX_DLINE_GAIN_SHIFT		0x8
#define RX_DLINE_GAIN			0x1
#define TX_DLINE_REG_MASK		0xFF
#define TX_DLINE_REG_SHIFT		0x10
#define TX_DLINE_RSTB_MASK		0x1
#define TX_DLINE_RSTB_SHIFT		23

#define SDHC_RX_TUNE_DELAY_MIN		0x0
#define SDHC_RX_TUNE_DELAY_MAX		0xFF
#define SDHC_RX_TUNE_DELAY_STEP		0x1

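/*
 * AIB (Analog Interface Block) pad-voltage control. The exact register
 * semantics are an assumption inferred from set_mmc1_aib() below: the
 * two "access key" values are written to APBC_ASFAR and the following
 * word to unlock the AIB register before each access, and MMC1_PAD_1V8
 * switches the MMC1 pads between 3.3V and 1.8V signaling.
 */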
#define AIB_MMC1_IO_REG		0xD401E81C
#define APBC_ASFAR		0xD4015050
#define AKEY_ASFAR		0xbaba
#define AKEY_ASSAR		0xeb10
#define MMC1_PAD_1V8		(0x1 << 2)

struct sdhci_asr {
	struct clk *clk_core;
	struct clk *clk_io;
	u8 clk_enable;
	u8 power_mode;
	unsigned int tx_dly_val;
	unsigned int rx_dly_val;
};

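/*
 * Tuning block patterns as defined by the SD/eMMC specifications: the
 * 64-byte block used on 4-bit buses and the 128-byte block used for
 * 8-bit HS200 tuning. Received data is compared word-for-word against
 * these in asr_tuning_pio_check().
 */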
static const u32 tuning_patten4[16] = {
	0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe,
	0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777,
	0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff,
	0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7,
};

static const u32 tuning_patten8[32] = {
	0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc,
	0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff,
	0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff,
	0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb,
	0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc,
	0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff,
	0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb,
	0xffffbbbb, 0xffff77ff, 0xff7777ff, 0xeeddbb77,
};

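/*
 * RX tuning path: program the delay-line register, release the RX
 * delay-line reset, power up the delay line, and route the receive
 * sample clock through the digital delay line (RX_SDCLK_SEL1_DDLL)
 * instead of taking it straight from the pad.
 */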
static void asr_sw_rx_tuning_prepare(struct sdhci_host *host, u8 dline_reg)
{
	u32 reg;

	reg = sdhci_readl(host, SDHC_DLINE_CFG_REG);
	reg &= ~(RX_DLINE_REG_MASK << RX_DLINE_REG_SHIFT);
	reg |= dline_reg << RX_DLINE_REG_SHIFT;
	/* release RX reset signal */
	reg |= 0x1 << RX_DLINE_RSTB_SHIFT;
	sdhci_writel(host, reg, SDHC_DLINE_CFG_REG);

	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
	reg |= DLINE_PU;
	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);

	reg = sdhci_readl(host, SDHC_RX_CFG_REG);
	reg &= ~(RX_SDCLK_SEL1_MASK << RX_SDCLK_SEL1_SHIFT);
	reg |= RX_SDCLK_SEL1_DDLL << RX_SDCLK_SEL1_SHIFT;
	sdhci_writel(host, reg, SDHC_RX_CFG_REG);
}

static void asr_sw_rx_set_delaycode(struct sdhci_host *host, u32 delay)
{
	u32 reg;

	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
	reg &= ~(RX_DLINE_CODE_MASK << RX_DLINE_CODE_SHIFT);
	reg |= (delay & RX_DLINE_CODE_MASK) << RX_DLINE_CODE_SHIFT;
	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
}

static void asr_sw_tx_no_tuning(struct sdhci_host *host)
{
	u32 reg;

	/* clear TX_MUX_SEL */
	reg = sdhci_readl(host, SDHC_TX_CFG_REG);
	reg &= ~TX_MUX_SEL;
	sdhci_writel(host, reg, SDHC_TX_CFG_REG);
}

static void asr_sw_tx_tuning_prepare(struct sdhci_host *host)
{
	u32 reg;

	/* set TX_MUX_SEL */
	reg = sdhci_readl(host, SDHC_TX_CFG_REG);
	reg |= TX_MUX_SEL;
	sdhci_writel(host, reg, SDHC_TX_CFG_REG);

	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
	reg |= DLINE_PU;
	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
}

static void asr_sw_tx_set_dlinereg(struct sdhci_host *host, u8 dline_reg)
{
	u32 reg;

	reg = sdhci_readl(host, SDHC_DLINE_CFG_REG);
	reg &= ~(TX_DLINE_REG_MASK << TX_DLINE_REG_SHIFT);
	reg |= dline_reg << TX_DLINE_REG_SHIFT;
	/* release TX reset signal */
	reg |= 0x1 << TX_DLINE_RSTB_SHIFT;
	sdhci_writel(host, reg, SDHC_DLINE_CFG_REG);
}

static void asr_sw_tx_set_delaycode(struct sdhci_host *host, u32 delay)
{
	u32 reg;

	reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG);
	reg &= ~(TX_DLINE_CODE_MASK << TX_DLINE_CODE_SHIFT);
	reg |= (delay & TX_DLINE_CODE_MASK) << TX_DLINE_CODE_SHIFT;
	sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG);
}

static void asr_sdhci_clear_set_irqs(struct sdhci_host *host, u32 clr, u32 set)
{
	u32 ier;

	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
	ier &= ~clr;
	ier |= set;
	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
}

static void asr_select_rx_pad_clk(struct sdhci_host *host)
{
	u32 tmp_reg = 0;

	tmp_reg = sdhci_readl(host, SDHC_RX_CFG_REG);
	tmp_reg &= ~(RX_SDCLK_SEL0_MASK << RX_SDCLK_SEL0_SHIFT);
	tmp_reg &= ~(RX_SDCLK_SEL1_MASK << RX_SDCLK_SEL1_SHIFT);
	tmp_reg |= RX_SDCLK_SEL1_PAD << RX_SDCLK_SEL1_SHIFT;
	sdhci_writel(host, tmp_reg, SDHC_RX_CFG_REG);

	/*
	 * Some WiFi chips, such as the Hi2825, need the data CRC status
	 * response delayed by 3 cycles, which violates the SDIO spec.
	 * Work around it from the host side.
	 *
	 * Do not use the code below for other SDIO WiFi devices.
	 */
	if (host->quirks2 & SDHCI_QUIRK2_LONG_DATA_CRC_STATUS) {
		tmp_reg = sdhci_readl(host, SDHC_ROUNDTRIP_TIMING_REG);
		tmp_reg &= ~(WRDATA_WAIT_CYCLES_MASK << WRDATA_WAIT_CYCLES_SHIFT);
		tmp_reg |= 0x4 << WRDATA_WAIT_CYCLES_SHIFT;
		sdhci_writel(host, tmp_reg, SDHC_ROUNDTRIP_TIMING_REG);
	}
}

static int asr_set_rx_timing_cfg(struct sdhci_host *host,
				 struct asr_sdhci_platdata *pdata,
				 unsigned int clock)
{
	unsigned char timing = host->mmc->ios.timing;
	struct asr_sdhci_dtr_data *dtr_data;

	if (!pdata || !pdata->dtr_data)
		return 0;

	if (timing > MMC_TIMING_MMC_HS400) {
		pr_err("%s: invalid timing %d\n",
		       mmc_hostname(host->mmc), timing);
		return 0;
	}

	dtr_data = &pdata->dtr_data[timing];
	if (timing != dtr_data->timing)
		return 0;

	if (clock <= 26000000 || !dtr_data->rx_delay) {
		asr_select_rx_pad_clk(host);
		return 0;
	}

	asr_sw_rx_tuning_prepare(host, dtr_data->rx_dline_reg);
	asr_sw_rx_set_delaycode(host, dtr_data->rx_delay);
	return 1;
}

static void asr_set_tx_timing_cfg(struct sdhci_host *host,
				  struct asr_sdhci_platdata *pdata)
{
	unsigned char timing = host->mmc->ios.timing;
	struct asr_sdhci_dtr_data *dtr_data;
	u32 tmp_reg = 0;

	if (!pdata || !pdata->dtr_data)
		return;

	if (timing > MMC_TIMING_MMC_HS400) {
		pr_err("%s: invalid timing %d\n", mmc_hostname(host->mmc),
		       timing);
		return;
	}

	dtr_data = &pdata->dtr_data[timing];
	if (timing != dtr_data->timing)
		return;

	/* set Tx delay */
	if (dtr_data->tx_delay) {
		asr_sw_tx_set_dlinereg(host, dtr_data->tx_dline_reg);
		asr_sw_tx_set_delaycode(host, dtr_data->tx_delay);
		asr_sw_tx_tuning_prepare(host);
	} else {
		asr_sw_tx_no_tuning(host);

		/*
		 * For default or high speed mode, enable TX_INT_CLK_SEL
		 * to select the clock from the inverter of the internal
		 * work clock. This setting guarantees the hold time.
		 */
		tmp_reg = sdhci_readl(host, SDHC_TX_CFG_REG);
		if (timing <= MMC_TIMING_UHS_SDR50)
			tmp_reg |= TX_INT_CLK_SEL;
		else
			tmp_reg &= ~TX_INT_CLK_SEL;
		if (host->quirks2 & SDHCI_QUIRK2_TX_INT_CLOCK)
			tmp_reg |= TX_INT_CLK_SEL;
		sdhci_writel(host, tmp_reg, SDHC_TX_CFG_REG);
	}
}

#define SLOW_CLOCK	52000000
#define FAST_CLOCK	100000000
static void asr_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	if (clock == 0)
		return;

	asr_set_tx_timing_cfg(host, pdata);
	asr_set_rx_timing_cfg(host, pdata, clock);

	/*
	 * Configure the pin state (e.g. drive strength) according to the
	 * bus clock:
	 * 1. use the slow setting when the new bus clock <= SLOW_CLOCK
	 *    while the current one is above it;
	 * 2. use the default setting when the new bus clock < FAST_CLOCK
	 *    while the current one is at or above it;
	 * 3. use the fast setting when the new bus clock >= FAST_CLOCK
	 *    while the current one is below it.
	 */
	if (clock <= SLOW_CLOCK) {
		if ((host->clock > SLOW_CLOCK) && (!IS_ERR(pdata->pin_slow)))
			pinctrl_select_state(pdata->pinctrl, pdata->pin_slow);
	} else if (clock < FAST_CLOCK) {
		if ((host->clock >= FAST_CLOCK) && (!IS_ERR(pdata->pin_default)))
			pinctrl_select_state(pdata->pinctrl, pdata->pin_default);
	} else {
		if ((host->clock < FAST_CLOCK) && (!IS_ERR(pdata->pin_fast)))
			pinctrl_select_state(pdata->pinctrl, pdata->pin_fast);
	}

	sdhci_set_clock(host, clock);
}

static unsigned long asr_clk_prepare(struct sdhci_host *host,
				     unsigned long rate)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	unsigned char timing = host->mmc->ios.timing;
	struct asr_sdhci_dtr_data *dtr_data;
	unsigned long preset_rate = 0, src_rate = 0;

	if (!pdata || !pdata->dtr_data || !rate)
		return rate;

	if (timing > MMC_TIMING_MMC_HS400) {
		pr_err("%s: invalid timing %d\n",
		       mmc_hostname(host->mmc), timing);
		return rate;
	}

	dtr_data = &pdata->dtr_data[timing];
	if (timing != dtr_data->timing)
		return rate;

	if ((timing == MMC_TIMING_LEGACY) && (rate < 25000000)) {
		preset_rate = rate;
	} else {
		if (host->quirks2 & SDHCI_QUIRK2_CHANGE_SDIO_CLOCK_FREQ_DYNAMIC)
			preset_rate = rate;
		else
			preset_rate = dtr_data->preset_rate;
	}

	src_rate = dtr_data->src_rate;
	clk_set_rate(pltfm_host->clk, src_rate);
	return preset_rate;
}

static void asr_set_delaycode(struct sdhci_host *host, int tx, u32 delay)
{
	if (tx)
		asr_sw_tx_set_delaycode(host, delay);
	else
		asr_sw_rx_set_delaycode(host, delay);
}

static void asr_enable_delay_line(struct sdhci_host *host, int tx, int enable)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	unsigned char timing = host->mmc->ios.timing;
	struct asr_sdhci_dtr_data *dtr_data;

	if (!pdata || !pdata->dtr_data)
		return;

	if (timing > MMC_TIMING_MMC_HS400) {
		pr_err("%s: invalid timing %d\n", mmc_hostname(host->mmc),
		       timing);
		return;
	}

	dtr_data = &pdata->dtr_data[timing];
	if (timing != dtr_data->timing)
		return;

	if (tx) {
		if (enable) {
			asr_sw_tx_set_dlinereg(host, dtr_data->tx_dline_reg);
			asr_sw_tx_set_delaycode(host, dtr_data->tx_delay);
			asr_sw_tx_tuning_prepare(host);
		} else {
			asr_sw_tx_no_tuning(host);
		}
	} else {
		if (enable) {
			asr_sw_rx_tuning_prepare(host, dtr_data->rx_dline_reg);
			asr_sw_rx_set_delaycode(host, dtr_data->rx_delay);
		} else {
			asr_select_rx_pad_clk(host);
		}
	}
}

static void asr_clk_gate_auto(struct sdhci_host *host, unsigned int ctrl)
{
	unsigned int reg;

	reg = sdhci_readl(host, SDHC_OP_EXT_REG);
	if (ctrl)
		reg &= ~(OVRRD_CLK_OEN | FORCE_CLK_ON);
	else
		reg |= (OVRRD_CLK_OEN | FORCE_CLK_ON);
	sdhci_writel(host, reg, SDHC_OP_EXT_REG);
}

static void asr_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	sdhci_reset(host, mask);

	/* only reapply timing after a full controller reset */
	if (mask != SDHCI_RESET_ALL)
		return;

	/*
	 * Re-tune the read data/command timing in case the reset was
	 * triggered by a CRC error; this has no performance impact.
	 */
	asr_set_tx_timing_cfg(host, pdata);
	asr_set_rx_timing_cfg(host, pdata, host->clock);
}

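/*
 * The SD/MMC specs require at least 74 clock cycles on the bus between
 * power-up and the first command. The controller signals completion of
 * this clock burst via MISC_INT, polled below at roughly 10 us per
 * clock for the slowest (~100 kHz) bus speed.
 */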
#define MAX_WAIT_COUNT	74
static void asr_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
	u32 tmp;
	int count = 0;

	if (asr->power_mode == MMC_POWER_UP && power_mode == MMC_POWER_ON) {
		dev_dbg(mmc_dev(host->mmc),
			"%s: slot->power_mode = %d, ios->power_mode = %d\n",
			__func__, asr->power_mode, power_mode);

		/*
		 * Clear the interrupt bit if posted and ask to be
		 * notified once the 74 clocks have been sent.
		 */
		tmp = sdhci_readl(host, SDHC_MMC_CTRL_REG);
		tmp |= MISC_INT_EN;
		sdhci_writel(host, tmp, SDHC_MMC_CTRL_REG);

		/* start sending the 74 clocks */
		tmp = sdhci_readl(host, SDHC_LEGACY_CTRL_REG);
		tmp |= GEN_PAD_CLK_ON;
		sdhci_writel(host, tmp, SDHC_LEGACY_CTRL_REG);

		/* the slowest speed is about 100 kHz, or 10 us per clock */
		while (count++ < MAX_WAIT_COUNT) {
			if (readw(host->ioaddr + SDHC_MMC_CTRL_REG) & MISC_INT)
				break;
			udelay(20);
		}

		if (count >= MAX_WAIT_COUNT)
			dev_warn(mmc_dev(host->mmc),
				 "74 clock interrupt not cleared\n");

		/* clear the 74-clock interrupt status */
		tmp = sdhci_readl(host, SDHC_MMC_CTRL_REG);
		tmp |= MISC_INT;
		sdhci_writel(host, tmp, SDHC_MMC_CTRL_REG);
	}

	asr->power_mode = power_mode;
}

static void asr_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 reg;

	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_MMC_HS400)) {
		reg = sdhci_readw(host, SDHC_MMC_CTRL_REG);
		reg |= (timing == MMC_TIMING_MMC_HS200) ? MMC_HS200 : MMC_HS400;
		sdhci_writew(host, reg, SDHC_MMC_CTRL_REG);
	}
	sdhci_set_uhs_signaling(host, timing);
}

static void asr_set_power(struct sdhci_host *host, unsigned char mode,
			  unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = host->pwr;

	sdhci_set_power_noreg(host, mode, vdd);

	if (host->pwr == pwr)
		return;

	if (host->pwr == 0)
		vdd = 0;

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode == MMC_POWER_OFF)
		mmc_regulator_disable_vqmmc(mmc);
	else
		mmc_regulator_enable_vqmmc(mmc);
}

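/*
 * Switch the MMC1 pad voltage via the AIB register. Each AIB access is
 * preceded by writing the two unlock keys to APBC_ASFAR and the word
 * after it; this sequencing is inferred from the register names and
 * may differ on other SoC revisions.
 */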
static void set_mmc1_aib(struct sdhci_host *host, int vol)
{
	u32 tmp;
	void __iomem *aib_mmc1_io;
	void __iomem *apbc_asfar;

	aib_mmc1_io = ioremap(AIB_MMC1_IO_REG, 4);
	apbc_asfar = ioremap(APBC_ASFAR, 8);

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_mmc1_io);

	if (vol >= 2800000)
		tmp &= ~MMC1_PAD_1V8;
	else
		tmp |= MMC1_PAD_1V8;

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	writel(tmp, aib_mmc1_io);

	iounmap(apbc_asfar);
	iounmap(aib_mmc1_io);
}

static void asr_sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	if (host->ops->clr_wakeup_event)
		host->ops->clr_wakeup_event(host);
}

static void asr_handle_none_irq(struct sdhci_host *host)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	int ret;

	if (pdata->check_sdh_wakeup_event) {
		ret = pdata->check_sdh_wakeup_event();
		if (ret)
			asr_sdhci_disable_irq_wakeups(host);
	}
}

static void asr_reset_wakeup_event(struct sdhci_host *host)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return;

	if (pdata->reset_wakeup_event)
		pdata->reset_wakeup_event();
}

static void asr_clr_wakeup_event(struct sdhci_host *host)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return;

	if (pdata->clear_wakeup_event)
		pdata->clear_wakeup_event();
}

static void asr_signal_vol_change(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = mmc->ios;
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	unsigned int set = 0;
	u8 vol = ios.signal_voltage;

	if (!pdata || !(pdata->quirks2 & SDHCI_QUIRK2_SET_AIB_MMC))
		return;

	switch (vol) {
	case MMC_SIGNAL_VOLTAGE_330:
		set = 3300000;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		set = 1800000;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		set = 1200000;
		break;
	default:
		set = 3300000;
		break;
	}

	set_mmc1_aib(host, set);
}

static void asr_access_constrain(struct sdhci_host *host, unsigned int ac)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return;

	if (ac)
		pm_qos_update_request(&pdata->qos_idle, pdata->lpm_qos);
	else
		pm_qos_update_request(&pdata->qos_idle,
				      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
}

static void asr_prepare_tuning(struct sdhci_host *host, u32 val, bool done)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	unsigned char timing = host->mmc->ios.timing;
	struct asr_sdhci_dtr_data *dtr_data;

	if (pdata && pdata->dtr_data) {
		if (timing <= MMC_TIMING_MMC_HS400) {
			dtr_data = &pdata->dtr_data[timing];
			if (timing == dtr_data->timing && done)
				dtr_data->rx_delay = val;
		}
	}

	asr_sw_rx_set_delaycode(host, val);
	dev_dbg(mmc_dev(host->mmc), "tuning with delay 0x%x\n", val);
}

/*
 * Returns 0 on success, or >= 1: the number of pattern check errors.
 */
static int asr_tuning_pio_check(struct sdhci_host *host, int point)
{
	u32 rd_patten;
	unsigned int i;
	u32 *tuning_patten;
	int patten_len;
	int err = 0;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		tuning_patten = (u32 *)tuning_patten8;
		patten_len = ARRAY_SIZE(tuning_patten8);
	} else {
		tuning_patten = (u32 *)tuning_patten4;
		patten_len = ARRAY_SIZE(tuning_patten4);
	}

	/*
	 * Read all the data from the FIFO, even after a mismatch, to
	 * avoid errors on controllers that need the buffer drained.
	 */
	for (i = 0; i < patten_len; i++) {
		rd_patten = sdhci_readl(host, SDHCI_BUFFER);
		if (rd_patten != tuning_patten[i])
			err++;
	}
	dev_dbg(mmc_dev(host->mmc), "point: %d, error: %d\n", point, err);
	return err;
}

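/*
 * Issue one tuning command and check the returned block in PIO mode.
 * The tuning block is 128 bytes for 8-bit HS200 and 64 bytes otherwise,
 * and the read is synchronized on the Buffer Read Ready interrupt.
 * Called with host->lock held; the lock is dropped around the wait.
 */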
static int asr_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
			       int point, unsigned long flags)
{
	struct mmc_command cmd = { 0 };
	struct mmc_request mrq = { NULL };
	int err = 0;

	cmd.opcode = opcode;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;
	cmd.retries = 0;
	cmd.data = NULL;
	cmd.error = 0;

	mrq.cmd = &cmd;

	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller,
	 * so we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	if (!sdhci_send_command_retry(host, &cmd, flags)) {
		spin_unlock_irqrestore(&host->lock, flags);
		host->tuning_done = 0;
		return -EIO;
	}

	spin_unlock_irqrestore(&host->lock, flags);
	/* wait for the Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int,
			   (host->tuning_done > 0), msecs_to_jiffies(50));
	spin_lock_irqsave(&host->lock, flags);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	if (host->tuning_done == 1) {
		err = asr_tuning_pio_check(host, point);
	} else {
		pr_debug("%s: timeout or error waiting for Buffer Read Ready interrupt during tuning procedure, resetting CMD and DATA\n",
			 mmc_hostname(host->mmc));
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		err = -EIO;
	}

	host->tuning_done = 0;
	return err;
}

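/*
 * Software RX tuning: sweep the delay code from SDHC_RX_TUNE_DELAY_MIN
 * to SDHC_RX_TUNE_DELAY_MAX, recording each contiguous window of delay
 * codes that pass, then program the midpoint of the widest window.
 * For example, if codes 40..119 pass and 120 fails, then min = 40,
 * max = 120, the window length is 80, and the selected delay is
 * (40 + 120 - 1) / 2 = 79.
 */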
static int asr_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	unsigned char timing = host->mmc->ios.timing;
	struct asr_sdhci_dtr_data *dtr_data;
	int min, max, ret;
	int len = 0, avg = 0;
	unsigned long flags = 0;
	u32 ier, ier_new;
	int dvfs_level;

	dvfs_level = asr_sdh_get_highest_dvfs_level(host);
	pdata->dvfs_level_sel = dvfs_level;
	asr_sdh_request_dvfs_level(host, dvfs_level);

	if (pdata && pdata->dtr_data) {
		if (timing <= MMC_TIMING_MMC_HS400) {
			dtr_data = &pdata->dtr_data[timing];
			asr_sw_rx_tuning_prepare(host, dtr_data->rx_dline_reg);
		}
	}

	/* switch to PIO mode during the tuning stage */
	spin_lock_irqsave(&host->lock, flags);
	ier = sdhci_readl(host, SDHCI_INT_ENABLE);

	ier_new = SDHCI_INT_DATA_AVAIL;
	ier_new |= SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX;
	ier_new |= SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT;
	asr_sdhci_clear_set_irqs(host, ier, ier_new);

	/* find the minimum delay that passes tuning */
	min = SDHC_RX_TUNE_DELAY_MIN;
	do {
		while (min < SDHC_RX_TUNE_DELAY_MAX) {
			asr_prepare_tuning(host, min, false);
			if (!asr_send_tuning_cmd(host, opcode, min, flags))
				break;
			min += SDHC_RX_TUNE_DELAY_STEP;
		}

		/* then find the first delay above it that fails tuning */
		max = min + SDHC_RX_TUNE_DELAY_STEP;
		while (max < SDHC_RX_TUNE_DELAY_MAX) {
			asr_prepare_tuning(host, max, false);
			if (asr_send_tuning_cmd(host, opcode, max, flags))
				break;
			max += SDHC_RX_TUNE_DELAY_STEP;
		}

		if ((max - min) > len) {
			len = max - min;
			avg = (min + max - 1) / 2;
		}
		if ((max - min) > 20)
			printk(KERN_DEBUG "%s: tuning pass window [%d : %d], len = %d\n",
			       mmc_hostname(host->mmc), min,
			       max - 1, max - min);
		min = max + SDHC_RX_TUNE_DELAY_STEP;
	} while (min < SDHC_RX_TUNE_DELAY_MAX);

	asr_prepare_tuning(host, avg, true);
	ret = asr_send_tuning_cmd(host, opcode, avg, flags);

	asr_sdhci_clear_set_irqs(host, ier_new, ier);
	spin_unlock_irqrestore(&host->lock, flags);

	if (ret)
		pr_err("%s: tuning failed at %d, pass window length is %d\n",
		       mmc_hostname(host->mmc), avg, len);
	else
		printk(KERN_DEBUG "%s: tuning passed at %d, pass window length is %d\n",
		       mmc_hostname(host->mmc), avg, len);
	return ret;
}

void sdhci_postpone_clock_gate(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned int reg;

	reg = sdhci_readl(host, SDHC_OP_EXT_REG);
	reg |= (0xf << 16);
	sdhci_writel(host, reg, SDHC_OP_EXT_REG);
	pr_err("%s sdhci_postpone_clock_gate: SDHC_OP_EXT_REG(0x%x) reads back 0x%x\n",
	       mmc_hostname(host->mmc), SDHC_OP_EXT_REG,
	       sdhci_readl(host, SDHC_OP_EXT_REG));
}
EXPORT_SYMBOL_GPL(sdhci_postpone_clock_gate);

/*
 * Remove caps that are supported by the controller but not usable on
 * certain platforms.
 */
static void asr_host_caps_disable(struct sdhci_host *host)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	if (pdata->host_caps_disable)
		host->mmc->caps &= ~(pdata->host_caps_disable);
	if (pdata->host_caps2_disable)
		host->mmc->caps2 &= ~(pdata->host_caps2_disable);
}

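/*
 * Drive the card's reset GPIO through an optional assert, a de-assert
 * pulse of at least 1 us, and a post-reset wait of at least 200 us for
 * eMMC, using the three delays from the "reset-delays-us" DT property.
 */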
static void sdhci_asr_hw_reset(struct sdhci_host *host)
{
	u32 *delays_rst = host->delays_rst;
	int rst_gpio = host->rst_gpio;
	int low_active_rst = host->low_active_rst;

	if (rst_gpio < 0)
		return;

	if (delays_rst[0]) {
		gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
		usleep_range(delays_rst[0], delays_rst[0] + 100);
	}

	/* for eMMC, the minimum is 1 us but give it 9 us for good measure */
	gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
	if (delays_rst[1])
		udelay(delays_rst[1]);

	gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);

	/* for eMMC, the minimum is 200 us but give it 300 us for good measure */
	if (delays_rst[2])
		usleep_range(delays_rst[2], delays_rst[2] + 100);
}

static void asr_dump_priv_regs(struct sdhci_host *host)
{
	printk(KERN_INFO "sdhci: OP_CTRL: 0x%08x\n",
	       sdhci_readl(host, SDHC_OP_CTRL));
	printk(KERN_INFO "sdhci: OP_EXT_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_OP_EXT_REG));
	printk(KERN_INFO "sdhci: LEGACY_CTRL_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_LEGACY_CTRL_REG));
	printk(KERN_INFO "sdhci: MMC_CTRL_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_MMC_CTRL_REG));
	printk(KERN_INFO "sdhci: RX_CFG_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_RX_CFG_REG));
	printk(KERN_INFO "sdhci: TX_CFG_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_TX_CFG_REG));
	printk(KERN_INFO "sdhci: HWTUNE_CFG_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_HWTUNE_CFG_REG));
	printk(KERN_INFO "sdhci: HWTUNE_CFG2_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_HWTUNE_CFG2_REG));
	printk(KERN_INFO "sdhci: ROUNDTRIP_TIMING_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_ROUNDTRIP_TIMING_REG));
	printk(KERN_INFO "sdhci: GPIO_CFG_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_GPIO_CFG_REG));
	printk(KERN_INFO "sdhci: DLINE_CTRL_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_DLINE_CTRL_REG));
	printk(KERN_INFO "sdhci: DLINE_CFG_REG: 0x%08x\n",
	       sdhci_readl(host, SDHC_DLINE_CFG_REG));
}

static const struct sdhci_ops asr_sdhci_ops = {
	.set_delay_val = asr_set_delaycode,
	.enable_delay_line = asr_enable_delay_line,
	.set_clock = asr_set_clock,
	.set_power = asr_set_power,
	.reset = asr_sdhci_reset,
	.set_uhs_signaling = asr_set_uhs_signaling,
	.platform_send_init_74_clocks = asr_gen_init_74_clocks,
	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.dump_vendor_regs = asr_dump_priv_regs,
	.clk_prepare = asr_clk_prepare,
	.reset_wakeup_event = asr_reset_wakeup_event,
	.clr_wakeup_event = asr_clr_wakeup_event,
	.voltage_switch = asr_signal_vol_change,
	.clk_gate_auto = asr_clk_gate_auto,
	.platform_handle_none_irq = asr_handle_none_irq,
	.platform_execute_tuning = asr_execute_tuning,
	.host_caps_disable = asr_host_caps_disable,
	.hw_reset = sdhci_asr_hw_reset,
};

static struct sdhci_pltfm_data sdhci_asr_pdata = {
	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops = &asr_sdhci_ops,
};

static int asr_init_host_with_pdata(struct sdhci_host *host,
				    struct asr_sdhci_platdata *pdata)
{
	int ret = 0;

	host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;

	if (!(pdata->flags & PXA_FLAG_DISABLE_CLOCK_AUTO_GATING))
		host->mmc->caps2 |= MMC_CAP2_BUS_AUTO_CLK_GATE;

	if (pdata->quirks)
		host->quirks |= pdata->quirks;
	if (pdata->quirks2)
		host->quirks2 |= pdata->quirks2;
	if (pdata->host_caps)
		host->mmc->caps |= pdata->host_caps;
	if (pdata->host_caps2)
		host->mmc->caps2 |= pdata->host_caps2;
	if (pdata->pm_caps)
		host->mmc->pm_caps |= pdata->pm_caps;

	return ret;
}

#ifdef CONFIG_OF
static const struct of_device_id sdhci_asr_of_match[] = {
	{
		.compatible = "asr,sdhci",
	},
	{},
};
MODULE_DEVICE_TABLE(of, sdhci_asr_of_match);

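/*
 * Devicetree parsing. A purely illustrative node (address and values
 * are made up; property names match the parsing code below):
 *
 *	sdh@d4280000 {
 *		compatible = "asr,sdhci";
 *		reset-gpio = <&gpio 34 0>;
 *		reset-delays-us = <0 10 300>;
 *		asr,sdh-tuning-win-limit = <100>;
 *		asr,sdh-dtr-data = <2 52000000 104000000 0 0 0 0>;
 *	};
 *
 * Each "asr,sdh-dtr-data" entry carries seven cells:
 * <timing preset_rate src_rate tx_delay rx_delay tx_dline_reg rx_dline_reg>.
 */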
static void asr_get_of_perperty(struct sdhci_host *host,
		struct device *dev, struct asr_sdhci_platdata *pdata)
{
	struct device_node *np = dev->of_node;
	struct asr_sdhci_dtr_data *dtr_data;
	struct property *prop;
	const __be32 *p;
	u32 tmp, val, timing;
	u32 *delays_rst = host->delays_rst;
	int rst_gpio;

	host->rst_gpio = -1;
	rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	if (rst_gpio >= 0) {
		host->low_active_rst = of_property_read_bool(np, "reset-active-low");
		if (of_property_read_u32_array(np, "reset-delays-us",
					       delays_rst, 3)) {
			delays_rst[0] = 0;
			delays_rst[1] = 10;
			delays_rst[2] = 300;
		}

		if (gpio_request(rst_gpio, "mmc-reset")) {
			pr_warn("%s: reset-gpio=%d request failed\n",
				mmc_hostname(host->mmc), rst_gpio);
			return;
		}

		gpio_direction_output(rst_gpio, host->low_active_rst ? 1 : 0);

		host->rst_gpio = rst_gpio;
		host->mmc->caps |= MMC_CAP_HW_RESET;
	}

	if (!of_property_read_u32(np, "asr,sdh-flags", &tmp))
		pdata->flags |= tmp;

	of_property_read_u32(np, "asr,max-speed", &pdata->max_speed);

	if (!of_property_read_u32(np, "asr,sdh-host-caps", &tmp))
		pdata->host_caps |= tmp;
	if (!of_property_read_u32(np, "asr,sdh-host-caps2", &tmp))
		pdata->host_caps2 |= tmp;
	if (!of_property_read_u32(np, "asr,sdh-host-caps-disable", &tmp))
		pdata->host_caps_disable |= tmp;
	if (!of_property_read_u32(np, "asr,sdh-host-caps2-disable", &tmp))
		pdata->host_caps2_disable |= tmp;
	if (!of_property_read_u32(np, "asr,sdh-quirks", &tmp))
		pdata->quirks |= tmp;
	if (!of_property_read_u32(np, "asr,sdh-quirks2", &tmp))
		pdata->quirks2 |= tmp;
	if (!of_property_read_u32(np, "asr,sdh-pm-caps", &tmp))
		pdata->pm_caps |= tmp;
	if (!of_property_read_u32(np, "lpm-qos", &tmp))
		pdata->lpm_qos = tmp;
	else
		pdata->lpm_qos = PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE;

	if (!of_property_read_u32(np, "asr,sdh-tuning-win-limit", &tmp))
		pdata->tuning_win_limit = tmp;
	else
		pdata->tuning_win_limit = 100; /* default limit value */

	/*
	 * Property "asr,sdh-dtr-data": <timing preset_rate src_rate
	 * tx_delay rx_delay tx_dline_reg rx_dline_reg>, [<..>]
	 * allows setting the clock-related parameters per timing mode.
	 */
	if (of_property_read_bool(np, "asr,sdh-dtr-data")) {
		dtr_data = devm_kzalloc(dev,
			(MMC_TIMING_MMC_HS400 + 1) * sizeof(struct asr_sdhci_dtr_data),
			GFP_KERNEL);
		if (!dtr_data) {
			dev_err(dev, "failed to allocate memory for sdh-dtr-data\n");
			return;
		}
		of_property_for_each_u32(np, "asr,sdh-dtr-data", prop, p, timing) {
			if (timing > MMC_TIMING_MMC_HS400) {
				dev_err(dev, "invalid timing %d on sdh-dtr-data prop\n",
					timing);
				continue;
			} else {
				dtr_data[timing].timing = timing;
			}
			p = of_prop_next_u32(prop, p, &val);
			if (!p)
				dev_err(dev, "missing preset_rate for timing %d\n",
					timing);
			else
				dtr_data[timing].preset_rate = val;
			p = of_prop_next_u32(prop, p, &val);
			if (!p)
				dev_err(dev, "missing src_rate for timing %d\n",
					timing);
			else
				dtr_data[timing].src_rate = val;
			p = of_prop_next_u32(prop, p, &val);
			if (!p)
				dev_err(dev, "missing tx_delay for timing %d\n",
					timing);
			else
				dtr_data[timing].tx_delay = val;
			p = of_prop_next_u32(prop, p, &val);
			if (!p)
				dev_err(dev, "missing rx_delay for timing %d\n",
					timing);
			else
				dtr_data[timing].rx_delay = val;
			p = of_prop_next_u32(prop, p, &val);
			if (!p)
				dev_err(dev, "missing tx_dline_reg for timing %d\n",
					timing);
			else
				dtr_data[timing].tx_dline_reg = val;
			p = of_prop_next_u32(prop, p, &val);
			if (!p)
				dev_err(dev, "missing rx_dline_reg for timing %d\n",
					timing);
			else
				dtr_data[timing].rx_dline_reg = val;
		}
		pdata->dtr_data = dtr_data;
	}
}
#endif

static int asr_sdhci_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_asr *asr = NULL;
	const struct of_device_id *match;
	int ret = 0;

	host = sdhci_pltfm_init(pdev, &sdhci_asr_pdata, sizeof(*asr));
	if (IS_ERR(host))
		return PTR_ERR(host);

	pltfm_host = sdhci_priv(host);
	asr = sdhci_pltfm_priv(pltfm_host);

	asr->clk_io = devm_clk_get(dev, "sdh-io");
	if (IS_ERR(asr->clk_io))
		asr->clk_io = devm_clk_get(dev, NULL);
	if (IS_ERR(asr->clk_io)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(asr->clk_io);
		goto err_clk_get;
	}
	pltfm_host->clk = asr->clk_io;
	clk_prepare_enable(asr->clk_io);

	asr->clk_core = devm_clk_get(dev, "sdh-core");
	if (!IS_ERR(asr->clk_core))
		clk_prepare_enable(asr->clk_core);

	host->quirks2 = SDHCI_QUIRK2_TIMEOUT_DIVIDE_4
		| SDHCI_QUIRK2_NO_CURRENT_LIMIT
		| SDHCI_QUIRK2_PRESET_VALUE_BROKEN;

	match = of_match_device(of_match_ptr(sdhci_asr_of_match), &pdev->dev);
	if (match) {
		mmc_of_parse(host->mmc);
		sdhci_get_of_property(pdev);
	}

	if (!pdata) {
		pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			dev_err(mmc_dev(host->mmc),
				"failed to alloc pdata\n");
			ret = -ENOMEM;
			goto err_init_host;
		}
		pdev->dev.platform_data = pdata;
	}
	asr_get_of_perperty(host, dev, pdata);

	pdata->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(pdata->pinctrl))
		dev_err(dev, "could not get pinctrl handle\n");
	pdata->pin_default = pinctrl_lookup_state(pdata->pinctrl, "default");
	if (IS_ERR(pdata->pin_default))
		dev_err(dev, "could not get default pinstate\n");
	pdata->pin_slow = pinctrl_lookup_state(pdata->pinctrl, "slow");
	if (IS_ERR(pdata->pin_slow))
		dev_err(dev, "could not get slow pinstate\n");
	else
		pinctrl_select_state(pdata->pinctrl, pdata->pin_slow);
	pdata->pin_fast = pinctrl_lookup_state(pdata->pinctrl, "fast");
	if (IS_ERR(pdata->pin_fast))
		dev_info(dev, "could not get fast pinstate\n");

	ret = asr_init_host_with_pdata(host, pdata);
	if (ret) {
		dev_err(mmc_dev(host->mmc),
			"failed to init host with pdata\n");
		goto err_init_host;
	}
	pdata->qos_idle.name = pdev->name;
	pm_qos_add_request(&pdata->qos_idle, PM_QOS_CPUIDLE_BLOCK,
			   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

	/*
	 * Runtime PM is set active just below, so enable DVFS here too.
	 * There is no DVFS request by default; the driver needs to call
	 * asr_sdh_request_dvfs_level() when required.
	 */
	asr_sdh_create_dvfs(host);
	asr_sdh_request_dvfs_level(host, 0);
	asr_sdh_enable_dvfs(host);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, ASR_RPM_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 1);
	pm_runtime_enable(&pdev->dev);

	/* DMA is 32-bit only for now */
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	asr_access_constrain(host, 1);
	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		goto err_add_host;
	}

	platform_set_drvdata(pdev, host);

	if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
		device_init_wakeup(&pdev->dev, 1);
	else
		device_init_wakeup(&pdev->dev, 0);

	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;

err_add_host:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_init_host:
	clk_disable_unprepare(pltfm_host->clk);
	if (!IS_ERR(asr->clk_core))
		clk_disable_unprepare(asr->clk_core);
	if (pdata)
		pm_qos_remove_request(&pdata->qos_idle);
err_clk_get:
	sdhci_pltfm_free(pdev);
	return ret;
}

static int asr_sdhci_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
	struct asr_sdhci_platdata *pdata = pdev->dev.platform_data;

	pm_runtime_get_sync(&pdev->dev);
	sdhci_remove_host(host, 1);
	pm_runtime_disable(&pdev->dev);

	if (pdata)
		pm_qos_remove_request(&pdata->qos_idle);

	/*
	 * The clocks are devm-managed, so no clk_put() here; asr lives
	 * inside the host allocation released by sdhci_pltfm_free(), so
	 * it must not be freed separately.
	 */
	clk_disable_unprepare(pltfm_host->clk);
	if (!IS_ERR(asr->clk_core))
		clk_disable_unprepare(asr->clk_core);

	sdhci_pltfm_free(pdev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sdhci_asr_suspend(struct device *dev)
{
	int ret;
	struct sdhci_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	ret = sdhci_suspend_host(host);
	if (ret)
		return ret;

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		mmc_gpio_set_cd_wake(host->mmc, true);

	ret = pm_runtime_force_suspend(dev);
	return ret;
}

static int sdhci_asr_resume(struct device *dev)
{
	int ret;
	struct sdhci_host *host = dev_get_drvdata(dev);

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "failed to resume pm_runtime (%d)\n", ret);
		return ret;
	}

	ret = sdhci_resume_host(host);

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		mmc_gpio_set_cd_wake(host->mmc, false);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int sdhci_asr_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
	unsigned long flags;

	asr_access_constrain(host, 0);
	if (host->quirks2 & SDHCI_QUIRK2_BASE_CLOCK_ALWAYS_ON)
		goto fakeclk;

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	clk_disable_unprepare(pltfm_host->clk);
	if (!IS_ERR(asr->clk_core))
		clk_disable_unprepare(asr->clk_core);

fakeclk:
	asr_sdh_disable_dvfs(host);
	return 0;
}

static int sdhci_asr_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_asr *asr = sdhci_pltfm_priv(pltfm_host);
	unsigned long flags;

	asr_sdh_enable_dvfs(host);

	asr_access_constrain(host, 1);
	if (host->quirks2 & SDHCI_QUIRK2_BASE_CLOCK_ALWAYS_ON)
		return 0;

	clk_prepare_enable(pltfm_host->clk);
	if (!IS_ERR(asr->clk_core))
		clk_prepare_enable(asr->clk_core);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = false;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
#endif

static const struct dev_pm_ops sdhci_asr_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_asr_suspend, sdhci_asr_resume)
	SET_RUNTIME_PM_OPS(sdhci_asr_runtime_suspend,
			   sdhci_asr_runtime_resume, NULL)
};

static struct platform_driver asr_sdhci_driver = {
	.driver		= {
		.name	= "sdhci-asr",
#ifdef CONFIG_OF
		.of_match_table = sdhci_asr_of_match,
#endif
		.owner	= THIS_MODULE,
		.pm	= &sdhci_asr_pmops,
	},
	.probe		= asr_sdhci_probe,
	.remove		= asr_sdhci_remove,
};

module_platform_driver(asr_sdhci_driver);

MODULE_DESCRIPTION("SDHCI driver for ASR");
MODULE_AUTHOR("ASR Microelectronics Ltd.");
MODULE_LICENSE("GPL v2");