1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 *
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 *
7 * Thanks to the following companies for their support:
8 *
9 * - JMicron (hardware and technical support)
10 */
11
12#include <linux/bitfield.h>
13#include <linux/delay.h>
14#include <linux/ktime.h>
15#include <linux/highmem.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/dma-mapping.h>
19#include <linux/slab.h>
20#include <linux/scatterlist.h>
21#include <linux/sizes.h>
22#include <linux/swiotlb.h>
23#include <linux/regulator/consumer.h>
24#include <linux/pm_runtime.h>
25#include <linux/of.h>
26
27#include <linux/leds.h>
28
29#include <linux/mmc/mmc.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/card.h>
32#include <linux/mmc/sdio.h>
33#include <linux/mmc/slot-gpio.h>
34
35#include "sdhci.h"
36
37#define DRIVER_NAME "sdhci"
38
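/*
 * Vendor build-time switches (semantics inferred from the code below):
 * SDHCI_DIRECT_MAP          - bypass the DMA API for writes and map buffers via
 *                             a direct virtual-to-physical translation plus a
 *                             D-cache flush.
 * SDHCI_CMD_POLL_OPT        - poll command (and single-block data) completion
 *                             in sdhci_request() instead of waiting for IRQs.
 * SDHCI_ADMA2_NO_NEED_ALIGN - assume ADMA2 buffers are already 32-bit aligned
 *                             and compile out the alignment bounce-buffer code.
 */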
39#define SDHCI_DIRECT_MAP
40#define SDHCI_CMD_POLL_OPT
41#define SDHCI_ADMA2_NO_NEED_ALIGN
42
43#define DBG(f, x...) \
44 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
45
46#define SDHCI_DUMP(f, x...) \
47 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
48
49#define MAX_TUNING_LOOP 40
50
51static unsigned int debug_quirks = 0;
52static unsigned int debug_quirks2;
53
54static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
55static void sdhci_auto_clk_gate(struct mmc_host *mmc, int auto_gate);
56
57bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
58static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p);
59static void sdhci_data_irq(struct sdhci_host *host, u32 intmask);
60static bool sdhci_request_done(struct sdhci_host *host);
61
62void sdhci_dumpregs(struct sdhci_host *host)
63{
64 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
65
66 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
67 sdhci_readl(host, SDHCI_DMA_ADDRESS),
68 sdhci_readw(host, SDHCI_HOST_VERSION));
69 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
70 sdhci_readw(host, SDHCI_BLOCK_SIZE),
71 sdhci_readw(host, SDHCI_BLOCK_COUNT));
72 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
73 sdhci_readl(host, SDHCI_ARGUMENT),
74 sdhci_readw(host, SDHCI_TRANSFER_MODE));
75 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
76 sdhci_readl(host, SDHCI_PRESENT_STATE),
77 sdhci_readb(host, SDHCI_HOST_CONTROL));
78 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
79 sdhci_readb(host, SDHCI_POWER_CONTROL),
80 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
81 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
82 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
83 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
84 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
85 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
86 sdhci_readl(host, SDHCI_INT_STATUS));
87 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
88 sdhci_readl(host, SDHCI_INT_ENABLE),
89 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
90 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
91 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
92 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
93 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
94 sdhci_readl(host, SDHCI_CAPABILITIES),
95 sdhci_readl(host, SDHCI_CAPABILITIES_1));
96 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
97 sdhci_readw(host, SDHCI_COMMAND),
98 sdhci_readl(host, SDHCI_MAX_CURRENT));
99 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
100 sdhci_readl(host, SDHCI_RESPONSE),
101 sdhci_readl(host, SDHCI_RESPONSE + 4));
102 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
103 sdhci_readl(host, SDHCI_RESPONSE + 8),
104 sdhci_readl(host, SDHCI_RESPONSE + 12));
105 SDHCI_DUMP("Host ctl2: 0x%08x\n",
106 sdhci_readw(host, SDHCI_HOST_CONTROL2));
107
108 if (host->flags & SDHCI_USE_ADMA) {
109 if (host->flags & SDHCI_USE_64_BIT_DMA) {
110 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
111 sdhci_readl(host, SDHCI_ADMA_ERROR),
112 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
113 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
114 } else {
115 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
116 sdhci_readl(host, SDHCI_ADMA_ERROR),
117 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
118 }
119 }
120
121 if (host->ops->dump_vendor_regs)
122 host->ops->dump_vendor_regs(host);
123
124 SDHCI_DUMP("============================================\n");
125}
126EXPORT_SYMBOL_GPL(sdhci_dumpregs);
127
128/*****************************************************************************\
129 * *
130 * Low level functions *
131 * *
132\*****************************************************************************/
133
134#ifdef SDHCI_DIRECT_MAP
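/*
 * Translate a kernel virtual address to a bus address without the DMA API:
 * flush the D-cache lines covering the buffer so the device sees current data,
 * then return the physical address. Relies on the platform helpers
 * mv_cp_virtual_to_physical() and __cpuc_flush_dcache_area().
 */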
135inline dma_addr_t sdhci_map_direct(unsigned buf, unsigned len)
136{
137 unsigned ret;
138 ret = mv_cp_virtual_to_physical(buf);
139 BUG_ON(ret == buf);
140 __cpuc_flush_dcache_area((void *)(buf & ~31),
141 ((len + (buf & 31) + 31) & ~31));
142 return (dma_addr_t)ret;
143}
144#endif
145
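/*
 * Unmapping wrappers: with SDHCI_DIRECT_MAP, DMA_TO_DEVICE buffers were never
 * mapped through the DMA API, so there is nothing to unmap for writes; reads
 * are unmapped normally.
 */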
146static inline void sdhci_unmap_single(struct device *dev, dma_addr_t handle,
147 size_t size, enum dma_data_direction dir)
148{
149#ifdef SDHCI_DIRECT_MAP
150 if (dir == DMA_TO_DEVICE)
151 return;
152#endif
 153 dma_unmap_single(dev, handle, size, dir);
154}
155
156static inline void sdhci_unmap_sg(struct device *dev, struct scatterlist *sg,
157 int nents, enum dma_data_direction dir)
158{
159#ifdef SDHCI_DIRECT_MAP
160 if (dir == DMA_TO_DEVICE)
161 return;
162#endif
163 dma_unmap_sg(dev, sg, nents, dir);
164}
165
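/*
 * Mapping wrappers: DMA_FROM_DEVICE (read) buffers always go through the DMA
 * API so they are properly invalidated on completion; with SDHCI_DIRECT_MAP,
 * DMA_TO_DEVICE (write) buffers take the sdhci_map_direct() shortcut instead.
 */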
166static inline dma_addr_t sdhci_map_single(struct device *dev, void *ptr,
 167 size_t size, enum dma_data_direction dir)
168{
169#ifndef SDHCI_DIRECT_MAP
170 return dma_map_single(dev, ptr, size, dir);
171#else
172 if (dir == DMA_FROM_DEVICE)
173 return dma_map_single(dev, ptr, size, dir);
174
175 return sdhci_map_direct((unsigned)ptr, (unsigned)size);
176#endif
177}
178
179static inline int sdhci_map_sg(struct device *dev, struct scatterlist *sg,
180 int nents, enum dma_data_direction dir)
181{
182#ifndef SDHCI_DIRECT_MAP
183 return dma_map_sg(dev, sg, nents, dir);
184#else
185 struct scatterlist *s;
186 int i;
187
188 if (dir == DMA_FROM_DEVICE)
189 return dma_map_sg(dev, sg, nents, dir);
190
191 for_each_sg(sg, s, nents, i) {
192#ifdef CONFIG_NEED_SG_DMA_LENGTH
193 s->dma_length = s->length;
194#endif
195 s->dma_address = sdhci_map_direct((unsigned)sg_virt(s),
196 (unsigned)s->length);
197 }
198 return nents;
199#endif
200}
201
202static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
203{
204 u16 ctrl2;
205
206 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
207 if (ctrl2 & SDHCI_CTRL_V4_MODE)
208 return;
209
210 ctrl2 |= SDHCI_CTRL_V4_MODE;
211 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
212}
213
214/*
 215 * This can be called before sdhci_add_host() by a vendor's host controller
216 * driver to enable v4 mode if supported.
217 */
218void sdhci_enable_v4_mode(struct sdhci_host *host)
219{
220 host->v4_mode = true;
221 sdhci_do_enable_v4_mode(host);
222}
223EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
224
225static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
226{
227 return cmd->data || cmd->flags & MMC_RSP_BUSY;
228}
229
230static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
231{
232 u32 present;
233
234 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
235 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
236 return;
237
238 if (enable) {
239 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
240 SDHCI_CARD_PRESENT;
241
242 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
243 SDHCI_INT_CARD_INSERT;
244 } else {
245 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
246 }
247
248 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
249 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
250}
251
252static void sdhci_enable_card_detection(struct sdhci_host *host)
253{
254 sdhci_set_card_detection(host, true);
255}
256
257static void sdhci_disable_card_detection(struct sdhci_host *host)
258{
259 sdhci_set_card_detection(host, false);
260}
261
262static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
263{
264 if (host->bus_on)
265 return;
266 host->bus_on = true;
267 pm_runtime_get_noresume(host->mmc->parent);
268}
269
270static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
271{
272 if (!host->bus_on)
273 return;
274 host->bus_on = false;
275 pm_runtime_put_noidle(host->mmc->parent);
276}
277
278void sdhci_reset(struct sdhci_host *host, u8 mask)
279{
280 ktime_t timeout;
281
282 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
283
284 if (mask & SDHCI_RESET_ALL) {
285 host->clock = 0;
286 /* Reset-all turns off SD Bus Power */
287 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
288 sdhci_runtime_pm_bus_off(host);
289 }
290
291 /* Wait max 100 ms */
292 timeout = ktime_add_ms(ktime_get(), 100);
293
294 /* hw clears the bit when it's done */
295 while (1) {
296 bool timedout = ktime_after(ktime_get(), timeout);
297
298 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
299 break;
300 if (timedout) {
301 pr_err("%s: Reset 0x%x never completed.\n",
302 mmc_hostname(host->mmc), (int)mask);
303 sdhci_dumpregs(host);
304 return;
305 }
306 udelay(10);
307 }
308}
309EXPORT_SYMBOL_GPL(sdhci_reset);
310
311static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
312{
313 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
314 struct mmc_host *mmc = host->mmc;
315
316 if (!mmc->ops->get_cd(mmc))
317 return;
318 }
319
320 host->ops->reset(host, mask);
321
322 if (mask & SDHCI_RESET_ALL) {
323 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
324 if (host->ops->enable_dma)
325 host->ops->enable_dma(host);
326 }
327
 328 /* Resetting the controller clears many settings */
329 host->preset_enabled = false;
330 }
331}
332
333static void sdhci_set_default_irqs(struct sdhci_host *host)
334{
335 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
336 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
337 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
338 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
339 SDHCI_INT_RESPONSE;
340
341 if (host->quirks2 & SDHCI_QUIRK2_NO_CURRENT_LIMIT)
342 host->ier &= ~SDHCI_INT_BUS_POWER;
343
344 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
345 host->tuning_mode == SDHCI_TUNING_MODE_3)
346 host->ier |= SDHCI_INT_RETUNE;
347
348 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
349 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
350}
351
352static void sdhci_config_dma(struct sdhci_host *host)
353{
354 u8 ctrl;
355 u16 ctrl2;
356
357 if (host->version < SDHCI_SPEC_200)
358 return;
359
360 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
361
362 /*
363 * Always adjust the DMA selection as some controllers
364 * (e.g. JMicron) can't do PIO properly when the selection
365 * is ADMA.
366 */
367 ctrl &= ~SDHCI_CTRL_DMA_MASK;
368 if (!(host->flags & SDHCI_REQ_USE_DMA))
369 goto out;
370
371 /* Note if DMA Select is zero then SDMA is selected */
372 if (host->flags & SDHCI_USE_ADMA)
373 ctrl |= SDHCI_CTRL_ADMA32;
374
375 if (host->flags & SDHCI_USE_64_BIT_DMA) {
376 /*
377 * If v4 mode, all supported DMA can be 64-bit addressing if
378 * controller supports 64-bit system address, otherwise only
379 * ADMA can support 64-bit addressing.
380 */
381 if (host->v4_mode) {
382 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
383 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
384 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
385 } else if (host->flags & SDHCI_USE_ADMA) {
386 /*
387 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
388 * set SDHCI_CTRL_ADMA64.
389 */
390 ctrl |= SDHCI_CTRL_ADMA64;
391 }
392 }
393
394out:
395 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
396}
397
398static void sdhci_init(struct sdhci_host *host, int soft)
399{
400 struct mmc_host *mmc = host->mmc;
401 int auto_gate;
402
403 if (soft)
404 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
405 else
406 sdhci_do_reset(host, SDHCI_RESET_ALL);
407
408 if (host->v4_mode)
409 sdhci_do_enable_v4_mode(host);
410
411 sdhci_set_default_irqs(host);
412
413 if (mmc->caps2 & MMC_CAP2_BUS_AUTO_CLK_GATE)
414 auto_gate = 1;
415 else
416 auto_gate = 0;
417 sdhci_auto_clk_gate(mmc, auto_gate);
418
419 host->cqe_on = false;
420
421 if (soft) {
422 /* force clock reconfiguration */
423 host->clock = 0;
424 host->reinit_uhs = true;
425 mmc->ops->set_ios(mmc, &mmc->ios);
426 }
427}
428
429static void sdhci_reinit(struct sdhci_host *host)
430{
431 sdhci_init(host, 0);
432 sdhci_enable_card_detection(host);
433}
434
435static void __sdhci_led_activate(struct sdhci_host *host)
436{
437 u8 ctrl;
438
439 if (host->quirks & SDHCI_QUIRK_NO_LED)
440 return;
441
442 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
443 ctrl |= SDHCI_CTRL_LED;
444 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
445}
446
447static void __sdhci_led_deactivate(struct sdhci_host *host)
448{
449 u8 ctrl;
450
451 if (host->quirks & SDHCI_QUIRK_NO_LED)
452 return;
453
454 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
455 ctrl &= ~SDHCI_CTRL_LED;
456 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
457}
458
459#if IS_REACHABLE(CONFIG_LEDS_CLASS)
460static void sdhci_led_control(struct led_classdev *led,
461 enum led_brightness brightness)
462{
463 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
464 unsigned long flags;
465
466 spin_lock_irqsave(&host->lock, flags);
467
468 if (host->runtime_suspended)
469 goto out;
470
471 if (brightness == LED_OFF)
472 __sdhci_led_deactivate(host);
473 else
474 __sdhci_led_activate(host);
475out:
476 spin_unlock_irqrestore(&host->lock, flags);
477}
478
479static int sdhci_led_register(struct sdhci_host *host)
480{
481 struct mmc_host *mmc = host->mmc;
482
483 if (host->quirks & SDHCI_QUIRK_NO_LED)
484 return 0;
485
486 snprintf(host->led_name, sizeof(host->led_name),
487 "%s::", mmc_hostname(mmc));
488
489 host->led.name = host->led_name;
490 host->led.brightness = LED_OFF;
491 host->led.default_trigger = mmc_hostname(mmc);
492 host->led.brightness_set = sdhci_led_control;
493
494 return led_classdev_register(mmc_dev(mmc), &host->led);
495}
496
497static void sdhci_led_unregister(struct sdhci_host *host)
498{
499 if (host->quirks & SDHCI_QUIRK_NO_LED)
500 return;
501
502 led_classdev_unregister(&host->led);
503}
504
505static inline void sdhci_led_activate(struct sdhci_host *host)
506{
507}
508
509static inline void sdhci_led_deactivate(struct sdhci_host *host)
510{
511}
512
513#else
514
515static inline int sdhci_led_register(struct sdhci_host *host)
516{
517 return 0;
518}
519
520static inline void sdhci_led_unregister(struct sdhci_host *host)
521{
522}
523
524static inline void sdhci_led_activate(struct sdhci_host *host)
525{
526 __sdhci_led_activate(host);
527}
528
529static inline void sdhci_led_deactivate(struct sdhci_host *host)
530{
531 __sdhci_led_deactivate(host);
532}
533
534#endif
535
536static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
537 unsigned long timeout)
538{
539 if (sdhci_data_line_cmd(mrq->cmd))
540 mod_timer(&host->data_timer, timeout);
541 else
542 mod_timer(&host->timer, timeout);
543}
544
545void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
546{
547 if (sdhci_data_line_cmd(mrq->cmd))
548 del_timer(&host->data_timer);
549 else
550 del_timer(&host->timer);
551}
552
553static inline bool sdhci_has_requests(struct sdhci_host *host)
554{
555 return host->cmd || host->data_cmd;
556}
557
558/*****************************************************************************\
559 * *
560 * Core functions *
561 * *
562\*****************************************************************************/
563
564static void sdhci_read_block_pio(struct sdhci_host *host)
565{
566 unsigned long flags;
567 size_t blksize, len, chunk;
568 u32 scratch;
569 u8 *buf;
570
571 DBG("PIO reading\n");
572
573 blksize = host->data->blksz;
574 chunk = 0;
575
576 local_irq_save(flags);
577
578 while (blksize) {
579 BUG_ON(!sg_miter_next(&host->sg_miter));
580
581 len = min(host->sg_miter.length, blksize);
582
583 blksize -= len;
584 host->sg_miter.consumed = len;
585
586 buf = host->sg_miter.addr;
587
588 while (len) {
589 if (chunk == 0) {
590 scratch = sdhci_readl(host, SDHCI_BUFFER);
591 chunk = 4;
592 }
593
594 *buf = scratch & 0xFF;
595
596 buf++;
597 scratch >>= 8;
598 chunk--;
599 len--;
600 }
601 }
602
603 sg_miter_stop(&host->sg_miter);
604
605 local_irq_restore(flags);
606}
607
608static void sdhci_write_block_pio(struct sdhci_host *host)
609{
610 unsigned long flags;
611 size_t blksize, len, chunk;
612 u32 scratch;
613 u8 *buf;
614
615 DBG("PIO writing\n");
616
617 blksize = host->data->blksz;
618 chunk = 0;
619 scratch = 0;
620
621 local_irq_save(flags);
622
623 while (blksize) {
624 BUG_ON(!sg_miter_next(&host->sg_miter));
625
626 len = min(host->sg_miter.length, blksize);
627
628 blksize -= len;
629 host->sg_miter.consumed = len;
630
631 buf = host->sg_miter.addr;
632
633 while (len) {
634 scratch |= (u32)*buf << (chunk * 8);
635
636 buf++;
637 chunk++;
638 len--;
639
640 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
641 sdhci_writel(host, scratch, SDHCI_BUFFER);
642 chunk = 0;
643 scratch = 0;
644 }
645 }
646 }
647
648 sg_miter_stop(&host->sg_miter);
649
650 local_irq_restore(flags);
651}
652
653static void sdhci_transfer_pio(struct sdhci_host *host)
654{
655 u32 mask;
656
657 if (host->blocks == 0)
658 return;
659
660 if (host->data->flags & MMC_DATA_READ)
661 mask = SDHCI_DATA_AVAILABLE;
662 else
663 mask = SDHCI_SPACE_AVAILABLE;
664
665 /*
666 * Some controllers (JMicron JMB38x) mess up the buffer bits
667 * for transfers < 4 bytes. As long as it is just one block,
668 * we can ignore the bits.
669 */
670 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
671 (host->data->blocks == 1))
672 mask = ~0;
673
674 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
675 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
676 udelay(100);
677
678 if (host->data->flags & MMC_DATA_READ)
679 sdhci_read_block_pio(host);
680 else
681 sdhci_write_block_pio(host);
682
683 host->blocks--;
684 if (host->blocks == 0)
685 break;
686 }
687
688 DBG("PIO transfer complete.\n");
689}
690
691static int sdhci_pre_dma_transfer(struct sdhci_host *host,
692 struct mmc_data *data, int cookie)
693{
694 int sg_count;
695
696 /*
697 * If the data buffers are already mapped, return the previous
698 * dma_map_sg() result.
699 */
700 if (data->host_cookie == COOKIE_PRE_MAPPED)
701 return data->sg_count;
702
703 /* Bounce write requests to the bounce buffer */
704 if (host->bounce_buffer) {
705 unsigned int length = data->blksz * data->blocks;
706
707 if (length > host->bounce_buffer_size) {
708 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
709 mmc_hostname(host->mmc), length,
710 host->bounce_buffer_size);
711 return -EIO;
712 }
713 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
714 /* Copy the data to the bounce buffer */
715 sg_copy_to_buffer(data->sg, data->sg_len,
716 host->bounce_buffer,
717 length);
718 }
719 /* Switch ownership to the DMA */
720 dma_sync_single_for_device(host->mmc->parent,
721 host->bounce_addr,
722 host->bounce_buffer_size,
723 mmc_get_dma_dir(data));
724 /* Just a dummy value */
725 sg_count = 1;
726 } else {
727 /* Just access the data directly from memory */
728 sg_count = sdhci_map_sg(mmc_dev(host->mmc),
729 data->sg, data->sg_len,
730 mmc_get_dma_dir(data));
731 }
732
733 if (sg_count == 0)
734 return -ENOSPC;
735
736 data->sg_count = sg_count;
737 data->host_cookie = cookie;
738
739 return sg_count;
740}
741
742#ifndef SDHCI_ADMA2_NO_NEED_ALIGN
743static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
744{
745 local_irq_save(*flags);
746 return kmap_atomic(sg_page(sg)) + sg->offset;
747}
748
749static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
750{
751 kunmap_atomic(buffer);
752 local_irq_restore(*flags);
753}
754#endif
755
756void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
757 dma_addr_t addr, int len, unsigned int cmd)
758{
759 struct sdhci_adma2_64_desc *dma_desc = *desc;
760
761 /* 32-bit and 64-bit descriptors have these members in same position */
762 dma_desc->cmd = cpu_to_le16(cmd);
763 dma_desc->len = cpu_to_le16(len);
764 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
765
766 if (host->flags & SDHCI_USE_64_BIT_DMA)
767 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
768
769 *desc += host->desc_sz;
770}
771EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
772
773static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
774 void **desc, dma_addr_t addr,
775 int len, unsigned int cmd)
776{
777 if (host->ops->adma_write_desc)
778 host->ops->adma_write_desc(host, desc, addr, len, cmd);
779 else
780 sdhci_adma_write_desc(host, desc, addr, len, cmd);
781}
782
783static void sdhci_adma_mark_end(void *desc)
784{
785 struct sdhci_adma2_64_desc *dma_desc = desc;
786
787 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
788 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
789}
790
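/*
 * Build the ADMA2 descriptor table for a mapped scatterlist: optionally bounce
 * the unaligned head bytes of each segment into host->align_buffer (compiled
 * out with SDHCI_ADMA2_NO_NEED_ALIGN), split segments larger than
 * host->max_adma into 32 KiB chunks, and terminate the table either by marking
 * the last descriptor or by appending a nop/end descriptor.
 */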
791static void sdhci_adma_table_pre(struct sdhci_host *host,
792 struct mmc_data *data, int sg_count)
793{
794 struct scatterlist *sg;
795 dma_addr_t addr;
796 void *desc;
797 int len, i;
798#ifndef SDHCI_ADMA2_NO_NEED_ALIGN
799 dma_addr_t align_addr;
800 void *align;
801 int offset;
802 char *buffer;
803 unsigned long flags;
804#endif
805
806 /*
807 * The spec does not specify endianness of descriptor table.
808 * We currently guess that it is LE.
809 */
810
811 host->sg_count = sg_count;
812
813 desc = host->adma_table;
814#ifndef SDHCI_ADMA2_NO_NEED_ALIGN
815 align = host->align_buffer;
816
817 align_addr = host->align_addr;
818#endif
819 for_each_sg(data->sg, sg, host->sg_count, i) {
820 addr = sg_dma_address(sg);
821 len = sg_dma_len(sg);
822
823 /*
824 * The SDHCI specification states that ADMA addresses must
825 * be 32-bit aligned. If they aren't, then we use a bounce
826 * buffer for the (up to three) bytes that screw up the
827 * alignment.
828 */
829#ifndef SDHCI_ADMA2_NO_NEED_ALIGN
830 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
831 SDHCI_ADMA2_MASK;
832 if (offset) {
833 offset = (offset < len) ? offset : len;
834 if (data->flags & MMC_DATA_WRITE) {
835 buffer = sdhci_kmap_atomic(sg, &flags);
836 memcpy(align, buffer, offset);
837 sdhci_kunmap_atomic(buffer, &flags);
838 }
839
840 /* tran, valid */
841 __sdhci_adma_write_desc(host, &desc, align_addr,
842 offset, ADMA2_TRAN_VALID);
843
844 BUG_ON(offset > 65536);
845
846 align += SDHCI_ADMA2_ALIGN;
847 align_addr += SDHCI_ADMA2_ALIGN;
848
849 addr += offset;
850 len -= offset;
851 }
852#endif
853 /*
854 * The block layer forces a minimum segment size of PAGE_SIZE,
855 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
856 * multiple descriptors, noting that the ADMA table is sized
857 * for 4KiB chunks anyway, so it will be big enough.
858 */
859 while (len > host->max_adma) {
 860 int n = 32 * 1024; /* 32 KiB */
861
862 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
863 addr += n;
864 len -= n;
865 }
866
867 /* tran, valid */
868 if (len)
869 __sdhci_adma_write_desc(host, &desc, addr, len,
870 ADMA2_TRAN_VALID);
871
872 /*
873 * If this triggers then we have a calculation bug
874 * somewhere. :/
875 */
876 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
877 }
878
879 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
880 /* Mark the last descriptor as the terminating descriptor */
881 if (desc != host->adma_table) {
882 desc -= host->desc_sz;
883 sdhci_adma_mark_end(desc);
884 }
885 } else {
886 /* Add a terminating entry - nop, end, valid */
887 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
888 }
889}
890
891static void sdhci_adma_table_post(struct sdhci_host *host,
892 struct mmc_data *data)
893{
894#ifndef SDHCI_ADMA2_NO_NEED_ALIGN
895 struct scatterlist *sg;
896 int i, size;
897 void *align;
898 char *buffer;
899 unsigned long flags;
900
901 if (data->flags & MMC_DATA_READ) {
902 bool has_unaligned = false;
903
904 /* Do a quick scan of the SG list for any unaligned mappings */
905 for_each_sg(data->sg, sg, host->sg_count, i)
906 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
907 has_unaligned = true;
908 break;
909 }
910
911 if (has_unaligned) {
912 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
913 data->sg_len, DMA_FROM_DEVICE);
914
915 align = host->align_buffer;
916
917 for_each_sg(data->sg, sg, host->sg_count, i) {
918 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
919 size = SDHCI_ADMA2_ALIGN -
920 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
921
922 buffer = sdhci_kmap_atomic(sg, &flags);
923 memcpy(buffer, align, size);
924 sdhci_kunmap_atomic(buffer, &flags);
925
926 align += SDHCI_ADMA2_ALIGN;
927 }
928 }
929 }
930 }
931#endif
932}
933
934static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
935{
936 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
937 if (host->flags & SDHCI_USE_64_BIT_DMA)
938 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
939}
940
941static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
942{
943 if (host->bounce_buffer)
944 return host->bounce_addr;
945 else
946 return sg_dma_address(host->data->sg);
947}
948
949static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
950{
951 if (host->v4_mode)
952 sdhci_set_adma_addr(host, addr);
953 else
954 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
955}
956
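/*
 * Return the required timeout in microseconds: cmd->busy_timeout (ms) * 1000
 * when there is no data, otherwise DIV_ROUND_UP(data->timeout_ns, 1000) plus
 * the clock-count component, us = 1000000 * timeout_clks / host->clock,
 * rounded up.
 */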
957static unsigned int sdhci_target_timeout(struct sdhci_host *host,
958 struct mmc_command *cmd,
959 struct mmc_data *data)
960{
961 unsigned int target_timeout;
962
963 /* timeout in us */
964 if (!data) {
965 target_timeout = cmd->busy_timeout * 1000;
966 } else {
967 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
968 if (host->clock && data->timeout_clks) {
969 unsigned long long val;
970
971 /*
972 * data->timeout_clks is in units of clock cycles.
973 * host->clock is in Hz. target_timeout is in us.
974 * Hence, us = 1000000 * cycles / Hz. Round up.
975 */
976 val = 1000000ULL * data->timeout_clks;
977 if (do_div(val, host->clock))
978 target_timeout++;
979 target_timeout += val;
980 }
981 }
982
983 return target_timeout;
984}
985
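/*
 * Derive a software data timeout in nanoseconds: the per-block target timeout
 * multiplied by the block count, plus an estimated transfer time of
 * 2 * blksz * (8 / bus_width) / actual_clock seconds, plus
 * MMC_CMD_TRANSFER_TIME.
 */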
986static void sdhci_calc_sw_timeout(struct sdhci_host *host,
987 struct mmc_command *cmd)
988{
989 struct mmc_data *data = cmd->data;
990 struct mmc_host *mmc = host->mmc;
991 struct mmc_ios *ios = &mmc->ios;
992 unsigned char bus_width = 1 << ios->bus_width;
993 unsigned int blksz;
994 unsigned int freq;
995 u64 target_timeout;
996 u64 transfer_time;
997
998 target_timeout = sdhci_target_timeout(host, cmd, data);
999 target_timeout *= NSEC_PER_USEC;
1000
1001 if (data) {
1002 blksz = data->blksz;
1003 freq = host->mmc->actual_clock ? : host->clock;
1004 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
1005 do_div(transfer_time, freq);
1006 /* multiply by '2' to account for any unknowns */
1007 transfer_time = transfer_time * 2;
1008 /* calculate timeout for the entire data */
1009 host->data_timeout = data->blocks * target_timeout +
1010 transfer_time;
1011 } else {
1012 host->data_timeout = target_timeout;
1013 }
1014
1015 if (host->data_timeout)
1016 host->data_timeout += MMC_CMD_TRANSFER_TIME;
1017}
1018
1019static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
1020 bool *too_big)
1021{
1022 u8 count;
1023 struct mmc_data *data;
1024 unsigned target_timeout, current_timeout;
1025
1026 *too_big = true;
1027
1028 /*
1029 * If the host controller provides us with an incorrect timeout
1030 * value, just skip the check and use 0xE. The hardware may take
1031 * longer to time out, but that's much better than having a too-short
1032 * timeout value.
1033 */
1034 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
1035 return 0xE;
1036
 1037 /* Unspecified command, assume max */
1038 if (cmd == NULL)
1039 return 0xE;
1040
1041 data = cmd->data;
1042 /* Unspecified timeout, assume max */
1043 if (!data && !cmd->busy_timeout)
1044 return 0xE;
1045
1046 /* timeout in us */
1047 target_timeout = sdhci_target_timeout(host, cmd, data);
1048
1049 /*
1050 * Figure out needed cycles.
1051 * We do this in steps in order to fit inside a 32 bit int.
1052 * The first step is the minimum timeout, which will have a
1053 * minimum resolution of 6 bits:
1054 * (1) 2^13*1000 > 2^22,
1055 * (2) host->timeout_clk < 2^16
1056 * =>
1057 * (1) / (2) > 2^6
1058 */
1059 count = 0;
1060 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
1061 while (current_timeout < target_timeout) {
1062 count++;
1063 current_timeout <<= 1;
1064 if (count >= 0xF)
1065 break;
1066 }
1067
1068 if (count >= 0xF) {
1069 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
1070 DBG("Too large timeout 0x%x requested for CMD%d!\n",
1071 count, cmd->opcode);
1072 count = 0xE;
1073 } else {
1074 *too_big = false;
1075 }
1076
1077 return count;
1078}
1079
1080static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1081{
1082 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1083 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1084
1085 if (host->flags & SDHCI_REQ_USE_DMA)
1086 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
1087 else
1088 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1089
1090 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1091 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1092 else
1093 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1094
1095 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1096 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1097}
1098
1099void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1100{
1101 if (enable)
1102 host->ier |= SDHCI_INT_DATA_TIMEOUT;
1103 else
1104 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1105 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1106 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1107}
1108EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1109
1110void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1111{
1112 bool too_big = false;
1113 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1114
1115 if (too_big &&
1116 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1117 sdhci_calc_sw_timeout(host, cmd);
1118 sdhci_set_data_timeout_irq(host, false);
1119 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1120 sdhci_set_data_timeout_irq(host, true);
1121 }
1122
1123 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1124}
1125EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1126
1127static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1128{
1129 if (host->ops->set_timeout)
1130 host->ops->set_timeout(host, cmd);
1131 else
1132 __sdhci_set_timeout(host, cmd);
1133}
1134
1135static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1136{
1137 struct mmc_data *data = cmd->data;
1138
1139 host->data_timeout = 0;
1140
1141 if (sdhci_data_line_cmd(cmd))
1142 sdhci_set_timeout(host, cmd);
1143
1144 if (!data)
1145 return;
1146
1147 WARN_ON(host->data);
1148
1149 /* Sanity checks */
1150 BUG_ON(data->blksz * data->blocks > 524288);
1151 BUG_ON(data->blksz > host->mmc->max_blk_size);
1152 BUG_ON(data->blocks > 65535);
1153
1154 host->data = data;
1155 host->data_early = 0;
1156 host->data->bytes_xfered = 0;
1157
1158 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1159 struct scatterlist *sg;
1160 unsigned int length_mask, offset_mask;
1161 int i;
1162
1163 host->flags |= SDHCI_REQ_USE_DMA;
1164
1165 /*
1166 * FIXME: This doesn't account for merging when mapping the
1167 * scatterlist.
1168 *
1169 * The assumption here being that alignment and lengths are
1170 * the same after DMA mapping to device address space.
1171 */
1172 length_mask = 0;
1173 offset_mask = 0;
1174 if (host->flags & SDHCI_USE_ADMA) {
1175 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1176 length_mask = 3;
1177 /*
1178 * As we use up to 3 byte chunks to work
1179 * around alignment problems, we need to
1180 * check the offset as well.
1181 */
1182 offset_mask = 3;
1183 }
1184 } else {
1185 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1186 length_mask = 3;
1187 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1188 offset_mask = 3;
1189 }
1190
1191 if (unlikely(length_mask | offset_mask)) {
1192 for_each_sg(data->sg, sg, data->sg_len, i) {
1193 if (sg->length & length_mask) {
1194 DBG("Reverting to PIO because of transfer size (%d)\n",
1195 sg->length);
1196 host->flags &= ~SDHCI_REQ_USE_DMA;
1197 break;
1198 }
1199 if (sg->offset & offset_mask) {
1200 DBG("Reverting to PIO because of bad alignment\n");
1201 host->flags &= ~SDHCI_REQ_USE_DMA;
1202 break;
1203 }
1204 }
1205 }
1206 }
1207
1208 sdhci_config_dma(host);
1209
1210 if (host->flags & SDHCI_REQ_USE_DMA) {
1211 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1212
1213 if (sg_cnt <= 0) {
1214 /*
1215 * This only happens when someone fed
1216 * us an invalid request.
1217 */
1218 WARN_ON(1);
1219 host->flags &= ~SDHCI_REQ_USE_DMA;
1220 } else if (host->flags & SDHCI_USE_ADMA) {
1221 sdhci_adma_table_pre(host, data, sg_cnt);
1222 sdhci_set_adma_addr(host, host->adma_addr);
1223 } else {
1224 WARN_ON(sg_cnt != 1);
1225 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1226 }
1227 }
1228
1229 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1230 int flags;
1231
1232 flags = SG_MITER_ATOMIC;
1233 if (host->data->flags & MMC_DATA_READ)
1234 flags |= SG_MITER_TO_SG;
1235 else
1236 flags |= SG_MITER_FROM_SG;
1237 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1238 host->blocks = data->blocks;
1239 }
1240
1241 sdhci_set_transfer_irqs(host);
1242
1243 /* Set the DMA boundary value and block size */
1244 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1245 SDHCI_BLOCK_SIZE);
1246
1247 /*
 1248 * For Version 4.10 onwards, if v4 mode is enabled, the 32-bit Block Count
 1249 * register can be used; in that case the 16-bit block count register must be 0.
1250 */
1251 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1252 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1253 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1254 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1255 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1256 } else {
1257 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1258 }
1259}
1260
1261static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1262 struct mmc_request *mrq)
1263{
1264 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1265 !mrq->cap_cmd_during_tfr;
1266}
1267
1268static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
1269 struct mmc_request *mrq)
1270{
1271 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1272}
1273
1274static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
1275 struct mmc_request *mrq)
1276{
1277 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1278}
1279
1280static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1281 struct mmc_command *cmd,
1282 u16 *mode)
1283{
1284 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1285 (cmd->opcode != SD_IO_RW_EXTENDED);
1286 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1287 u16 ctrl2;
1288
1289 /*
1290 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1291 * Select' is recommended rather than use of 'Auto CMD12
1292 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
 1293 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
1294 */
1295 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1296 (use_cmd12 || use_cmd23)) {
1297 *mode |= SDHCI_TRNS_AUTO_SEL;
1298
1299 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1300 if (use_cmd23)
1301 ctrl2 |= SDHCI_CMD23_ENABLE;
1302 else
1303 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1304 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1305
1306 return;
1307 }
1308
1309 /*
1310 * If we are sending CMD23, CMD12 never gets sent
1311 * on successful completion (so no Auto-CMD12).
1312 */
1313 if (use_cmd12)
1314 *mode |= SDHCI_TRNS_AUTO_CMD12;
1315 else if (use_cmd23)
1316 *mode |= SDHCI_TRNS_AUTO_CMD23;
1317}
1318
1319static void sdhci_set_transfer_mode(struct sdhci_host *host,
1320 struct mmc_command *cmd)
1321{
1322 u16 mode = 0;
1323 struct mmc_data *data = cmd->data;
1324
1325 if (data == NULL) {
1326 if (host->quirks2 &
1327 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1328 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1329 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1330 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1331 } else {
1332 /* clear Auto CMD settings for no data CMDs */
1333 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1334 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1335 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1336 }
1337 return;
1338 }
1339
1340 WARN_ON(!host->data);
1341
1342 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1343 mode = SDHCI_TRNS_BLK_CNT_EN;
1344
1345 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1346 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1347 sdhci_auto_cmd_select(host, cmd, &mode);
1348 if (sdhci_auto_cmd23(host, cmd->mrq))
1349 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1350 }
1351
1352 if (data->flags & MMC_DATA_READ)
1353 mode |= SDHCI_TRNS_READ;
1354 if (host->flags & SDHCI_REQ_USE_DMA)
1355 mode |= SDHCI_TRNS_DMA;
1356
1357 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1358}
1359
1360static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1361{
1362 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1363 ((mrq->cmd && mrq->cmd->error) ||
1364 (mrq->sbc && mrq->sbc->error) ||
1365 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1366 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1367}
1368
1369static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1370{
1371 int i;
1372
1373 if (host->cmd && host->cmd->mrq == mrq)
1374 host->cmd = NULL;
1375
1376 if (host->data_cmd && host->data_cmd->mrq == mrq)
1377 host->data_cmd = NULL;
1378
1379 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1380 host->deferred_cmd = NULL;
1381
1382 if (host->data && host->data->mrq == mrq)
1383 host->data = NULL;
1384
1385 if (sdhci_needs_reset(host, mrq))
1386 host->pending_reset = true;
1387
1388 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1389 if (host->mrqs_done[i] == mrq) {
1390 WARN_ON(1);
1391 return;
1392 }
1393 }
1394
1395 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1396 if (!host->mrqs_done[i]) {
1397 host->mrqs_done[i] = mrq;
1398 break;
1399 }
1400 }
1401
1402 WARN_ON(i >= SDHCI_MAX_MRQS);
1403
1404 sdhci_del_timer(host, mrq);
1405
1406 if (!sdhci_has_requests(host))
1407 sdhci_led_deactivate(host);
1408}
1409
1410static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1411{
1412 __sdhci_finish_mrq(host, mrq);
1413
1414 queue_work(host->complete_wq, &host->complete_work);
1415}
1416
1417static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
1418{
1419 struct mmc_command *data_cmd = host->data_cmd;
1420 struct mmc_data *data = host->data;
1421
1422 host->data = NULL;
1423 host->data_cmd = NULL;
1424
1425 /*
1426 * The controller needs a reset of internal state machines upon error
1427 * conditions.
1428 */
1429 if (data->error) {
1430 if (!host->cmd || host->cmd == data_cmd)
1431 sdhci_do_reset(host, SDHCI_RESET_CMD);
1432 sdhci_do_reset(host, SDHCI_RESET_DATA);
1433 }
1434
1435 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1436 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1437 sdhci_adma_table_post(host, data);
1438
1439 /*
1440 * The specification states that the block count register must
1441 * be updated, but it does not specify at what point in the
1442 * data flow. That makes the register entirely useless to read
1443 * back so we have to assume that nothing made it to the card
1444 * in the event of an error.
1445 */
1446 if (data->error)
1447 data->bytes_xfered = 0;
1448 else
1449 data->bytes_xfered = data->blksz * data->blocks;
1450
1451 /*
1452 * Need to send CMD12 if -
1453 * a) open-ended multiblock transfer (no CMD23)
1454 * b) error in multiblock transfer
1455 */
1456 if (data->stop &&
1457 (data->error ||
1458 !data->mrq->sbc)) {
1459 /*
1460 * 'cap_cmd_during_tfr' request must not use the command line
1461 * after mmc_command_done() has been called. It is upper layer's
1462 * responsibility to send the stop command if required.
1463 */
1464 if (data->mrq->cap_cmd_during_tfr) {
1465 __sdhci_finish_mrq(host, data->mrq);
1466 } else {
1467 /* Avoid triggering warning in sdhci_send_command() */
1468 host->cmd = NULL;
1469 if (!sdhci_send_command(host, data->stop)) {
1470 if (sw_data_timeout) {
1471 /*
1472 * This is anyway a sw data timeout, so
1473 * give up now.
1474 */
1475 data->stop->error = -EIO;
1476 __sdhci_finish_mrq(host, data->mrq);
1477 } else {
1478 WARN_ON(host->deferred_cmd);
1479 host->deferred_cmd = data->stop;
1480 }
1481 }
1482 }
1483 } else {
1484 __sdhci_finish_mrq(host, data->mrq);
1485 }
1486}
1487
1488static void sdhci_finish_data(struct sdhci_host *host)
1489{
1490 __sdhci_finish_data(host, false);
1491}
1492
1493bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1494{
1495 int flags;
1496 u32 mask;
1497 unsigned long timeout;
1498
1499 WARN_ON(host->cmd);
1500
1501 /* Initially, a command has no error */
1502 cmd->error = 0;
1503
1504 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1505 cmd->opcode == MMC_STOP_TRANSMISSION)
1506 cmd->flags |= MMC_RSP_BUSY;
1507
1508 mask = SDHCI_CMD_INHIBIT;
1509 if (sdhci_data_line_cmd(cmd))
1510 mask |= SDHCI_DATA_INHIBIT;
1511
 1512 /* We shouldn't wait for data inhibit for stop commands, even
1513 though they might use busy signaling */
1514 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1515 mask &= ~SDHCI_DATA_INHIBIT;
1516
1517 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1518 return false;
1519
1520 host->cmd = cmd;
1521 if (sdhci_data_line_cmd(cmd)) {
1522 WARN_ON(host->data_cmd);
1523 host->data_cmd = cmd;
1524 }
1525
1526 sdhci_prepare_data(host, cmd);
1527
1528 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1529
1530 sdhci_set_transfer_mode(host, cmd);
1531
1532 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1533 WARN_ONCE(1, "Unsupported response type!\n");
1534 /*
1535 * This does not happen in practice because 136-bit response
1536 * commands never have busy waiting, so rather than complicate
1537 * the error path, just remove busy waiting and continue.
1538 */
1539 cmd->flags &= ~MMC_RSP_BUSY;
1540 }
1541
1542 if (!(cmd->flags & MMC_RSP_PRESENT))
1543 flags = SDHCI_CMD_RESP_NONE;
1544 else if (cmd->flags & MMC_RSP_136)
1545 flags = SDHCI_CMD_RESP_LONG;
1546 else if (cmd->flags & MMC_RSP_BUSY)
1547 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1548 else
1549 flags = SDHCI_CMD_RESP_SHORT;
1550
1551 if (cmd->flags & MMC_RSP_CRC)
1552 flags |= SDHCI_CMD_CRC;
1553 if (cmd->flags & MMC_RSP_OPCODE)
1554 flags |= SDHCI_CMD_INDEX;
1555
1556 /* CMD19 is special in that the Data Present Select should be set */
1557 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1558 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1559 flags |= SDHCI_CMD_DATA;
1560
1561 timeout = jiffies;
1562 if (host->data_timeout)
1563 timeout += nsecs_to_jiffies(host->data_timeout);
1564 else if (!cmd->data && cmd->busy_timeout > 9000)
1565 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1566 else
1567 timeout += 10 * HZ;
1568 sdhci_mod_timer(host, cmd->mrq, timeout);
1569
1570 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1571
1572 return true;
1573}
1574
1575static bool sdhci_present_error(struct sdhci_host *host,
1576 struct mmc_command *cmd, bool present)
1577{
1578 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1579 cmd->error = -ENOMEDIUM;
1580 return true;
1581 }
1582
1583 return false;
1584}
1585
1586bool sdhci_send_command_retry(struct sdhci_host *host,
1587 struct mmc_command *cmd,
1588 unsigned long flags)
1589 __releases(host->lock)
1590 __acquires(host->lock)
1591{
1592 struct mmc_command *deferred_cmd = host->deferred_cmd;
1593 int timeout = 10; /* Approx. 10 ms */
1594 bool present;
1595
1596 /* Enable HW clock auto-gate to avoid bus error */
1597 if (!(host->mmc->caps2 & MMC_CAP2_BUS_AUTO_CLK_GATE))
1598 sdhci_auto_clk_gate(host->mmc, 1);
1599
1600 while (!sdhci_send_command(host, cmd)) {
1601 if (!timeout--) {
1602 pr_err("%s: Controller never released inhibit bit(s).\n",
1603 mmc_hostname(host->mmc));
1604 sdhci_dumpregs(host);
1605 cmd->error = -EIO;
1606 return false;
1607 }
1608
1609 spin_unlock_irqrestore(&host->lock, flags);
1610
1611 usleep_range(1000, 1250);
1612
1613 present = host->mmc->ops->get_cd(host->mmc);
1614
1615 spin_lock_irqsave(&host->lock, flags);
1616
1617 /* A deferred command might disappear, handle that */
1618 if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1619 return true;
1620
1621 if (sdhci_present_error(host, cmd, present))
1622 return false;
1623 }
1624
1625 if (cmd == host->deferred_cmd)
1626 host->deferred_cmd = NULL;
1627
1628 return true;
1629}
1630
1631static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1632{
1633 int i, reg;
1634
1635 for (i = 0; i < 4; i++) {
1636 reg = SDHCI_RESPONSE + (3 - i) * 4;
1637 cmd->resp[i] = sdhci_readl(host, reg);
1638 }
1639
1640 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1641 return;
1642
1643 /* CRC is stripped so we need to do some shifting */
1644 for (i = 0; i < 4; i++) {
1645 cmd->resp[i] <<= 8;
1646 if (i != 3)
1647 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1648 }
1649}
1650
1651static void sdhci_finish_command(struct sdhci_host *host)
1652{
1653 struct mmc_command *cmd = host->cmd;
1654
1655 host->cmd = NULL;
1656
1657 if (cmd->flags & MMC_RSP_PRESENT) {
1658 if (cmd->flags & MMC_RSP_136) {
1659 sdhci_read_rsp_136(host, cmd);
1660 } else {
1661 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1662 }
1663 }
1664
1665 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1666 mmc_command_done(host->mmc, cmd->mrq);
1667
1668 /*
 1669 * The host can send an interrupt when the busy state has
1670 * ended, allowing us to wait without wasting CPU cycles.
1671 * The busy signal uses DAT0 so this is similar to waiting
1672 * for data to complete.
1673 *
1674 * Note: The 1.0 specification is a bit ambiguous about this
1675 * feature so there might be some problems with older
1676 * controllers.
1677 */
1678 if (cmd->flags & MMC_RSP_BUSY) {
1679 if (cmd->data) {
1680 DBG("Cannot wait for busy signal when also doing a data transfer");
1681 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1682 cmd == host->data_cmd) {
1683 /* Command complete before busy is ended */
1684 return;
1685 }
1686 }
1687
1688 /* Finished CMD23, now send actual command. */
1689 if (cmd == cmd->mrq->sbc) {
1690 if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1691 WARN_ON(host->deferred_cmd);
1692 host->deferred_cmd = cmd->mrq->cmd;
1693 }
1694 } else {
1695
1696 /* Processed actual command. */
1697 if (host->data && host->data_early)
1698 sdhci_finish_data(host);
1699
1700 if (!cmd->data)
1701 __sdhci_finish_mrq(host, cmd->mrq);
1702 }
1703}
1704
1705static u16 sdhci_get_preset_value(struct sdhci_host *host)
1706{
1707 u16 preset = 0;
1708
1709 switch (host->timing) {
1710 case MMC_TIMING_MMC_HS:
1711 case MMC_TIMING_SD_HS:
1712 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1713 break;
1714 case MMC_TIMING_UHS_SDR12:
1715 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1716 break;
1717 case MMC_TIMING_UHS_SDR25:
1718 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1719 break;
1720 case MMC_TIMING_UHS_SDR50:
1721 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1722 break;
1723 case MMC_TIMING_UHS_SDR104:
1724 case MMC_TIMING_MMC_HS200:
1725 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1726 break;
1727 case MMC_TIMING_UHS_DDR50:
1728 case MMC_TIMING_MMC_DDR52:
1729 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1730 break;
1731 case MMC_TIMING_MMC_HS400:
1732 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1733 break;
1734 default:
1735 pr_warn("%s: Invalid UHS-I mode selected\n",
1736 mmc_hostname(host->mmc));
1737 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1738 break;
1739 }
1740 return preset;
1741}
1742
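/*
 * Compute the SDHCI_CLOCK_CONTROL divider bits for the requested frequency.
 * In programmable clock mode the resulting clock is max_clk * clk_mul / (div + 1);
 * in divided (base) clock mode it is max_clk / (2 * div), where a programmed
 * divisor of 0 selects the undivided base clock. *actual_clock is updated with
 * the frequency actually achieved.
 */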
1743u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1744 unsigned int *actual_clock)
1745{
1746 int div = 0; /* Initialized for compiler warning */
1747 int real_div = div, clk_mul = 1;
1748 u16 clk = 0;
1749 bool switch_base_clk = false;
1750
1751 if (host->version >= SDHCI_SPEC_300) {
1752 if (host->preset_enabled) {
1753 u16 pre_val;
1754
1755 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1756 pre_val = sdhci_get_preset_value(host);
1757 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1758 if (host->clk_mul &&
1759 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1760 clk = SDHCI_PROG_CLOCK_MODE;
1761 real_div = div + 1;
1762 clk_mul = host->clk_mul;
1763 } else {
1764 real_div = max_t(int, 1, div << 1);
1765 }
1766 goto clock_set;
1767 }
1768
1769 /*
1770 * Check if the Host Controller supports Programmable Clock
1771 * Mode.
1772 */
1773 if (host->clk_mul) {
1774 for (div = 1; div <= 1024; div++) {
1775 if ((host->max_clk * host->clk_mul / div)
1776 <= clock)
1777 break;
1778 }
1779 if ((host->max_clk * host->clk_mul / div) <= clock) {
1780 /*
1781 * Set Programmable Clock Mode in the Clock
1782 * Control register.
1783 */
1784 clk = SDHCI_PROG_CLOCK_MODE;
1785 real_div = div;
1786 clk_mul = host->clk_mul;
1787 div--;
1788 } else {
1789 /*
1790 * Divisor can be too small to reach clock
1791 * speed requirement. Then use the base clock.
1792 */
1793 switch_base_clk = true;
1794 }
1795 }
1796
1797 if (!host->clk_mul || switch_base_clk) {
1798 /* Version 3.00 divisors must be a multiple of 2. */
1799 if (host->max_clk <= clock)
1800 div = 1;
1801 else {
1802 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1803 div += 2) {
1804 if ((host->max_clk / div) <= clock)
1805 break;
1806 }
1807 }
1808 real_div = div;
1809 div >>= 1;
1810 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1811 && !div && host->max_clk <= 25000000)
1812 div = 1;
1813 }
1814 } else {
1815 /* Version 2.00 divisors must be a power of 2. */
1816 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1817 if ((host->max_clk / div) <= clock)
1818 break;
1819 }
1820 real_div = div;
1821 div >>= 1;
1822 }
1823
1824clock_set:
1825 if (real_div)
1826 *actual_clock = (host->max_clk * clk_mul) / real_div;
1827 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1828 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1829 << SDHCI_DIVIDER_HI_SHIFT;
1830
1831 return clk;
1832}
1833EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1834
1835void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1836{
1837 ktime_t timeout;
1838
1839 clk |= SDHCI_CLOCK_INT_EN;
1840 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1841
1842 /* Wait max 150 ms */
1843 timeout = ktime_add_ms(ktime_get(), 150);
1844 while (1) {
1845 bool timedout = ktime_after(ktime_get(), timeout);
1846
1847 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1848 if (clk & SDHCI_CLOCK_INT_STABLE)
1849 break;
1850 if (timedout) {
1851 pr_err("%s: Internal clock never stabilised.\n",
1852 mmc_hostname(host->mmc));
1853 sdhci_dumpregs(host);
1854 return;
1855 }
1856 udelay(10);
1857 }
1858
1859 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1860 clk |= SDHCI_CLOCK_PLL_EN;
1861 clk &= ~SDHCI_CLOCK_INT_STABLE;
1862 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1863
1864 /* Wait max 150 ms */
1865 timeout = ktime_add_ms(ktime_get(), 150);
1866 while (1) {
1867 bool timedout = ktime_after(ktime_get(), timeout);
1868
1869 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1870 if (clk & SDHCI_CLOCK_INT_STABLE)
1871 break;
1872 if (timedout) {
1873 pr_err("%s: PLL clock never stabilised.\n",
1874 mmc_hostname(host->mmc));
1875 sdhci_dumpregs(host);
1876 return;
1877 }
1878 udelay(10);
1879 }
1880 }
1881
1882 clk |= SDHCI_CLOCK_CARD_EN;
1883 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1884}
1885EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1886
1887void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1888{
1889 u16 clk;
1890
1891 host->mmc->actual_clock = 0;
1892
1893 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1894
1895 if (clock == 0)
1896 return;
1897
1898 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1899 sdhci_enable_clk(host, clk);
1900}
1901EXPORT_SYMBOL_GPL(sdhci_set_clock);
1902
1903static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1904 unsigned short vdd)
1905{
1906 struct mmc_host *mmc = host->mmc;
1907
1908 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1909
1910 if (mode != MMC_POWER_OFF)
1911 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1912 else
1913 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1914}
1915
1916void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1917 unsigned short vdd)
1918{
1919 u8 pwr = 0;
1920
1921 if (mode != MMC_POWER_OFF) {
1922 switch (1 << vdd) {
1923 case MMC_VDD_165_195:
1924 /*
1925 * Without a regulator, SDHCI does not support 2.0v
1926 * so we only get here if the driver deliberately
1927 * added the 2.0v range to ocr_avail. Map it to 1.8v
1928 * for the purpose of turning on the power.
1929 */
1930 case MMC_VDD_20_21:
1931 pwr = SDHCI_POWER_180;
1932 break;
1933 case MMC_VDD_29_30:
1934 case MMC_VDD_30_31:
1935 pwr = SDHCI_POWER_300;
1936 break;
1937 case MMC_VDD_32_33:
1938 case MMC_VDD_33_34:
1939 /*
1940 * 3.4 ~ 3.6V are valid only for those platforms where it's
1941 * known that the voltage range is supported by hardware.
1942 */
1943 case MMC_VDD_34_35:
1944 case MMC_VDD_35_36:
1945 pwr = SDHCI_POWER_330;
1946 break;
1947 default:
1948 WARN(1, "%s: Invalid vdd %#x\n",
1949 mmc_hostname(host->mmc), vdd);
1950 break;
1951 }
1952 }
1953
1954 if (host->pwr == pwr)
1955 return;
1956
1957 host->pwr = pwr;
1958
1959 if (pwr == 0) {
1960 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1961 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1962 sdhci_runtime_pm_bus_off(host);
1963 } else {
1964 /*
1965 * Spec says that we should clear the power reg before setting
1966 * a new value. Some controllers don't seem to like this though.
1967 */
1968 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1969 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1970
1971 /*
1972 * At least the Marvell CaFe chip gets confused if we set the
1973 * voltage and set turn on power at the same time, so set the
1974 * voltage first.
1975 */
1976 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1977 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1978
1979 pwr |= SDHCI_POWER_ON;
1980
1981 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1982
1983 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1984 sdhci_runtime_pm_bus_on(host);
1985
1986 /*
 1987 * Some controllers need an extra 10ms delay before they can
 1988 * apply the clock after applying power
1989 */
1990 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1991 mdelay(10);
1992 }
1993}
1994EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1995
1996void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1997 unsigned short vdd)
1998{
1999 if (IS_ERR(host->mmc->supply.vmmc))
2000 sdhci_set_power_noreg(host, mode, vdd);
2001 else
2002 sdhci_set_power_reg(host, mode, vdd);
2003}
2004EXPORT_SYMBOL_GPL(sdhci_set_power);
2005
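/*
 * Update only the SDHCI_SIGNAL_ENABLE (interrupt-to-IRQ-line) mask: clear the
 * bits in @clear, then set the bits in @set, leaving SDHCI_INT_ENABLE
 * untouched. Used by the command-polling path to temporarily mask IRQ
 * delivery.
 */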
2006void sdhci_clear_set_irqs_sig(struct sdhci_host *host, u32 clear, u32 set)
2007{
2008 u32 ier;
2009
2010 ier = sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
2011 ier &= ~clear;
2012 ier |= set;
2013 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
2014}
2015
2016/*****************************************************************************\
2017 * *
2018 * MMC callbacks *
2019 * *
2020\*****************************************************************************/
2021
2022void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2023{
2024 struct sdhci_host *host = mmc_priv(mmc);
2025 struct mmc_command *cmd;
2026 unsigned long flags;
2027 bool present;
2028#ifdef SDHCI_CMD_POLL_OPT
2029 unsigned long timeo;
2030 u32 intmask, ier_sig;
2031 u8 polling = 0;
2032#endif
2033
2034 /* Firstly check card presence */
2035 present = mmc->ops->get_cd(mmc);
2036
2037 spin_lock_irqsave(&host->lock, flags);
2038
2039 sdhci_led_activate(host);
2040
2041 /*
 2042 * Ensure we don't send the STOP for non-SET_BLOCK_COUNT (CMD23)
2043 * requests if Auto-CMD12 is enabled.
2044 */
2045 if (sdhci_auto_cmd12(host, mrq)) {
2046 if (mrq->stop) {
2047 mrq->data->stop = NULL;
2048 mrq->stop = NULL;
2049 }
2050 }
2051
2052
2053 if (sdhci_present_error(host, mrq->cmd, present))
2054 goto out_finish;
2055
2056 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2057
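/*
 * Command-polling fast path: command completion is polled from
 * SDHCI_INT_STATUS with interrupt signalling masked; for single-block data
 * requests the data phase is polled as well, and the normal
 * sdhci_cmd_irq()/sdhci_data_irq() handlers are then invoked by hand.
 * Multi-block transfers still complete via the data interrupt once
 * signalling is restored.
 */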
2058#ifdef SDHCI_CMD_POLL_OPT
2059 polling = 0;
2060 intmask = SDHCI_INT_CMD_MASK;
2061 if (mrq->cmd->data && mrq->cmd->data->blocks == 1) {
2062 polling = 1;
2063 intmask |= SDHCI_INT_DATA_MASK;
2064 }
2065
2066 if (!sdhci_send_command_retry(host, cmd, flags))
2067 goto out_finish;
2068
2069 /*
2070	 * Disable command/data interrupts; this must be done after
2071	 * "sdhci_send_command_retry", since SDHCI_SIGNAL_ENABLE may be
2072	 * changed in that function.
2073 */
2074 ier_sig = sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
2075 sdhci_clear_set_irqs_sig(host, SDHCI_INT_ALL_MASK, 0);
2076 host->cmd_polling = true;
2077 spin_unlock_irqrestore(&host->lock, flags);
2078
2079	/* wait for the command to finish */
2080 timeo = jiffies + msecs_to_jiffies(1000);
2081 do {
2082 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2083 if (intmask & (SDHCI_INT_ERROR | SDHCI_INT_RESPONSE))
2084 break;
2085
2086 if (!time_before(jiffies, timeo)) {
2087 intmask |= SDHCI_INT_ERROR | SDHCI_INT_TIMEOUT;
2088 pr_err("%s: CMD software timeout, STATUS=0x%x\n",
2089 mmc_hostname(host->mmc),
2090 sdhci_readl(host, SDHCI_INT_STATUS));
2091 break;
2092 }
2093	} while (1);
2094 if (intmask & (SDHCI_INT_ERROR))
2095 polling = 0;
2096
2097 while (polling) {
2098		/* wait for the data transfer to finish */
2099 timeo = jiffies + msecs_to_jiffies(2000);
2100 do {
2101 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2102 if (intmask & (SDHCI_INT_ERROR | SDHCI_INT_DATA_MASK))
2103 break;
2104 if (!time_before(jiffies, timeo)) {
2105 intmask |= SDHCI_INT_ERROR | SDHCI_INT_TIMEOUT;
2106 pr_err("%s: DATA software timeout, STATUS=0x%x!\n",
2107 mmc_hostname(host->mmc),
2108 sdhci_readl(host, SDHCI_INT_STATUS));
2109 break;
2110 }
2111		} while (1);
2112
2113 spin_lock_irqsave(&host->lock, flags);
2114 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
2115 SDHCI_INT_STATUS);
2116 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2117 spin_unlock_irqrestore(&host->lock, flags);
2118
2119 if (intmask & (SDHCI_INT_ERROR | SDHCI_INT_DATA_END))
2120 break;
2121 }
2122
2123 spin_lock_irqsave(&host->lock, flags);
2124 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);
2125 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
2126 host->cmd_polling = false;
2127 sdhci_clear_set_irqs_sig(host, 0, ier_sig);
2128 spin_unlock_irqrestore(&host->lock, flags);
2129
2130 if (!cmd->data || polling || (intmask & SDHCI_INT_ERROR)) {
2131 while (!sdhci_request_done(host));
2132 }
2133#else
2134 if (!sdhci_send_command_retry(host, cmd, flags))
2135 goto out_finish;
2136
2137 spin_unlock_irqrestore(&host->lock, flags);
2138#endif
2139
2140 return;
2141
2142out_finish:
2143 sdhci_finish_mrq(host, mrq);
2144 spin_unlock_irqrestore(&host->lock, flags);
2145}
2146EXPORT_SYMBOL_GPL(sdhci_request);
2147
2148int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2149{
2150 struct sdhci_host *host = mmc_priv(mmc);
2151 struct mmc_command *cmd;
2152 unsigned long flags;
2153 int ret = 0;
2154
2155 spin_lock_irqsave(&host->lock, flags);
2156
2157 if (sdhci_present_error(host, mrq->cmd, true)) {
2158 sdhci_finish_mrq(host, mrq);
2159 goto out_finish;
2160 }
2161
2162 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2163
2164 /*
2165	 * The HSQ may send a command in interrupt context without polling
2166	 * the busy signaling, which means we should return -EBUSY if the
2167	 * controller has not released the inhibit bits, so that HSQ can try
2168	 * to send the request again in non-atomic context. Hence we must not
2169	 * finish this request here.
2170 */
2171 if (!sdhci_send_command(host, cmd))
2172 ret = -EBUSY;
2173 else
2174 sdhci_led_activate(host);
2175
2176out_finish:
2177 spin_unlock_irqrestore(&host->lock, flags);
2178 return ret;
2179}
2180EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2181
2182void sdhci_set_bus_width(struct sdhci_host *host, int width)
2183{
2184 u8 ctrl;
2185
2186 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2187 if (width == MMC_BUS_WIDTH_8) {
2188 ctrl &= ~SDHCI_CTRL_4BITBUS;
2189 ctrl |= SDHCI_CTRL_8BITBUS;
2190 } else {
2191 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2192 ctrl &= ~SDHCI_CTRL_8BITBUS;
2193 if (width == MMC_BUS_WIDTH_4)
2194 ctrl |= SDHCI_CTRL_4BITBUS;
2195 else
2196 ctrl &= ~SDHCI_CTRL_4BITBUS;
2197 }
2198 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2199}
2200EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2201
2202void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2203{
2204 u16 ctrl_2;
2205
2206 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2207 /* Select Bus Speed Mode for host */
2208 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2209 if ((timing == MMC_TIMING_MMC_HS200) ||
2210 (timing == MMC_TIMING_UHS_SDR104))
2211 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2212 else if (timing == MMC_TIMING_UHS_SDR12)
2213 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2214 else if (timing == MMC_TIMING_UHS_SDR25)
2215 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2216 else if (timing == MMC_TIMING_UHS_SDR50)
2217 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2218 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2219 (timing == MMC_TIMING_MMC_DDR52))
2220 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2221 else if (timing == MMC_TIMING_MMC_HS400)
2222 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2223 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2224}
2225EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2226
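/* Timings for which the SDHCI preset value registers are defined */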
2227static bool sdhci_timing_has_preset(unsigned char timing)
2228{
2229 switch (timing) {
2230 case MMC_TIMING_UHS_SDR12:
2231 case MMC_TIMING_UHS_SDR25:
2232 case MMC_TIMING_UHS_SDR50:
2233 case MMC_TIMING_UHS_SDR104:
2234 case MMC_TIMING_UHS_DDR50:
2235 case MMC_TIMING_MMC_DDR52:
2236 return true;
2237	}
2238 return false;
2239}
2240
2241static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
2242{
2243 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2244 sdhci_timing_has_preset(timing);
2245}
2246
2247static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
2248{
2249 /*
2250 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
2251 * Frequency. Check if preset values need to be enabled, or the Driver
2252 * Strength needs updating. Note, clock changes are handled separately.
2253 */
2254 return !host->preset_enabled &&
2255 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
2256}
2257
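/*
 * Vendor-specific TX/RX delay-line controls. These forward straight to the
 * platform ops without a NULL check, so they assume hosts wiring up these
 * mmc callbacks also provide set_delay_val() and enable_delay_line().
 */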
2258void sdhci_set_txrx_delay(struct mmc_host *mmc, int tx, unsigned int val)
2259{
2260 struct sdhci_host *host = mmc_priv(mmc);
2261
2262 host->ops->set_delay_val(host, tx, val);
2263}
2264
2265void sdhci_enable_txrx_delay(struct mmc_host *mmc, int tx, int enable)
2266{
2267 struct sdhci_host *host = mmc_priv(mmc);
2268
2269 host->ops->enable_delay_line(host, tx, enable);
2270}
2271
2272void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2273{
2274 struct sdhci_host *host = mmc_priv(mmc);
2275 bool reinit_uhs = host->reinit_uhs;
2276 bool turning_on_clk = false;
2277 u8 ctrl;
2278
2279 host->reinit_uhs = false;
2280
2281 if (ios->power_mode == MMC_POWER_UNDEFINED)
2282 return;
2283
2284 if (host->flags & SDHCI_DEVICE_DEAD) {
2285 if (!IS_ERR(mmc->supply.vmmc) &&
2286 ios->power_mode == MMC_POWER_OFF)
2287 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2288 return;
2289 }
2290
2291 /*
2292 * Reset the chip on each power off.
2293 * Should clear out any weird states.
2294 */
2295 if (ios->power_mode == MMC_POWER_OFF) {
2296 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2297 sdhci_reinit(host);
2298 }
2299
2300 if (host->version >= SDHCI_SPEC_300 &&
2301 (ios->power_mode == MMC_POWER_UP) &&
2302 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2303 sdhci_enable_preset_value(host, false);
2304
2305 if (!ios->clock || ios->clock != host->clock) {
2306 turning_on_clk = ios->clock && !host->clock;
2307
2308 if (host->ops->clk_prepare) {
2309 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2310 ios->clock = host->ops->clk_prepare(host, ios->clock);
2311 if (host->ops->get_max_clock)
2312 host->max_clk = host->ops->get_max_clock(host);
2313 }
2314
2315 host->ops->set_clock(host, ios->clock);
2316 host->clock = ios->clock;
2317
2318 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2319 host->clock) {
2320 host->timeout_clk = host->mmc->actual_clock ?
2321 host->mmc->actual_clock / 1000 :
2322 host->clock / 1000;
2323
2324 if (host->quirks2 & SDHCI_QUIRK2_TIMEOUT_DIVIDE_4)
2325 host->timeout_clk /= 4;
2326
2327 host->mmc->max_busy_timeout =
2328 host->ops->get_max_timeout_count ?
2329 host->ops->get_max_timeout_count(host) :
2330 1 << 27;
2331 host->mmc->max_busy_timeout /= host->timeout_clk;
2332 }
2333 }
2334
2335 if (host->ops->set_power)
2336 host->ops->set_power(host, ios->power_mode, ios->vdd);
2337 else
2338 sdhci_set_power(host, ios->power_mode, ios->vdd);
2339
2340 if (host->ops->platform_send_init_74_clocks)
2341 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2342
2343 host->ops->set_bus_width(host, ios->bus_width);
2344
2345 /*
2346 * Special case to avoid multiple clock changes during voltage
2347 * switching.
2348 */
2349 if (!reinit_uhs &&
2350 turning_on_clk &&
2351 host->timing == ios->timing &&
2352 host->version >= SDHCI_SPEC_300 &&
2353 !sdhci_presetable_values_change(host, ios))
2354 return;
2355
2356 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2357
2358 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2359 if (ios->timing == MMC_TIMING_SD_HS ||
2360 ios->timing == MMC_TIMING_MMC_HS ||
2361 ios->timing == MMC_TIMING_MMC_HS400 ||
2362 ios->timing == MMC_TIMING_MMC_HS200 ||
2363 ios->timing == MMC_TIMING_MMC_DDR52 ||
2364 ios->timing == MMC_TIMING_UHS_SDR50 ||
2365 ios->timing == MMC_TIMING_UHS_SDR104 ||
2366 ios->timing == MMC_TIMING_UHS_DDR50 ||
2367 ios->timing == MMC_TIMING_UHS_SDR25)
2368 ctrl |= SDHCI_CTRL_HISPD;
2369 else
2370 ctrl &= ~SDHCI_CTRL_HISPD;
2371 }
2372
2373 if (host->version >= SDHCI_SPEC_300) {
2374 u16 clk, ctrl_2;
2375
2376 if (!host->preset_enabled) {
2377 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2378 /*
2379 * We only need to set Driver Strength if the
2380 * preset value enable is not set.
2381 */
2382 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2383 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2384 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2385 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2386 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2387 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2388 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2389 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2390 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2391 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2392 else {
2393 pr_warn("%s: invalid driver type, default to driver type B\n",
2394 mmc_hostname(mmc));
2395 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2396 }
2397
2398 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2399 host->drv_type = ios->drv_type;
2400 } else {
2401 /*
2402 * According to SDHC Spec v3.00, if the Preset Value
2403 * Enable in the Host Control 2 register is set, we
2404 * need to reset SD Clock Enable before changing High
2405			 * Speed Enable to avoid generating clock glitches.
2406 */
2407
2408 /* Reset SD Clock Enable */
2409 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2410 clk &= ~SDHCI_CLOCK_CARD_EN;
2411 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2412
2413 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2414
2415 /* Re-enable SD Clock */
2416 host->ops->set_clock(host, host->clock);
2417 }
2418
2419 /* Reset SD Clock Enable */
2420 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2421 clk &= ~SDHCI_CLOCK_CARD_EN;
2422 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2423
2424 host->ops->set_uhs_signaling(host, ios->timing);
2425 host->timing = ios->timing;
2426
2427 if (sdhci_preset_needed(host, ios->timing)) {
2428 u16 preset;
2429
2430 sdhci_enable_preset_value(host, true);
2431 preset = sdhci_get_preset_value(host);
2432 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2433 preset);
2434 host->drv_type = ios->drv_type;
2435 }
2436
2437 /* Re-enable SD Clock */
2438 host->ops->set_clock(host, host->clock);
2439 } else
2440 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2441
2442 /*
2443 * Some (ENE) controllers go apeshit on some ios operation,
2444 * signalling timeout and CRC errors even on CMD0. Resetting
2445 * it on each ios seems to solve the problem.
2446 */
2447 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2448 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2449}
2450EXPORT_SYMBOL_GPL(sdhci_set_ios);
2451
2452static int sdhci_get_cd(struct mmc_host *mmc)
2453{
2454 struct sdhci_host *host = mmc_priv(mmc);
2455 int gpio_cd = mmc_gpio_get_cd(mmc);
2456
2457 if (host->flags & SDHCI_DEVICE_DEAD)
2458 return 0;
2459
2460 /* If nonremovable, assume that the card is always present. */
2461 if (!mmc_card_is_removable(host->mmc))
2462 return 1;
2463
2464 /*
2465	 * Try slot gpio detect; if defined, it takes precedence
2466	 * over the built-in controller functionality.
2467 */
2468 if (gpio_cd >= 0)
2469 return !!gpio_cd;
2470
2471 /* If polling, assume that the card is always present. */
2472 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2473 return 1;
2474
2475 /* Host native card detect */
2476 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2477}
2478
2479static int sdhci_check_ro(struct sdhci_host *host)
2480{
2481 bool allow_invert = false;
2482 int is_readonly;
2483
2484 if (host->flags & SDHCI_DEVICE_DEAD) {
2485 is_readonly = 0;
2486 } else if (host->ops->get_ro) {
2487 is_readonly = host->ops->get_ro(host);
2488 } else if (mmc_can_gpio_ro(host->mmc)) {
2489 is_readonly = mmc_gpio_get_ro(host->mmc);
2490 /* Do not invert twice */
2491 allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
2492 } else {
2493 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2494 & SDHCI_WRITE_PROTECT);
2495 allow_invert = true;
2496 }
2497
2498 if (is_readonly >= 0 &&
2499 allow_invert &&
2500 (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
2501 is_readonly = !is_readonly;
2502
2503 return is_readonly;
2504}
2505
2506#define SAMPLE_COUNT 5
2507
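/*
 * For hosts with an unstable write-protect signal, sample the RO state
 * SAMPLE_COUNT times, 30 ms apart, and report read-only only if more than
 * half of the samples say so.
 */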
2508static int sdhci_get_ro(struct mmc_host *mmc)
2509{
2510 struct sdhci_host *host = mmc_priv(mmc);
2511 int i, ro_count;
2512
2513 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2514 return sdhci_check_ro(host);
2515
2516 ro_count = 0;
2517 for (i = 0; i < SAMPLE_COUNT; i++) {
2518 if (sdhci_check_ro(host)) {
2519 if (++ro_count > SAMPLE_COUNT / 2)
2520 return 1;
2521 }
2522 msleep(30);
2523 }
2524 return 0;
2525}
2526
2527static void sdhci_hw_reset(struct mmc_host *mmc)
2528{
2529 struct sdhci_host *host = mmc_priv(mmc);
2530
2531 if (host->ops && host->ops->hw_reset)
2532 host->ops->hw_reset(host);
2533}
2534
2535static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2536{
2537 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2538 if (enable)
2539 host->ier |= SDHCI_INT_CARD_INT;
2540 else
2541 host->ier &= ~SDHCI_INT_CARD_INT;
2542
2543 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2544 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2545 }
2546}
2547
2548void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2549{
2550 struct sdhci_host *host = mmc_priv(mmc);
2551 unsigned long flags;
2552
2553 if (enable)
2554 pm_runtime_get_noresume(host->mmc->parent);
2555
2556 spin_lock_irqsave(&host->lock, flags);
2557 sdhci_enable_sdio_irq_nolock(host, enable);
2558 spin_unlock_irqrestore(&host->lock, flags);
2559
2560 if (!enable)
2561 pm_runtime_put_noidle(host->mmc->parent);
2562}
2563EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2564
2565static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2566{
2567 struct sdhci_host *host = mmc_priv(mmc);
2568 unsigned long flags;
2569
2570 spin_lock_irqsave(&host->lock, flags);
2571 sdhci_enable_sdio_irq_nolock(host, true);
2572 spin_unlock_irqrestore(&host->lock, flags);
2573}
2574
2575int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2576 struct mmc_ios *ios)
2577{
2578 struct sdhci_host *host = mmc_priv(mmc);
2579 u16 ctrl;
2580 int ret;
2581
2582 /*
2583 * Signal Voltage Switching is only applicable for Host Controllers
2584 * v3.00 and above.
2585 */
2586 if (host->version < SDHCI_SPEC_300)
2587 return 0;
2588
2589 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2590
2591 switch (ios->signal_voltage) {
2592 case MMC_SIGNAL_VOLTAGE_330:
2593 if (!(host->flags & SDHCI_SIGNALING_330))
2594 return -EINVAL;
2595 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2596 ctrl &= ~SDHCI_CTRL_VDD_180;
2597 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2598
2599 if (host->ops->voltage_switch)
2600 host->ops->voltage_switch(host);
2601
2602 if (!IS_ERR(mmc->supply.vqmmc)) {
2603 ret = mmc_regulator_set_vqmmc(mmc, ios);
2604 if (ret) {
2605 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2606 mmc_hostname(mmc));
2607 return -EIO;
2608 }
2609 }
2610 /* Wait for 5ms */
2611 usleep_range(5000, 5500);
2612
2613 /* 3.3V regulator output should be stable within 5 ms */
2614 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2615 if (!(ctrl & SDHCI_CTRL_VDD_180))
2616 return 0;
2617
2618 pr_warn("%s: 3.3V regulator output did not became stable\n",
2619 mmc_hostname(mmc));
2620
2621 return -EAGAIN;
2622 case MMC_SIGNAL_VOLTAGE_180:
2623 if (!(host->flags & SDHCI_SIGNALING_180))
2624 return -EINVAL;
2625 if (!IS_ERR(mmc->supply.vqmmc)) {
2626 ret = mmc_regulator_set_vqmmc(mmc, ios);
2627 if (ret) {
2628 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2629 mmc_hostname(mmc));
2630 return -EIO;
2631 }
2632 }
2633
2634 /*
2635 * Enable 1.8V Signal Enable in the Host Control2
2636 * register
2637 */
2638 ctrl |= SDHCI_CTRL_VDD_180;
2639 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2640
2641		/* Some controllers need to do more when switching */
2642 if (host->ops->voltage_switch)
2643 host->ops->voltage_switch(host);
2644
2645 /* 1.8V regulator output should be stable within 5 ms */
2646 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2647 if (ctrl & SDHCI_CTRL_VDD_180)
2648 return 0;
2649
2650 pr_warn("%s: 1.8V regulator output did not became stable\n",
2651 mmc_hostname(mmc));
2652
2653 return -EAGAIN;
2654 case MMC_SIGNAL_VOLTAGE_120:
2655 if (!(host->flags & SDHCI_SIGNALING_120))
2656 return -EINVAL;
2657 if (!IS_ERR(mmc->supply.vqmmc)) {
2658 ret = mmc_regulator_set_vqmmc(mmc, ios);
2659 if (ret) {
2660 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2661 mmc_hostname(mmc));
2662 return -EIO;
2663 }
2664 }
2665 return 0;
2666 default:
2667 /* No signal voltage switch required */
2668 return 0;
2669 }
2670}
2671EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2672
2673static int sdhci_card_busy(struct mmc_host *mmc)
2674{
2675 struct sdhci_host *host = mmc_priv(mmc);
2676 u32 present_state;
2677
2678 /* Check whether DAT[0] is 0 */
2679 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2680
2681 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2682}
2683
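/* Vendor hook: let the platform enable or disable automatic clock gating */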
2684static void sdhci_auto_clk_gate(struct mmc_host *mmc, int auto_gate)
2685{
2686 struct sdhci_host *host = mmc_priv(mmc);
2687
2688 if (host->ops->clk_gate_auto)
2689 host->ops->clk_gate_auto(host, auto_gate);
2690}
2691
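/* Mark that the following (HS200) tuning pass is being done for HS400 */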
2692static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2693{
2694 struct sdhci_host *host = mmc_priv(mmc);
2695 unsigned long flags;
2696
2697 spin_lock_irqsave(&host->lock, flags);
2698 host->flags |= SDHCI_HS400_TUNING;
2699 spin_unlock_irqrestore(&host->lock, flags);
2700
2701 return 0;
2702}
2703
2704void sdhci_start_tuning(struct sdhci_host *host)
2705{
2706 u16 ctrl;
2707
2708 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2709 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2710 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2711 ctrl |= SDHCI_CTRL_TUNED_CLK;
2712 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2713
2714 /*
2715 * As per the Host Controller spec v3.00, tuning command
2716 * generates Buffer Read Ready interrupt, so enable that.
2717 *
2718 * Note: The spec clearly says that when tuning sequence
2719 * is being performed, the controller does not generate
2720 * interrupts other than Buffer Read Ready interrupt. But
2721 * to make sure we don't hit a controller bug, we _only_
2722 * enable Buffer Read Ready interrupt here.
2723 */
2724 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2725 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2726}
2727EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2728
2729void sdhci_end_tuning(struct sdhci_host *host)
2730{
2731 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2732 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2733}
2734EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2735
2736void sdhci_reset_tuning(struct sdhci_host *host)
2737{
2738 u16 ctrl;
2739
2740 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2741 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2742 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2743 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2744}
2745EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2746
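/* Exported wrapper to reset tuning state when only the mmc_host is at hand */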
2747void mmc_reset_tuning(struct mmc_host *mmc)
2748{
2749 struct sdhci_host *host = mmc_priv(mmc);
2750 sdhci_reset_tuning(host);
2751}
2752EXPORT_SYMBOL(mmc_reset_tuning);
2753
2754void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2755{
2756 sdhci_reset_tuning(host);
2757
2758 sdhci_do_reset(host, SDHCI_RESET_CMD);
2759 sdhci_do_reset(host, SDHCI_RESET_DATA);
2760
2761 sdhci_end_tuning(host);
2762
2763 mmc_abort_tuning(host->mmc, opcode);
2764}
2765EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2766
2767/*
2768 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2769 * tuning command does not have a data payload (or rather the hardware does it
2770 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2771 * interrupt setup is different to other commands and there is no timeout
2772 * interrupt so special handling is needed.
2773 */
2774void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2775{
2776 struct mmc_host *mmc = host->mmc;
2777 struct mmc_command cmd = {};
2778 struct mmc_request mrq = {};
2779 unsigned long flags;
2780 u32 b = host->sdma_boundary;
2781
2782 spin_lock_irqsave(&host->lock, flags);
2783
2784 cmd.opcode = opcode;
2785 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2786 cmd.mrq = &mrq;
2787
2788 mrq.cmd = &cmd;
2789 /*
2790 * In response to CMD19, the card sends 64 bytes of tuning
2791 * block to the Host Controller. So we set the block size
2792 * to 64 here.
2793 */
2794 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2795 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2796 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2797 else
2798 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2799
2800 /*
2801 * The tuning block is sent by the card to the host controller.
2802 * So we set the TRNS_READ bit in the Transfer Mode register.
2803 * This also takes care of setting DMA Enable and Multi Block
2804 * Select in the same register to 0.
2805 */
2806 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2807
2808 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2809 spin_unlock_irqrestore(&host->lock, flags);
2810 host->tuning_done = 0;
2811 return;
2812 }
2813
2814 host->cmd = NULL;
2815
2816 sdhci_del_timer(host, &mrq);
2817
2818 host->tuning_done = 0;
2819
2820 spin_unlock_irqrestore(&host->lock, flags);
2821
2822 /* Wait for Buffer Read Ready interrupt */
2823 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2824 msecs_to_jiffies(50));
2825
2826}
2827EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2828
2829static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2830{
2831 int i;
2832
2833 /*
2834 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
2835 * of loops reaches tuning loop count.
2836 */
2837 for (i = 0; i < host->tuning_loop_count; i++) {
2838 u16 ctrl;
2839
2840 sdhci_send_tuning(host, opcode);
2841
2842 if (!host->tuning_done) {
2843 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2844 mmc_hostname(host->mmc));
2845 sdhci_abort_tuning(host, opcode);
2846 return -ETIMEDOUT;
2847 }
2848
2849 /* Spec does not require a delay between tuning cycles */
2850 if (host->tuning_delay > 0)
2851 mdelay(host->tuning_delay);
2852
2853 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2854 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2855 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2856 return 0; /* Success! */
2857 break;
2858 }
2859
2860 }
2861
2862 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2863 mmc_hostname(host->mmc));
2864 sdhci_reset_tuning(host);
2865 return -EAGAIN;
2866}
2867
2868int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2869{
2870 struct sdhci_host *host = mmc_priv(mmc);
2871 int err = 0;
2872 unsigned int tuning_count = 0;
2873 bool hs400_tuning;
2874
2875 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2876
2877 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2878 tuning_count = host->tuning_count;
2879
2880 /*
2881 * The Host Controller needs tuning in case of SDR104 and DDR50
2882 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2883 * the Capabilities register.
2884 * If the Host Controller supports the HS200 mode then the
2885 * tuning function has to be executed.
2886 */
2887 switch (host->timing) {
2888 /* HS400 tuning is done in HS200 mode */
2889 case MMC_TIMING_MMC_HS400:
2890 err = -EINVAL;
2891 goto out;
2892
2893 case MMC_TIMING_MMC_HS200:
2894 /*
2895 * Periodic re-tuning for HS400 is not expected to be needed, so
2896 * disable it here.
2897 */
2898 if (hs400_tuning)
2899 tuning_count = 0;
2900 break;
2901
2902 case MMC_TIMING_UHS_SDR104:
2903 break;
2904
2905 case MMC_TIMING_UHS_SDR50:
2906 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2907 break;
2908 /* FALLTHROUGH */
2909
2910 default:
2911 goto out;
2912 }
2913
2914 if (host->ops->platform_execute_tuning) {
2915 err = host->ops->platform_execute_tuning(host, opcode);
2916 goto out;
2917 }
2918
2919 host->mmc->retune_period = tuning_count;
2920
2921 if (host->tuning_delay < 0)
2922 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2923
2924 sdhci_start_tuning(host);
2925
2926 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2927
2928 sdhci_end_tuning(host);
2929out:
2930 host->flags &= ~SDHCI_HS400_TUNING;
2931
2932 return err;
2933}
2934EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2935
2936static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2937{
2938 /* Host Controller v3.00 defines preset value registers */
2939 if (host->version < SDHCI_SPEC_300)
2940 return;
2941
2942 /*
2943	 * Only enable or disable Preset Value if it is not already in the
2944	 * requested state. Otherwise, we bail out.
2945 */
2946 if (host->preset_enabled != enable) {
2947 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2948
2949 if (enable)
2950 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2951 else
2952 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2953
2954 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2955
2956 if (enable)
2957 host->flags |= SDHCI_PV_ENABLED;
2958 else
2959 host->flags &= ~SDHCI_PV_ENABLED;
2960
2961 host->preset_enabled = enable;
2962 }
2963}
2964
2965static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2966 int err)
2967{
2968 struct sdhci_host *host = mmc_priv(mmc);
2969 struct mmc_data *data = mrq->data;
2970
2971 if (data->host_cookie != COOKIE_UNMAPPED)
2972 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2973 mmc_get_dma_dir(data));
2974
2975 data->host_cookie = COOKIE_UNMAPPED;
2976}
2977
2978static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2979{
2980 struct sdhci_host *host = mmc_priv(mmc);
2981
2982 mrq->data->host_cookie = COOKIE_UNMAPPED;
2983
2984 /*
2985 * No pre-mapping in the pre hook if we're using the bounce buffer,
2986 * for that we would need two bounce buffers since one buffer is
2987 * in flight when this is getting called.
2988 */
2989 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2990 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2991}
2992
2993static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2994{
2995 if (host->data_cmd) {
2996 host->data_cmd->error = err;
2997 sdhci_finish_mrq(host, host->data_cmd->mrq);
2998 }
2999
3000 if (host->cmd) {
3001 host->cmd->error = err;
3002 sdhci_finish_mrq(host, host->cmd->mrq);
3003 }
3004}
3005
3006static void sdhci_card_event(struct mmc_host *mmc)
3007{
3008 struct sdhci_host *host = mmc_priv(mmc);
3009 unsigned long flags;
3010 int present;
3011
3012 /* First check if client has provided their own card event */
3013 if (host->ops->card_event)
3014 host->ops->card_event(host);
3015
3016 present = mmc->ops->get_cd(mmc);
3017
3018 spin_lock_irqsave(&host->lock, flags);
3019
3020 /* Check sdhci_has_requests() first in case we are runtime suspended */
3021 if (sdhci_has_requests(host) && !present) {
3022 pr_err("%s: Card removed during transfer!\n",
3023 mmc_hostname(host->mmc));
3024 pr_err("%s: Resetting controller.\n",
3025 mmc_hostname(host->mmc));
3026
3027 sdhci_do_reset(host, SDHCI_RESET_CMD);
3028 sdhci_do_reset(host, SDHCI_RESET_DATA);
3029
3030 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3031 }
3032
3033 spin_unlock_irqrestore(&host->lock, flags);
3034}
3035
3036static const struct mmc_host_ops sdhci_ops = {
3037 .request = sdhci_request,
3038 .post_req = sdhci_post_req,
3039 .pre_req = sdhci_pre_req,
3040 .set_ios = sdhci_set_ios,
3041 .get_cd = sdhci_get_cd,
3042 .get_ro = sdhci_get_ro,
3043 .hw_reset = sdhci_hw_reset,
3044 .enable_sdio_irq = sdhci_enable_sdio_irq,
3045 .ack_sdio_irq = sdhci_ack_sdio_irq,
3046 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
3047 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
3048 .execute_tuning = sdhci_execute_tuning,
3049 .card_event = sdhci_card_event,
3050 .card_busy = sdhci_card_busy,
3051 .clk_gate_auto = sdhci_auto_clk_gate,
3052 .set_txrx_delay = sdhci_set_txrx_delay,
3053 .enable_delay_line = sdhci_enable_txrx_delay,
3054};
3055
3056/*****************************************************************************\
3057 * *
3058 * Request done *
3059 * *
3060\*****************************************************************************/
3061
3062static bool sdhci_request_done(struct sdhci_host *host)
3063{
3064 unsigned long flags;
3065 struct mmc_request *mrq;
3066 int i;
3067
3068 spin_lock_irqsave(&host->lock, flags);
3069
3070 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3071 mrq = host->mrqs_done[i];
3072 if (mrq)
3073 break;
3074 }
3075
3076 if (!mrq) {
3077 spin_unlock_irqrestore(&host->lock, flags);
3078 return true;
3079 }
3080
3081 /*
3082 * The controller needs a reset of internal state machines
3083 * upon error conditions.
3084 */
3085 if (sdhci_needs_reset(host, mrq)) {
3086 /*
3087 * Do not finish until command and data lines are available for
3088 * reset. Note there can only be one other mrq, so it cannot
3089 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3090 * would both be null.
3091 */
3092 if (host->cmd || host->data_cmd) {
3093 spin_unlock_irqrestore(&host->lock, flags);
3094 return true;
3095 }
3096
3097 /* Some controllers need this kick or reset won't work here */
3098 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3099 /* This is to force an update */
3100 host->ops->set_clock(host, host->clock);
3101
3102 /*
3103 * Spec says we should do both at the same time, but Ricoh
3104 * controllers do not like that.
3105 */
3106 sdhci_do_reset(host, SDHCI_RESET_CMD);
3107 sdhci_do_reset(host, SDHCI_RESET_DATA);
3108
3109 host->pending_reset = false;
3110 }
3111
3112 /*
3113 * Always unmap the data buffers if they were mapped by
3114 * sdhci_prepare_data() whenever we finish with a request.
3115 * This avoids leaking DMA mappings on error.
3116 */
3117 if (host->flags & SDHCI_REQ_USE_DMA) {
3118 struct mmc_data *data = mrq->data;
3119
3120 if (data && data->host_cookie == COOKIE_MAPPED) {
3121 if (host->bounce_buffer) {
3122 /*
3123 * On reads, copy the bounced data into the
3124 * sglist
3125 */
3126 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3127 unsigned int length = data->bytes_xfered;
3128
3129 if (length > host->bounce_buffer_size) {
3130 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3131 mmc_hostname(host->mmc),
3132 host->bounce_buffer_size,
3133 data->bytes_xfered);
3134 /* Cap it down and continue */
3135 length = host->bounce_buffer_size;
3136 }
3137 dma_sync_single_for_cpu(
3138 host->mmc->parent,
3139 host->bounce_addr,
3140 host->bounce_buffer_size,
3141 DMA_FROM_DEVICE);
3142 sg_copy_from_buffer(data->sg,
3143 data->sg_len,
3144 host->bounce_buffer,
3145 length);
3146 } else {
3147 /* No copying, just switch ownership */
3148 dma_sync_single_for_cpu(
3149 host->mmc->parent,
3150 host->bounce_addr,
3151 host->bounce_buffer_size,
3152 mmc_get_dma_dir(data));
3153 }
3154 } else {
3155 /* Unmap the raw data */
3156 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3157 data->sg_len,
3158 mmc_get_dma_dir(data));
3159 }
3160 data->host_cookie = COOKIE_UNMAPPED;
3161 }
3162 }
3163
3164 host->mrqs_done[i] = NULL;
3165
3166 if (!(host->mmc->caps2 & MMC_CAP2_BUS_AUTO_CLK_GATE))
3167 sdhci_auto_clk_gate(host->mmc, 0);
3168
3169 spin_unlock_irqrestore(&host->lock, flags);
3170
3171 if (host->ops->request_done)
3172 host->ops->request_done(host, mrq);
3173 else
3174 mmc_request_done(host->mmc, mrq);
3175
3176 return false;
3177}
3178
3179static void sdhci_complete_work(struct work_struct *work)
3180{
3181 struct sdhci_host *host = container_of(work, struct sdhci_host,
3182 complete_work);
3183
3184 while (!sdhci_request_done(host))
3185 ;
3186}
3187
3188static void sdhci_timeout_timer(struct timer_list *t)
3189{
3190 struct sdhci_host *host;
3191 unsigned long flags;
3192
3193 host = from_timer(host, t, timer);
3194
3195 spin_lock_irqsave(&host->lock, flags);
3196
3197 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3198 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3199 mmc_hostname(host->mmc));
3200 sdhci_dumpregs(host);
3201
3202 host->cmd->error = -ETIMEDOUT;
3203 sdhci_finish_mrq(host, host->cmd->mrq);
3204 }
3205
3206 spin_unlock_irqrestore(&host->lock, flags);
3207}
3208
3209static void sdhci_timeout_data_timer(struct timer_list *t)
3210{
3211 struct sdhci_host *host;
3212 unsigned long flags;
3213
3214 host = from_timer(host, t, data_timer);
3215
3216 spin_lock_irqsave(&host->lock, flags);
3217
3218 if (host->data || host->data_cmd ||
3219 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3220 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3221 mmc_hostname(host->mmc));
3222 sdhci_dumpregs(host);
3223
3224 if (host->data) {
3225 host->data->error = -ETIMEDOUT;
3226 __sdhci_finish_data(host, true);
3227 queue_work(host->complete_wq, &host->complete_work);
3228 } else if (host->data_cmd) {
3229 host->data_cmd->error = -ETIMEDOUT;
3230 sdhci_finish_mrq(host, host->data_cmd->mrq);
3231 } else {
3232 host->cmd->error = -ETIMEDOUT;
3233 sdhci_finish_mrq(host, host->cmd->mrq);
3234 }
3235 }
3236
3237 spin_unlock_irqrestore(&host->lock, flags);
3238}
3239
3240/*****************************************************************************\
3241 * *
3242 * Interrupt handling *
3243 * *
3244\*****************************************************************************/
3245
3246static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3247{
3248 /* Handle auto-CMD12 error */
3249 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3250 struct mmc_request *mrq = host->data_cmd->mrq;
3251 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3252 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3253 SDHCI_INT_DATA_TIMEOUT :
3254 SDHCI_INT_DATA_CRC;
3255
3256 /* Treat auto-CMD12 error the same as data error */
3257 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3258 *intmask_p |= data_err_bit;
3259 return;
3260 }
3261 }
3262
3263 if (!host->cmd) {
3264 /*
3265 * SDHCI recovers from errors by resetting the cmd and data
3266 * circuits. Until that is done, there very well might be more
3267 * interrupts, so ignore them in that case.
3268 */
3269 if (host->pending_reset)
3270 return;
3271 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3272 mmc_hostname(host->mmc), (unsigned)intmask);
3273 sdhci_dumpregs(host);
3274 return;
3275 }
3276
3277 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3278 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3279 if (intmask & SDHCI_INT_TIMEOUT)
3280 host->cmd->error = -ETIMEDOUT;
3281 else
3282 host->cmd->error = -EILSEQ;
3283
3284 /* Treat data command CRC error the same as data CRC error */
3285 if (host->cmd->data &&
3286 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3287 SDHCI_INT_CRC) {
3288 host->cmd = NULL;
3289 *intmask_p |= SDHCI_INT_DATA_CRC;
3290 return;
3291 }
3292
3293 __sdhci_finish_mrq(host, host->cmd->mrq);
3294 return;
3295 }
3296
3297 /* Handle auto-CMD23 error */
3298 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3299 struct mmc_request *mrq = host->cmd->mrq;
3300 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3301 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3302 -ETIMEDOUT :
3303 -EILSEQ;
3304
3305 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3306 mrq->sbc->error = err;
3307 __sdhci_finish_mrq(host, mrq);
3308 return;
3309 }
3310 }
3311
3312 if (intmask & SDHCI_INT_RESPONSE)
3313 sdhci_finish_command(host);
3314}
3315
3316static void sdhci_adma_show_error(struct sdhci_host *host)
3317{
3318 void *desc = host->adma_table;
3319 dma_addr_t dma = host->adma_addr;
3320
3321 sdhci_dumpregs(host);
3322
3323 while (true) {
3324 struct sdhci_adma2_64_desc *dma_desc = desc;
3325
3326 if (host->flags & SDHCI_USE_64_BIT_DMA)
3327 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3328 (unsigned long long)dma,
3329 le32_to_cpu(dma_desc->addr_hi),
3330 le32_to_cpu(dma_desc->addr_lo),
3331 le16_to_cpu(dma_desc->len),
3332 le16_to_cpu(dma_desc->cmd));
3333 else
3334 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3335 (unsigned long long)dma,
3336 le32_to_cpu(dma_desc->addr_lo),
3337 le16_to_cpu(dma_desc->len),
3338 le16_to_cpu(dma_desc->cmd));
3339
3340 desc += host->desc_sz;
3341 dma += host->desc_sz;
3342
3343 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3344 break;
3345 }
3346}
3347
3348static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3349{
3350 u32 command;
3351
3352 /* CMD19 generates _only_ Buffer Read Ready interrupt */
3353 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_ERROR)) {
3354 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3355 if (command == MMC_SEND_TUNING_BLOCK ||
3356 command == MMC_SEND_TUNING_BLOCK_HS200) {
3357 if (intmask & SDHCI_INT_ERROR)
3358 host->tuning_done = 2;
3359 else
3360 host->tuning_done = 1;
3361 wake_up(&host->buf_ready_int);
3362 return;
3363 }
3364 }
3365
3366 if (!host->data) {
3367 struct mmc_command *data_cmd = host->data_cmd;
3368
3369 /*
3370 * The "data complete" interrupt is also used to
3371 * indicate that a busy state has ended. See comment
3372 * above in sdhci_cmd_irq().
3373 */
3374 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3375 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3376 host->data_cmd = NULL;
3377 data_cmd->error = -ETIMEDOUT;
3378 __sdhci_finish_mrq(host, data_cmd->mrq);
3379 return;
3380 }
3381 if (intmask & SDHCI_INT_DATA_END) {
3382 host->data_cmd = NULL;
3383 /*
3384 * Some cards handle busy-end interrupt
3385 * before the command completed, so make
3386 * sure we do things in the proper order.
3387 */
3388 if (host->cmd == data_cmd)
3389 return;
3390
3391 __sdhci_finish_mrq(host, data_cmd->mrq);
3392 return;
3393 }
3394 }
3395
3396 /*
3397 * SDHCI recovers from errors by resetting the cmd and data
3398 * circuits. Until that is done, there very well might be more
3399 * interrupts, so ignore them in that case.
3400 */
3401 if (host->pending_reset)
3402 return;
3403
3404 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3405 mmc_hostname(host->mmc), (unsigned)intmask);
3406 sdhci_dumpregs(host);
3407
3408 return;
3409 }
3410
3411 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3412 host->data->error = -ETIMEDOUT;
3413 else if (intmask & SDHCI_INT_DATA_END_BIT)
3414 host->data->error = -EILSEQ;
3415 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3416 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3417 != MMC_BUS_TEST_R)
3418 host->data->error = -EILSEQ;
3419 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3420 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3421 intmask);
3422 sdhci_adma_show_error(host);
3423 host->data->error = -EIO;
3424 if (host->ops->adma_workaround)
3425 host->ops->adma_workaround(host, intmask);
3426 }
3427
3428 if (host->data->error)
3429 sdhci_finish_data(host);
3430 else {
3431 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3432 sdhci_transfer_pio(host);
3433
3434 /*
3435 * We currently don't do anything fancy with DMA
3436 * boundaries, but as we can't disable the feature
3437 * we need to at least restart the transfer.
3438 *
3439 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3440 * should return a valid address to continue from, but as
3441 * some controllers are faulty, don't trust them.
3442 */
3443 if (intmask & SDHCI_INT_DMA_END) {
3444 dma_addr_t dmastart, dmanow;
3445
3446 dmastart = sdhci_sdma_address(host);
3447 dmanow = dmastart + host->data->bytes_xfered;
3448 /*
3449 * Force update to the next DMA block boundary.
3450 */
3451 dmanow = (dmanow &
3452 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3453 SDHCI_DEFAULT_BOUNDARY_SIZE;
3454 host->data->bytes_xfered = dmanow - dmastart;
3455 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3456 &dmastart, host->data->bytes_xfered, &dmanow);
3457 sdhci_set_sdma_addr(host, dmanow);
3458 }
3459
3460 if (intmask & SDHCI_INT_DATA_END) {
3461 if (host->cmd == host->data_cmd) {
3462 /*
3463 * Data managed to finish before the
3464 * command completed. Make sure we do
3465 * things in the proper order.
3466 */
3467 host->data_early = 1;
3468 } else {
3469 sdhci_finish_data(host);
3470 }
3471 }
3472 }
3473}
3474
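/*
 * Completion of @mrq is deferred to the IRQ thread after a pending reset,
 * when the host always defers completion, or when DMA mappings still have
 * to be torn down. Tuning block commands are never deferred.
 */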
3475static inline bool sdhci_defer_done(struct sdhci_host *host,
3476 struct mmc_request *mrq)
3477{
3478 struct mmc_data *data = mrq->data;
3479
3480 if (mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK ||
3481 mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
3482 return false;
3483
3484 return host->pending_reset || host->always_defer_done ||
3485 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3486 data->host_cookie == COOKIE_MAPPED);
3487}
3488
3489static irqreturn_t sdhci_irq(int irq, void *dev_id)
3490{
3491 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3492 irqreturn_t result = IRQ_NONE;
3493 struct sdhci_host *host = dev_id;
3494 u32 intmask, mask, unexpected = 0;
3495 int max_loops = 16;
3496 int i;
3497
3498 spin_lock(&host->lock);
3499
3500 if (host->runtime_suspended || host->cmd_polling) {
3501 spin_unlock(&host->lock);
3502 return IRQ_NONE;
3503 }
3504
3505 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3506 if (!intmask || intmask == 0xffffffff) {
3507 result = IRQ_NONE;
3508 if (host->ops->platform_handle_none_irq)
3509 host->ops->platform_handle_none_irq(host);
3510 goto out;
3511 }
3512
3513 do {
3514 DBG("IRQ status 0x%08x\n", intmask);
3515
3516 if (host->ops->irq) {
3517 intmask = host->ops->irq(host, intmask);
3518 if (!intmask)
3519 goto cont;
3520 }
3521
3522 /* Clear selected interrupts. */
3523 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3524 SDHCI_INT_BUS_POWER);
3525 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3526
3527 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3528 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3529 SDHCI_CARD_PRESENT;
3530
3531 /*
3532			 * There is an observation on i.MX eSDHC: the INSERT
3533			 * bit is immediately set again when it gets
3534			 * cleared, if a card is inserted. We have to mask
3535			 * the irq to prevent an interrupt storm which would
3536			 * freeze the system. The REMOVE bit behaves the
3537			 * same way.
3538			 *
3539			 * More testing is needed here to ensure it works
3540			 * for other platforms though.
3541 */
3542 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3543 SDHCI_INT_CARD_REMOVE);
3544 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3545 SDHCI_INT_CARD_INSERT;
3546 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3547 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3548
3549 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3550 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3551
3552 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3553 SDHCI_INT_CARD_REMOVE);
3554 result = IRQ_WAKE_THREAD;
3555 }
3556
3557 if (intmask & SDHCI_INT_CMD_MASK)
3558 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3559
3560 if (intmask & SDHCI_INT_DATA_MASK)
3561 sdhci_data_irq(host, intmask & (SDHCI_INT_DATA_MASK | SDHCI_INT_ERROR));
3562
3563 if (intmask & SDHCI_INT_BUS_POWER)
3564 pr_err("%s: Card is consuming too much power!\n",
3565 mmc_hostname(host->mmc));
3566
3567 if (intmask & SDHCI_INT_RETUNE)
3568 mmc_retune_needed(host->mmc);
3569
3570 if ((intmask & SDHCI_INT_CARD_INT) &&
3571 (host->ier & SDHCI_INT_CARD_INT)) {
3572 sdhci_enable_sdio_irq_nolock(host, false);
3573 sdio_signal_irq(host->mmc);
3574 }
3575
3576 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3577 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3578 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3579 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3580
3581 if (intmask) {
3582 unexpected |= intmask;
3583 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3584 }
3585cont:
3586 if (result == IRQ_NONE)
3587 result = IRQ_HANDLED;
3588
3589 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3590 } while (intmask && --max_loops);
3591
3592 /* Determine if mrqs can be completed immediately */
3593 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3594 struct mmc_request *mrq = host->mrqs_done[i];
3595
3596 if (!mrq)
3597 continue;
3598
3599 if (sdhci_defer_done(host, mrq)) {
3600 result = IRQ_WAKE_THREAD;
3601 } else {
3602 mrqs_done[i] = mrq;
3603 host->mrqs_done[i] = NULL;
3604 }
3605 }
3606out:
3607 if (host->deferred_cmd)
3608 result = IRQ_WAKE_THREAD;
3609
3610 spin_unlock(&host->lock);
3611
3612 /* Process mrqs ready for immediate completion */
3613 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3614 if (!mrqs_done[i])
3615 continue;
3616
3617 if (host->ops->request_done)
3618 host->ops->request_done(host, mrqs_done[i]);
3619 else
3620 mmc_request_done(host->mmc, mrqs_done[i]);
3621 }
3622
3623 if (unexpected) {
3624 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3625 mmc_hostname(host->mmc), unexpected);
3626 sdhci_dumpregs(host);
3627 }
3628
3629 return result;
3630}
3631
3632static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3633{
3634 struct sdhci_host *host = dev_id;
3635 struct mmc_command *cmd;
3636 unsigned long flags;
3637 u32 isr;
3638
3639 while (!sdhci_request_done(host))
3640 ;
3641
3642 spin_lock_irqsave(&host->lock, flags);
3643
3644 isr = host->thread_isr;
3645 host->thread_isr = 0;
3646
3647 cmd = host->deferred_cmd;
3648 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3649 sdhci_finish_mrq(host, cmd->mrq);
3650
3651 spin_unlock_irqrestore(&host->lock, flags);
3652
3653 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3654 struct mmc_host *mmc = host->mmc;
3655
3656 mmc->ops->card_event(mmc);
3657 mmc_detect_change(mmc, msecs_to_jiffies(200));
3658 }
3659
3660 return IRQ_HANDLED;
3661}
3662
3663/*****************************************************************************\
3664 * *
3665 * Suspend/resume *
3666 * *
3667\*****************************************************************************/
3668
3669#ifdef CONFIG_PM
3670
3671static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3672{
3673 return mmc_card_is_removable(host->mmc) &&
3674 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3675 !mmc_can_gpio_cd(host->mmc);
3676}
3677
3678/*
3679 * To enable wakeup events, the corresponding events have to be enabled in
3680 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3681 * Table' in the SD Host Controller Standard Specification.
3682 * It is useless to restore SDHCI_INT_ENABLE state in
3683 * sdhci_disable_irq_wakeups() since it will be set by
3684 * sdhci_enable_card_detection() or sdhci_init().
3685 */
3686static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3687{
3688 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3689 SDHCI_WAKE_ON_INT;
3690 u32 irq_val = 0;
3691 u8 wake_val = 0;
3692 u8 val;
3693
3694 if (sdhci_cd_irq_can_wakeup(host)) {
3695 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3696 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3697 }
3698
3699 if (mmc_card_wake_sdio_irq(host->mmc)) {
3700 wake_val |= SDHCI_WAKE_ON_INT;
3701 irq_val |= SDHCI_INT_CARD_INT;
3702 }
3703
3704 if (!irq_val)
3705 return false;
3706
3707 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3708 val &= ~mask;
3709 val |= wake_val;
3710 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3711
3712 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3713
3714 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3715
3716 return host->irq_wake_enabled;
3717}
3718
3719static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3720{
3721 u8 val;
3722 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3723 | SDHCI_WAKE_ON_INT;
3724
3725 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3726 val &= ~mask;
3727 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3728
3729 if (host->ops->clr_wakeup_event)
3730 host->ops->clr_wakeup_event(host);
3731
3732 disable_irq_wake(host->irq);
3733
3734 host->irq_wake_enabled = false;
3735}
3736
3737int sdhci_suspend_host(struct sdhci_host *host)
3738{
3739 sdhci_disable_card_detection(host);
3740
3741 mmc_retune_timer_stop(host->mmc);
3742
3743 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3744 !sdhci_enable_irq_wakeups(host)) {
3745 host->ier = 0;
3746 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3747 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3748 free_irq(host->irq, host);
3749 }
3750
3751 return 0;
3752}
3753
3754EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3755
3756int sdhci_resume_host(struct sdhci_host *host)
3757{
3758 struct mmc_host *mmc = host->mmc;
3759 int ret = 0;
3760
3761 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3762 if (host->ops->enable_dma)
3763 host->ops->enable_dma(host);
3764 }
3765
3766 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3767 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3768 /* Card keeps power but host controller does not */
3769 sdhci_init(host, 0);
3770 host->pwr = 0;
3771 host->clock = 0;
3772 host->reinit_uhs = true;
3773 mmc->ops->set_ios(mmc, &mmc->ios);
3774 } else {
3775 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3776 }
3777
3778 if (host->irq_wake_enabled) {
3779 sdhci_disable_irq_wakeups(host);
3780 } else {
3781 ret = request_threaded_irq(host->irq, sdhci_irq,
3782 sdhci_thread_irq, IRQF_SHARED,
3783 mmc_hostname(host->mmc), host);
3784 if (ret)
3785 return ret;
3786 }
3787
3788 sdhci_enable_card_detection(host);
3789
3790 return ret;
3791}
3792
3793EXPORT_SYMBOL_GPL(sdhci_resume_host);
3794
3795int sdhci_runtime_suspend_host(struct sdhci_host *host)
3796{
3797 unsigned long flags;
3798
3799 mmc_retune_timer_stop(host->mmc);
3800
3801 spin_lock_irqsave(&host->lock, flags);
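	/* Keep only the SDIO card interrupt enabled while runtime suspended */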
3802 host->ier &= SDHCI_INT_CARD_INT;
3803 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3804 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3805 spin_unlock_irqrestore(&host->lock, flags);
3806
3807 synchronize_hardirq(host->irq);
3808
3809 spin_lock_irqsave(&host->lock, flags);
3810 host->runtime_suspended = true;
3811 spin_unlock_irqrestore(&host->lock, flags);
3812
3813 return 0;
3814}
3815EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3816
3817int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3818{
3819 struct mmc_host *mmc = host->mmc;
3820 unsigned long flags;
3821 int host_flags = host->flags;
3822
3823 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3824 if (host->ops->enable_dma)
3825 host->ops->enable_dma(host);
3826 }
3827
3828 sdhci_init(host, soft_reset);
3829
3830 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3831 mmc->ios.power_mode != MMC_POWER_OFF) {
3832 /* Force clock and power re-program */
3833 host->pwr = 0;
3834 host->clock = 0;
3835 host->reinit_uhs = true;
3836 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3837 mmc->ops->set_ios(mmc, &mmc->ios);
3838
3839 if ((host_flags & SDHCI_PV_ENABLED) &&
3840 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3841 spin_lock_irqsave(&host->lock, flags);
3842 sdhci_enable_preset_value(host, true);
3843 spin_unlock_irqrestore(&host->lock, flags);
3844 }
3845
3846 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3847 mmc->ops->hs400_enhanced_strobe)
3848 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3849 }
3850
3851 spin_lock_irqsave(&host->lock, flags);
3852
3853 host->runtime_suspended = false;
3854
3855 /* Enable SDIO IRQ */
3856 if (sdio_irq_claimed(mmc))
3857 sdhci_enable_sdio_irq_nolock(host, true);
3858
3859 /* Enable Card Detection */
3860 sdhci_enable_card_detection(host);
3861
3862 spin_unlock_irqrestore(&host->lock, flags);
3863
3864 return 0;
3865}
3866EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3867
3868#endif /* CONFIG_PM */
3869
3870/*****************************************************************************\
3871 * *
3872 * Command Queue Engine (CQE) helpers *
3873 * *
3874\*****************************************************************************/
3875
3876void sdhci_cqe_enable(struct mmc_host *mmc)
3877{
3878 struct sdhci_host *host = mmc_priv(mmc);
3879 unsigned long flags;
3880 u8 ctrl;
3881
3882 spin_lock_irqsave(&host->lock, flags);
3883
3884 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3885 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3886 /*
3887	 * Hosts from v4.10 support the ADMA3 DMA type.
3888	 * ADMA3 uses integrated descriptors, which are better suited to
3889	 * command queuing since both command and transfer descriptors are fetched.
3890 */
3891 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3892 ctrl |= SDHCI_CTRL_ADMA3;
3893 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3894 ctrl |= SDHCI_CTRL_ADMA64;
3895 else
3896 ctrl |= SDHCI_CTRL_ADMA32;
3897 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3898
3899 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
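	/* Program a fixed 512-byte block size for CQE data transfers */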
3900 SDHCI_BLOCK_SIZE);
3901
3902 /* Set maximum timeout */
3903 sdhci_set_timeout(host, NULL);
3904
3905 host->ier = host->cqe_ier;
3906
3907 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3908 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3909
3910 host->cqe_on = true;
3911
3912 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3913 mmc_hostname(mmc), host->ier,
3914 sdhci_readl(host, SDHCI_INT_STATUS));
3915
3916 spin_unlock_irqrestore(&host->lock, flags);
3917}
3918EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3919
3920void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3921{
3922 struct sdhci_host *host = mmc_priv(mmc);
3923 unsigned long flags;
3924
3925 spin_lock_irqsave(&host->lock, flags);
3926
3927 sdhci_set_default_irqs(host);
3928
3929 host->cqe_on = false;
3930
3931 if (recovery) {
3932 sdhci_do_reset(host, SDHCI_RESET_CMD);
3933 sdhci_do_reset(host, SDHCI_RESET_DATA);
3934 }
3935
3936 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3937 mmc_hostname(mmc), host->ier,
3938 sdhci_readl(host, SDHCI_INT_STATUS));
3939
3940 spin_unlock_irqrestore(&host->lock, flags);
3941}
3942EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3943
3944bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3945 int *data_error)
3946{
3947 u32 mask;
3948
3949 if (!host->cqe_on)
3950 return false;
3951
3952 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3953 *cmd_error = -EILSEQ;
3954 else if (intmask & SDHCI_INT_TIMEOUT)
3955 *cmd_error = -ETIMEDOUT;
3956 else
3957 *cmd_error = 0;
3958
3959 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3960 *data_error = -EILSEQ;
3961 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3962 *data_error = -ETIMEDOUT;
3963 else if (intmask & SDHCI_INT_ADMA_ERROR)
3964 *data_error = -EIO;
3965 else
3966 *data_error = 0;
3967
3968 /* Clear selected interrupts. */
3969 mask = intmask & host->cqe_ier;
3970 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3971
3972 if (intmask & SDHCI_INT_BUS_POWER)
3973 pr_err("%s: Card is consuming too much power!\n",
3974 mmc_hostname(host->mmc));
3975
3976 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3977 if (intmask) {
3978 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3979 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3980 mmc_hostname(host->mmc), intmask);
3981 sdhci_dumpregs(host);
3982 }
3983
3984 return true;
3985}
3986EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3987
3988/*****************************************************************************\
3989 * *
3990 * Device allocation/registration *
3991 * *
3992\*****************************************************************************/
3993
3994struct sdhci_host *sdhci_alloc_host(struct device *dev,
3995 size_t priv_size)
3996{
3997 struct mmc_host *mmc;
3998 struct sdhci_host *host;
3999
4000 WARN_ON(dev == NULL);
4001
4002 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
4003 if (!mmc)
4004 return ERR_PTR(-ENOMEM);
4005
4006 host = mmc_priv(mmc);
4007 host->mmc = mmc;
4008 host->mmc_host_ops = sdhci_ops;
4009 mmc->ops = &host->mmc_host_ops;
4010
4011 host->flags = SDHCI_SIGNALING_330;
4012
4013 host->cqe_ier = SDHCI_CQE_INT_MASK;
4014 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
4015
4016 host->tuning_delay = -1;
4017 host->tuning_loop_count = MAX_TUNING_LOOP;
4018
4019 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
4020
4021 /*
4022 * The DMA table descriptor count is calculated as the maximum
4023 * number of segments times 2, to allow for an alignment
4024 * descriptor for each segment, plus 1 for a nop end descriptor.
4025 */
4026 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
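	/*
	 * Worked example (assuming the usual SDHCI_MAX_SEGS of 128): the
	 * descriptor table then holds 128 * 2 + 1 = 257 entries - one data
	 * descriptor plus one alignment descriptor per segment, and a final
	 * nop end descriptor.
	 */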
4027 host->max_adma = 65536;
4028
4029 return host;
4030}
4031
4032EXPORT_SYMBOL_GPL(sdhci_alloc_host);
4033
4034static int sdhci_set_dma_mask(struct sdhci_host *host)
4035{
4036 struct mmc_host *mmc = host->mmc;
4037 struct device *dev = mmc_dev(mmc);
4038 int ret = -EINVAL;
4039
4040 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
4041 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4042
4043 /* Try 64-bit mask if hardware is capable of it */
4044 if (host->flags & SDHCI_USE_64_BIT_DMA) {
4045 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4046 if (ret) {
4047 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4048 mmc_hostname(mmc));
4049 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4050 }
4051 }
4052
4053 /* 32-bit mask as default & fallback */
4054 if (ret) {
4055 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4056 if (ret)
4057 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4058 mmc_hostname(mmc));
4059 }
4060
4061 return ret;
4062}
4063
4064void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4065 const u32 *caps, const u32 *caps1)
4066{
4067 u16 v;
4068 u64 dt_caps_mask = 0;
4069 u64 dt_caps = 0;
4070
4071 if (host->read_caps)
4072 return;
4073
4074 host->read_caps = true;
4075
4076 if (debug_quirks)
4077 host->quirks = debug_quirks;
4078
4079 if (debug_quirks2)
4080 host->quirks2 = debug_quirks2;
4081
4082 sdhci_do_reset(host, SDHCI_RESET_ALL);
4083
4084 if (host->v4_mode)
4085 sdhci_do_enable_v4_mode(host);
4086
4087 of_property_read_u64(mmc_dev(host->mmc)->of_node,
4088 "sdhci-caps-mask", &dt_caps_mask);
4089 of_property_read_u64(mmc_dev(host->mmc)->of_node,
4090 "sdhci-caps", &dt_caps);
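	/*
	 * The two properties form one 64-bit value: the first cell maps to
	 * SDHCI_CAPABILITIES_1 (upper 32 bits) and the second to
	 * SDHCI_CAPABILITIES (lower 32 bits). As a hypothetical example, a
	 * board that must hide the high-speed capability could specify
	 * sdhci-caps-mask = <0x0 0x00200000>; to clear SDHCI_CAN_DO_HISPD.
	 */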
4091
4092 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4093 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4094
4095 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4096 return;
4097
4098 if (caps) {
4099 host->caps = *caps;
4100 } else {
4101 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4102 host->caps &= ~lower_32_bits(dt_caps_mask);
4103 host->caps |= lower_32_bits(dt_caps);
4104 }
4105
4106 if (host->version < SDHCI_SPEC_300)
4107 return;
4108
4109 if (caps1) {
4110 host->caps1 = *caps1;
4111 } else {
4112 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4113 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4114 host->caps1 |= upper_32_bits(dt_caps);
4115 }
4116}
4117EXPORT_SYMBOL_GPL(__sdhci_read_caps);
4118
4119static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4120{
4121 struct mmc_host *mmc = host->mmc;
4122 unsigned int max_blocks;
4123 unsigned int bounce_size;
4124 int ret;
4125
4126 /*
4127	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4128	 * has diminishing returns, most likely because SD/MMC cards are
4129	 * usually optimized to handle requests of this size.
4130 */
4131 bounce_size = SZ_64K;
4132 /*
4133	 * Adjust the bounce buffer down to the maximum request size if
4134	 * that is smaller; otherwise the maximum request size is later
4135	 * clamped to the bounce buffer size.
4136 */
4137 if (mmc->max_req_size < bounce_size)
4138 bounce_size = mmc->max_req_size;
4139 max_blocks = bounce_size / 512;
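	/*
	 * With the 64KB cap above, a bounce buffer of 65536 bytes holds
	 * 65536 / 512 = 128 blocks, so up to 128 scattered segments can be
	 * coalesced into a single request.
	 */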
4140
4141 /*
4142	 * When we support just one segment, we can get significant
4143	 * speedups with the help of a bounce buffer that groups
4144	 * scattered reads/writes together.
4145 */
4146 host->bounce_buffer = devm_kmalloc(mmc->parent,
4147 bounce_size,
4148 GFP_KERNEL);
4149 if (!host->bounce_buffer) {
4150 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4151 mmc_hostname(mmc),
4152 bounce_size);
4153 /*
4154		 * Returning early here makes sure we proceed with
4155		 * mmc->max_segs == 1.
4156 */
4157 return;
4158 }
4159
4160 host->bounce_addr = dma_map_single(mmc->parent,
4161 host->bounce_buffer,
4162 bounce_size,
4163 DMA_BIDIRECTIONAL);
4164 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
4165 if (ret)
4166 /* Again fall back to max_segs == 1 */
4167 return;
4168 host->bounce_buffer_size = bounce_size;
4169
4170 /* Lie about this since we're bouncing */
4171 mmc->max_segs = max_blocks;
4172 mmc->max_seg_size = bounce_size;
4173 mmc->max_req_size = bounce_size;
4174
4175	pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
4176 mmc_hostname(mmc), max_blocks, bounce_size);
4177}
4178
4179static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4180{
4181 /*
4182	 * According to the SD Host Controller spec v4.10, bit[27] of the
4183	 * Capabilities Register (added in version 4.10) indicates 64-bit
4184	 * System Address support for V4 mode.
4185 */
4186 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4187 return host->caps & SDHCI_CAN_64BIT_V4;
4188
4189 return host->caps & SDHCI_CAN_64BIT;
4190}
4191
4192int sdhci_setup_host(struct sdhci_host *host)
4193{
4194 struct mmc_host *mmc;
4195 u32 max_current_caps;
4196 unsigned int ocr_avail;
4197 unsigned int override_timeout_clk;
4198 u32 max_clk;
4199 int ret;
4200
4201 WARN_ON(host == NULL);
4202 if (host == NULL)
4203 return -EINVAL;
4204
4205 mmc = host->mmc;
4206
4207 /*
4208 * If there are external regulators, get them. Note this must be done
4209 * early before resetting the host and reading the capabilities so that
4210 * the host can take the appropriate action if regulators are not
4211 * available.
4212 */
4213 ret = mmc_regulator_get_supply(mmc);
4214 if (ret)
4215 return ret;
4216
4217 DBG("Version: 0x%08x | Present: 0x%08x\n",
4218 sdhci_readw(host, SDHCI_HOST_VERSION),
4219 sdhci_readl(host, SDHCI_PRESENT_STATE));
4220 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4221 sdhci_readl(host, SDHCI_CAPABILITIES),
4222 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4223
4224 sdhci_read_caps(host);
4225
4226 override_timeout_clk = host->timeout_clk;
4227
4228 if (host->version > SDHCI_SPEC_420) {
4229 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4230 mmc_hostname(mmc), host->version);
4231 }
4232
4233 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4234 host->flags |= SDHCI_USE_SDMA;
4235 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4236 DBG("Controller doesn't have SDMA capability\n");
4237 else
4238 host->flags |= SDHCI_USE_SDMA;
4239
4240 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4241 (host->flags & SDHCI_USE_SDMA)) {
4242 DBG("Disabling DMA as it is marked broken\n");
4243 host->flags &= ~SDHCI_USE_SDMA;
4244 }
4245
4246 if ((host->version >= SDHCI_SPEC_200) &&
4247 (host->caps & SDHCI_CAN_DO_ADMA2))
4248 host->flags |= SDHCI_USE_ADMA;
4249
4250 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4251 (host->flags & SDHCI_USE_ADMA)) {
4252 DBG("Disabling ADMA as it is marked broken\n");
4253 host->flags &= ~SDHCI_USE_ADMA;
4254 }
4255
4256 if (sdhci_can_64bit_dma(host))
4257 host->flags |= SDHCI_USE_64_BIT_DMA;
4258
4259 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4260 if (host->ops->set_dma_mask)
4261 ret = host->ops->set_dma_mask(host);
4262 else
4263 ret = sdhci_set_dma_mask(host);
4264
4265 if (!ret && host->ops->enable_dma)
4266 ret = host->ops->enable_dma(host);
4267
4268 if (ret) {
4269 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4270 mmc_hostname(mmc));
4271 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4272
4273 ret = 0;
4274 }
4275 }
4276
4277	/* SDMA does not support 64-bit DMA unless v4 mode is set */
4278 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4279 host->flags &= ~SDHCI_USE_SDMA;
4280
4281 if (host->flags & SDHCI_USE_ADMA) {
4282 dma_addr_t dma;
4283 void *buf;
4284
4285 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4286 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4287 else if (!host->alloc_desc_sz)
4288 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4289
4290 host->desc_sz = host->alloc_desc_sz;
4291 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4292
4293 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4294 /*
4295		 * dma_alloc_coherent() returns zeroed memory, so the reserved
4296		 * high 32 bits of 128-bit descriptors never need to be written.
4297 */
4298 buf = dma_alloc_coherent(mmc_dev(mmc),
4299 host->align_buffer_sz + host->adma_table_sz,
4300 &dma, GFP_KERNEL);
4301 if (!buf) {
4302 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4303 mmc_hostname(mmc));
4304 host->flags &= ~SDHCI_USE_ADMA;
4305 } else if ((dma + host->align_buffer_sz) &
4306 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4307 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4308 mmc_hostname(mmc));
4309 host->flags &= ~SDHCI_USE_ADMA;
4310 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4311 host->adma_table_sz, buf, dma);
4312 } else {
4313 host->align_buffer = buf;
4314 host->align_addr = dma;
4315
4316 host->adma_table = buf + host->align_buffer_sz;
4317 host->adma_addr = dma + host->align_buffer_sz;
4318 }
4319 }
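	/*
	 * Resulting layout of the single coherent allocation (sketch): the
	 * alignment buffer (SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN bytes) comes
	 * first at align_addr, immediately followed by the descriptor table
	 * of adma_table_cnt * desc_sz bytes at adma_addr.
	 */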
4320
4321 /*
4322 * If we use DMA, then it's up to the caller to set the DMA
4323 * mask, but PIO does not need the hw shim so we set a new
4324 * mask here in that case.
4325 */
4326 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4327 host->dma_mask = DMA_BIT_MASK(64);
4328 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4329 }
4330
4331 if (host->version >= SDHCI_SPEC_300)
4332 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
4333 >> SDHCI_CLOCK_BASE_SHIFT;
4334 else
4335 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
4336 >> SDHCI_CLOCK_BASE_SHIFT;
4337
4338 host->max_clk *= 1000000;
4339 if (host->max_clk == 0 || host->quirks &
4340 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4341 if (!host->ops->get_max_clock) {
4342 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4343 mmc_hostname(mmc));
4344 ret = -ENODEV;
4345 goto undma;
4346 }
4347 host->max_clk = host->ops->get_max_clock(host);
4348 }
4349
4350 /*
4351 * In case of Host Controller v3.00, find out whether clock
4352 * multiplier is supported.
4353 */
4354 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
4355 SDHCI_CLOCK_MUL_SHIFT;
4356
4357 /*
4358	 * If the value in Clock Multiplier is 0, programmable clock mode
4359	 * is not supported; otherwise the actual clock multiplier is one
4360	 * more than the value of Clock Multiplier in the Capabilities
4361	 * Register.
4362 */
4363 if (host->clk_mul)
4364 host->clk_mul += 1;
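	/*
	 * Example: a Clock Multiplier field of 9 means clk_mul becomes 10,
	 * i.e. the programmable clock can run at up to 10 times the base
	 * clock frequency.
	 */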
4365
4366 /*
4367 * Set host parameters.
4368 */
4369 max_clk = host->max_clk;
4370
4371 if (host->ops->get_min_clock)
4372 mmc->f_min = host->ops->get_min_clock(host);
4373 else if (host->version >= SDHCI_SPEC_300) {
4374 if (host->clk_mul)
4375 max_clk = host->max_clk * host->clk_mul;
4376 /*
4377 * Divided Clock Mode minimum clock rate is always less than
4378 * Programmable Clock Mode minimum clock rate.
4379 */
4380 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4381 } else
4382 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
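	/*
	 * Worked example, using the spec-defined maximum divisors (2046 for
	 * v3.00+, 256 for earlier controllers): a 200 MHz base clock gives
	 * an f_min of roughly 97.8 kHz on a v3 host and about 781 kHz on a
	 * v2 host.
	 */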
4383
4384 if (!mmc->f_max || mmc->f_max > max_clk)
4385 mmc->f_max = max_clk;
4386
4387 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4388 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
4389 SDHCI_TIMEOUT_CLK_SHIFT;
4390
4391 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4392 host->timeout_clk *= 1000;
4393
4394 if (host->timeout_clk == 0) {
4395 if (!host->ops->get_timeout_clock) {
4396 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4397 mmc_hostname(mmc));
4398 ret = -ENODEV;
4399 goto undma;
4400 }
4401
4402 host->timeout_clk =
4403 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4404 1000);
4405 }
4406
4407 if (override_timeout_clk)
4408 host->timeout_clk = override_timeout_clk;
4409
4410 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4411 host->ops->get_max_timeout_count(host) : 1 << 27;
4412 mmc->max_busy_timeout /= host->timeout_clk;
4413 }
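	/*
	 * Sketch of the arithmetic above: with the default maximum timeout
	 * count of 1 << 27 cycles and a purely illustrative 48 MHz timeout
	 * clock (48000 kHz), max_busy_timeout works out to roughly
	 * 134217728 / 48000 ~= 2796, which the core treats as milliseconds.
	 */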
4414
4415 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4416 !host->ops->get_max_timeout_count)
4417 mmc->max_busy_timeout = 0;
4418
4419 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
4420 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4421
4422 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4423 host->flags |= SDHCI_AUTO_CMD12;
4424
4425 /*
4426	 * For v3 mode, Auto-CMD23 only works in ADMA or PIO.
4427 * For v4 mode, SDMA may use Auto-CMD23 as well.
4428 */
4429 if ((host->version >= SDHCI_SPEC_300) &&
4430 ((host->flags & SDHCI_USE_ADMA) ||
4431 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4432 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4433 host->flags |= SDHCI_AUTO_CMD23;
4434 DBG("Auto-CMD23 available\n");
4435 } else {
4436 DBG("Auto-CMD23 unavailable\n");
4437 }
4438
4439 /*
4440 * A controller may support 8-bit width, but the board itself
4441 * might not have the pins brought out. Boards that support
4442 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4443 * their platform code before calling sdhci_add_host(), and we
4444 * won't assume 8-bit width for hosts without that CAP.
4445 */
4446 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4447 mmc->caps |= MMC_CAP_4_BIT_DATA;
4448
4449 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4450 mmc->caps &= ~MMC_CAP_CMD23;
4451
4452 if (host->caps & SDHCI_CAN_DO_HISPD)
4453 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4454
4455 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4456 mmc_card_is_removable(mmc) &&
4457 mmc_gpio_get_cd(host->mmc) < 0)
4458 mmc->caps |= MMC_CAP_NEEDS_POLL;
4459
4460 if (!IS_ERR(mmc->supply.vqmmc)) {
4461 ret = mmc_regulator_enable_vqmmc(mmc);
4462
4463 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4464 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4465 1950000))
4466 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4467 SDHCI_SUPPORT_SDR50 |
4468 SDHCI_SUPPORT_DDR50);
4469
4470 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4471 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4472 3600000))
4473 host->flags &= ~SDHCI_SIGNALING_330;
4474
4475 if (ret) {
4476 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4477 mmc_hostname(mmc), ret);
4478 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4479 }
4480 }
4481
4482 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4483 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4484 SDHCI_SUPPORT_DDR50);
4485 /*
4486 * The SDHCI controller in a SoC might support HS200/HS400
4487 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4488	 * but if the board is wired such that the IO lines are not
4489	 * connected to 1.8V then HS200/HS400 cannot be supported.
4490	 * Disable HS200/HS400 if the board does not have 1.8V connected
4491	 * to the IO lines. (The same applies to the other 1.8V modes.)
4492 */
4493 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4494 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4495 }
4496
4497 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4498 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4499 SDHCI_SUPPORT_DDR50))
4500 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4501
4502	/* SDR104 support also implies SDR50 support */
4503 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4504 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4505 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4506 * field can be promoted to support HS200.
4507 */
4508 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4509 mmc->caps2 |= MMC_CAP2_HS200;
4510 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4511 mmc->caps |= MMC_CAP_UHS_SDR50;
4512 }
4513
4514 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4515 (host->caps1 & SDHCI_SUPPORT_HS400))
4516 mmc->caps2 |= MMC_CAP2_HS400;
4517
4518 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4519 (IS_ERR(mmc->supply.vqmmc) ||
4520 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4521 1300000)))
4522 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4523
4524 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4525 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4526 mmc->caps |= MMC_CAP_UHS_DDR50;
4527
4528 /* Does the host need tuning for SDR50? */
4529 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4530 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4531
4532 /* Driver Type(s) (A, C, D) supported by the host */
4533 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4534 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4535 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4536 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4537 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4538 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4539
4540 if (host->ops->host_caps_disable)
4541 host->ops->host_caps_disable(host);
4542
4543 /* Initial value for re-tuning timer count */
4544 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4545 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4546
4547 /*
4548	 * If the Re-tuning Timer is not disabled, the actual re-tuning
4549	 * interval is 2 ^ (n - 1).
4550 */
4551 if (host->tuning_count)
4552 host->tuning_count = 1 << (host->tuning_count - 1);
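	/*
	 * Example: a Re-tuning Timer Count field of 4 gives a re-tuning
	 * interval of 1 << (4 - 1) = 8, which the SDHCI spec defines as 8
	 * seconds between re-tuning cycles.
	 */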
4553
4554 /* Re-tuning mode supported by the Host Controller */
4555 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
4556 SDHCI_RETUNING_MODE_SHIFT;
4557
4558 ocr_avail = 0;
4559
4560 /*
4561 * According to SD Host Controller spec v3.00, if the Host System
4562	 * can supply more than 150mA, the Host Driver should set XPC to 1. Also
4563 * the value is meaningful only if Voltage Support in the Capabilities
4564 * register is set. The actual current value is 4 times the register
4565 * value.
4566 */
4567 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4568 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4569 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4570 if (curr > 0) {
4571
4572 /* convert to SDHCI_MAX_CURRENT format */
4573 curr = curr/1000; /* convert to mA */
4574 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4575
4576 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4577 max_current_caps =
4578 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4579 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4580 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
4581 }
4582 }
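	/*
	 * Sketch of the fallback above with an illustrative regulator limit
	 * of 800000 uA: 800000 / 1000 = 800 mA, divided by the 4 mA register
	 * step gives 200, which is below SDHCI_MAX_CURRENT_LIMIT and is then
	 * replicated into the 3.3V, 3.0V and 1.8V fields of max_current_caps.
	 */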
4583
4584 if (host->caps & SDHCI_CAN_VDD_330) {
4585 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4586
4587 mmc->max_current_330 = ((max_current_caps &
4588 SDHCI_MAX_CURRENT_330_MASK) >>
4589 SDHCI_MAX_CURRENT_330_SHIFT) *
4590 SDHCI_MAX_CURRENT_MULTIPLIER;
4591 }
4592 if (host->caps & SDHCI_CAN_VDD_300) {
4593 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4594
4595 mmc->max_current_300 = ((max_current_caps &
4596 SDHCI_MAX_CURRENT_300_MASK) >>
4597 SDHCI_MAX_CURRENT_300_SHIFT) *
4598 SDHCI_MAX_CURRENT_MULTIPLIER;
4599 }
4600 if (host->caps & SDHCI_CAN_VDD_180) {
4601 ocr_avail |= MMC_VDD_165_195;
4602
4603 mmc->max_current_180 = ((max_current_caps &
4604 SDHCI_MAX_CURRENT_180_MASK) >>
4605 SDHCI_MAX_CURRENT_180_SHIFT) *
4606 SDHCI_MAX_CURRENT_MULTIPLIER;
4607 }
4608
4609 /* If OCR set by host, use it instead. */
4610 if (host->ocr_mask)
4611 ocr_avail = host->ocr_mask;
4612
4613 /* If OCR set by external regulators, give it highest prio. */
4614 if (mmc->ocr_avail)
4615 ocr_avail = mmc->ocr_avail;
4616
4617 mmc->ocr_avail = ocr_avail;
4618 mmc->ocr_avail_sdio = ocr_avail;
4619 host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
4620 MMC_VDD_29_30 | MMC_VDD_30_31;
4621 if (host->ocr_avail_sdio)
4622 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4623 mmc->ocr_avail_sd = ocr_avail;
4624 if (host->ocr_avail_sd)
4625 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4626 else /* normal SD controllers don't support 1.8V */
4627 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4628 mmc->ocr_avail_mmc = ocr_avail;
4629 if (host->ocr_avail_mmc)
4630 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4631
4632 if (mmc->ocr_avail == 0) {
4633		pr_err("%s: Hardware doesn't report any supported voltages.\n",
4634 mmc_hostname(mmc));
4635 ret = -ENODEV;
4636 goto unreg;
4637 }
4638
4639 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4640 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4641 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4642 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4643 host->flags |= SDHCI_SIGNALING_180;
4644
4645 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4646 host->flags |= SDHCI_SIGNALING_120;
4647
4648 spin_lock_init(&host->lock);
4649
4650 /*
4651	 * Maximum request size in one transfer. Limited by the SDMA boundary
4652	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4653	 * limit is smaller anyway.
4654 */
4655 mmc->max_req_size = 524288;
4656
4657 /*
4658 * Maximum number of segments. Depends on if the hardware
4659 * can do scatter/gather or not.
4660 */
4661 if (host->flags & SDHCI_USE_ADMA) {
4662 mmc->max_segs = SDHCI_MAX_SEGS;
4663 } else if (host->flags & SDHCI_USE_SDMA) {
4664 mmc->max_segs = 1;
4665 if (swiotlb_max_segment()) {
4666 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4667 IO_TLB_SEGSIZE;
4668 mmc->max_req_size = min(mmc->max_req_size,
4669 max_req_size);
4670 }
4671 } else { /* PIO */
4672 mmc->max_segs = SDHCI_MAX_SEGS;
4673 }
4674
4675 /*
4676 * Maximum segment size. Could be one segment with the maximum number
4677 * of bytes. When doing hardware scatter/gather, each entry cannot
4678 * be larger than 64 KiB though.
4679 */
4680 if (host->flags & SDHCI_USE_ADMA) {
4681 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4682 host->max_adma = 65532; /* 32-bit alignment */
4683 mmc->max_seg_size = 65535;
4684 } else {
4685 mmc->max_seg_size = 65536;
4686 }
4687 } else {
4688 mmc->max_seg_size = mmc->max_req_size;
4689 }
4690
4691 /*
4692 * Maximum block size. This varies from controller to controller and
4693 * is specified in the capabilities register.
4694 */
4695 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4696 mmc->max_blk_size = 2;
4697 } else {
4698 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4699 SDHCI_MAX_BLOCK_SHIFT;
4700 if (mmc->max_blk_size >= 3) {
4701 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4702 mmc_hostname(mmc));
4703 mmc->max_blk_size = 0;
4704 }
4705 }
4706
4707 mmc->max_blk_size = 512 << mmc->max_blk_size;
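	/*
	 * Example: a Max Block Length capability field of 2 yields
	 * 512 << 2 = 2048 bytes; field values of 3 and above are reserved,
	 * which is why they are reset to 0 (512 bytes) above.
	 */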
4708
4709 /*
4710 * Maximum block count.
4711 */
4712 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4713
4714 if (mmc->max_segs == 1)
4715 /* This may alter mmc->*_blk_* parameters */
4716 sdhci_allocate_bounce_buffer(host);
4717
4718 return 0;
4719
4720unreg:
4721 mmc_regulator_disable_vqmmc(mmc);
4722undma:
4723 if (host->align_buffer)
4724 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4725 host->adma_table_sz, host->align_buffer,
4726 host->align_addr);
4727 host->adma_table = NULL;
4728 host->align_buffer = NULL;
4729
4730 return ret;
4731}
4732EXPORT_SYMBOL_GPL(sdhci_setup_host);
4733
4734void sdhci_cleanup_host(struct sdhci_host *host)
4735{
4736 struct mmc_host *mmc = host->mmc;
4737
4738 mmc_regulator_disable_vqmmc(mmc);
4739
4740 if (host->align_buffer)
4741 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4742 host->adma_table_sz, host->align_buffer,
4743 host->align_addr);
4744 host->adma_table = NULL;
4745 host->align_buffer = NULL;
4746}
4747EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4748
4749int __sdhci_add_host(struct sdhci_host *host)
4750{
4751 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4752 struct mmc_host *mmc = host->mmc;
4753 int ret;
4754
4755 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4756 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4757 mmc->caps2 &= ~MMC_CAP2_CQE;
4758 mmc->cqe_ops = NULL;
4759 }
4760
4761 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4762 if (!host->complete_wq)
4763 return -ENOMEM;
4764
4765 INIT_WORK(&host->complete_work, sdhci_complete_work);
4766
4767 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4768 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4769
4770 init_waitqueue_head(&host->buf_ready_int);
4771
4772 sdhci_init(host, 0);
4773
4774 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4775 IRQF_SHARED, mmc_hostname(mmc), host);
4776 if (ret) {
4777 pr_err("%s: Failed to request IRQ %d: %d\n",
4778 mmc_hostname(mmc), host->irq, ret);
4779 goto unwq;
4780 }
4781
4782 ret = sdhci_led_register(host);
4783 if (ret) {
4784 pr_err("%s: Failed to register LED device: %d\n",
4785 mmc_hostname(mmc), ret);
4786 goto unirq;
4787 }
4788
4789 ret = mmc_add_host(mmc);
4790 if (ret)
4791 goto unled;
4792
4793 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4794 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4795 (host->flags & SDHCI_USE_ADMA) ?
4796 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4797 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4798
4799 sdhci_enable_card_detection(host);
4800
4801 return 0;
4802
4803unled:
4804 sdhci_led_unregister(host);
4805unirq:
4806 sdhci_do_reset(host, SDHCI_RESET_ALL);
4807 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4808 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4809 free_irq(host->irq, host);
4810unwq:
4811 destroy_workqueue(host->complete_wq);
4812
4813 return ret;
4814}
4815EXPORT_SYMBOL_GPL(__sdhci_add_host);
4816
4817int sdhci_add_host(struct sdhci_host *host)
4818{
4819 int ret;
4820
4821 ret = sdhci_setup_host(host);
4822 if (ret)
4823 return ret;
4824
4825 ret = __sdhci_add_host(host);
4826 if (ret)
4827 goto cleanup;
4828
4829 return 0;
4830
4831cleanup:
4832 sdhci_cleanup_host(host);
4833
4834 return ret;
4835}
4836EXPORT_SYMBOL_GPL(sdhci_add_host);
4837
4838void sdhci_remove_host(struct sdhci_host *host, int dead)
4839{
4840 struct mmc_host *mmc = host->mmc;
4841 unsigned long flags;
4842
4843 if (dead) {
4844 spin_lock_irqsave(&host->lock, flags);
4845
4846 host->flags |= SDHCI_DEVICE_DEAD;
4847
4848 if (sdhci_has_requests(host)) {
4849			pr_err("%s: Controller removed during transfer!\n",
4850			       mmc_hostname(mmc));
4851 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4852 }
4853
4854 spin_unlock_irqrestore(&host->lock, flags);
4855 }
4856
4857 sdhci_disable_card_detection(host);
4858
4859 mmc_remove_host(mmc);
4860
4861 sdhci_led_unregister(host);
4862
4863 if (!dead)
4864 sdhci_do_reset(host, SDHCI_RESET_ALL);
4865
4866 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4867 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4868 free_irq(host->irq, host);
4869
4870 del_timer_sync(&host->timer);
4871 del_timer_sync(&host->data_timer);
4872
4873 destroy_workqueue(host->complete_wq);
4874
4875 mmc_regulator_disable_vqmmc(mmc);
4876
4877 if (host->align_buffer)
4878 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4879 host->adma_table_sz, host->align_buffer,
4880 host->align_addr);
4881
4882 host->adma_table = NULL;
4883 host->align_buffer = NULL;
4884}
4885
4886EXPORT_SYMBOL_GPL(sdhci_remove_host);
4887
4888void sdhci_free_host(struct sdhci_host *host)
4889{
4890 mmc_free_host(host->mmc);
4891}
4892
4893EXPORT_SYMBOL_GPL(sdhci_free_host);
4894
4895/*****************************************************************************\
4896 * *
4897 * Driver init/exit *
4898 * *
4899\*****************************************************************************/
4900
4901static int __init sdhci_drv_init(void)
4902{
4903 pr_info(DRIVER_NAME
4904 ": Secure Digital Host Controller Interface driver\n");
4905 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4906
4907 return 0;
4908}
4909
4910static void __exit sdhci_drv_exit(void)
4911{
4912}
4913
4914module_init(sdhci_drv_init);
4915module_exit(sdhci_drv_exit);
4916
4917module_param(debug_quirks, uint, 0444);
4918module_param(debug_quirks2, uint, 0444);
4919
4920MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4921MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4922MODULE_LICENSE("GPL");
4923
4924MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4925MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");