1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2020 MediaTek Inc.
4 * Author: Chaotian.Jing <chaotian.jing@mediatek.com>
5 */
6#include <linux/module.h>
7#include <linux/clk.h>
8#include <linux/delay.h>
9#include <linux/dma-mapping.h>
10#include <linux/interrupt.h>
11#include <linux/ioport.h>
12#include <linux/irq.h>
13#include <linux/of_address.h>
14#include <linux/of_irq.h>
15#include <linux/of_gpio.h>
16#include <linux/pinctrl/consumer.h>
17#include <linux/platform_device.h>
18#include <linux/pm.h>
19#include <linux/pm_runtime.h>
20#include <linux/regulator/consumer.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/mmc/card.h>
24#include <linux/mmc/core.h>
25#include <linux/mmc/host.h>
26#include <linux/mmc/mmc.h>
27#include <linux/mmc/sd.h>
28#include <linux/mmc/sdio.h>
29#include <linux/mmc/slot-gpio.h>
30
31#include "mtk-legacy-sdio.h"
32
33static void sdr_set_bits(void __iomem *reg, u32 bs)
34{
35 u32 val = readl(reg);
36
37 val |= bs;
38 writel(val, reg);
39}
40
41static void sdr_clr_bits(void __iomem *reg, u32 bs)
42{
43 u32 val = readl(reg);
44
45 val &= ~bs;
46 writel(val, reg);
47}
48
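/*
 * Register field helpers: 'field' is a contiguous bit mask, and ffs() is
 * used to locate its least significant bit so the value can be shifted
 * into (or out of) position.
 */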
49static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
50{
51 unsigned int tv = readl(reg);
52
53 tv &= ~field;
54 tv |= ((val) << (ffs((unsigned int)field) - 1));
55 writel(tv, reg);
56}
57
58static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
59{
60 unsigned int tv = readl(reg);
61
62 *val = ((tv & field) >> (ffs((unsigned int)field) - 1));
63}
64
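/*
 * Soft-reset the controller and clear its FIFO, then acknowledge any
 * interrupt status still pending. Both MSDC_CFG_RST and MSDC_FIFOCS_CLR
 * are self-clearing, so the code polls until they drop.
 */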
65static void msdc_reset_hw(struct msdc_host *host)
66{
67 u32 val;
68
69 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
70 while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
71 cpu_relax();
72
73 sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
74 while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
75 cpu_relax();
76
77 val = readl(host->base + MSDC_INT);
78 writel(val, host->base + MSDC_INT);
79}
80
81static bool sdio_online_tune_fail;
82static void msdc_dump_all_register(struct msdc_host *host);
83static void msdc_cmd_next(struct msdc_host *host,
84 struct mmc_request *mrq, struct mmc_command *cmd);
85#ifndef SUPPORT_LEGACY_SDIO
86static void msdc_recheck_sdio_irq(struct msdc_host *host);
87#endif
88static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
89 MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
90 MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
91static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
92 MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
93 MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
94
95static u8 msdc_dma_calcs(u8 *buf, u32 len)
96{
97 u32 i, sum = 0;
98
99 for (i = 0; i < len; i++)
100 sum += buf[i];
101 return 0xff - (u8) sum;
102}
103
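/*
 * Program the descriptor DMA engine: one GPD points to a chain of BDs,
 * one BD per scatterlist segment, with the last BD marked EOL. An 8-bit
 * checksum over the first 16 bytes of each descriptor is stored in its
 * info word (see msdc_dma_calcs); MSDC_DMA_CFG_DECSEN presumably enables
 * checking of that checksum by the hardware.
 */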
104static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
105 struct mmc_data *data)
106{
107 unsigned int j, dma_len;
108 dma_addr_t dma_address;
109 u32 dma_ctrl;
110 struct scatterlist *sg;
111 struct mt_gpdma_desc *gpd;
112 struct mt_bdma_desc *bd;
113
114 sg = data->sg;
115
116 gpd = dma->gpd;
117 bd = dma->bd;
118
119 /* modify gpd */
120 gpd->gpd_info |= GPDMA_DESC_HWO;
121 gpd->gpd_info |= GPDMA_DESC_BDP;
122 /* need to clear first. use these bits to calc checksum */
123 gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
124 gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;
125
126 /* modify bd */
127 for_each_sg(data->sg, sg, data->sg_count, j) {
128 dma_address = sg_dma_address(sg);
129 dma_len = sg_dma_len(sg);
130
131 /* init bd */
132 bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
133 bd[j].bd_info &= ~BDMA_DESC_DWPAD;
134 bd[j].ptr = (u32)dma_address;
135 bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
136 bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
137
138 if (j == data->sg_count - 1) /* the last bd */
139 bd[j].bd_info |= BDMA_DESC_EOL;
140 else
141 bd[j].bd_info &= ~BDMA_DESC_EOL;
142
143 /* checksum needs to be cleared first */
144 bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
145 bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
146 }
147
148 sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
149 dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
150 dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
151 dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8);
152 writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
153 writel((u32)dma->gpd_addr, host->base + MSDC_DMA_SA);
154}
155
156static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
157{
158 struct mmc_data *data = mrq->data;
159
160 if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
161 bool read = (data->flags & MMC_DATA_READ) != 0;
162
163 data->host_cookie |= MSDC_PREPARE_FLAG;
164 data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
165 read ? DMA_FROM_DEVICE :
166 DMA_TO_DEVICE);
167 }
168}
169
170static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
171{
172 struct mmc_data *data = mrq->data;
173
174 if (data->host_cookie & MSDC_ASYNC_FLAG)
175 return;
176
177 if (data->host_cookie & MSDC_PREPARE_FLAG) {
178 bool read = (data->flags & MMC_DATA_READ) != 0;
179
180 dma_unmap_sg(host->dev, data->sg, data->sg_len,
181 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
182 data->host_cookie &= ~MSDC_PREPARE_FLAG;
183 }
184}
185
186/* clock control primitives */
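/*
 * Convert the requested data timeout (ns + clocks) into the SDC_CFG_DTOC
 * field, which counts in units of 1048576 (2^20) sclk cycles. The count
 * is doubled for DDR modes (CKMOD >= 2) and capped at 255.
 */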
187static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
188{
189 u32 timeout, clk_ns;
190 u32 mode = 0;
191
192 host->timeout_ns = ns;
193 host->timeout_clks = clks;
194 if (host->sclk == 0) {
195 timeout = 0;
196 } else {
197 clk_ns = 1000000000UL / host->sclk;
198 timeout = (ns + clk_ns - 1) / clk_ns + clks;
199 /* in 1048576 sclk cycle unit */
200 timeout = (timeout + (0x1 << 20) - 1) >> 20;
201 sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
202 /* DDR mode will double the clk cycles for data timeout */
203 timeout = mode >= 2 ? timeout * 2 : timeout;
204 timeout = timeout > 1 ? timeout - 1 : 0;
205 timeout = timeout > 255 ? 255 : timeout;
206 }
207 sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, timeout);
208}
209
210static void msdc_gate_clock(struct msdc_host *host)
211{
212 clk_disable_unprepare(host->src_clk);
213 clk_disable_unprepare(host->h_clk);
214 clk_disable_unprepare(host->src_clk_cg);
215
216 host->sdio_clk_cnt--;
217 if (!host->sdio_clk_cnt)
218 host->clock_on = false;
219}
220
221static void msdc_ungate_clock(struct msdc_host *host)
222{
223 clk_prepare_enable(host->src_clk_cg);
224 clk_prepare_enable(host->h_clk);
225 clk_prepare_enable(host->src_clk);
226 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
227 cpu_relax();
228
229 host->clock_on = true;
230 host->sdio_clk_cnt++;
231}
232
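/*
 * Program the card clock. CKMOD selects the clock scheme (0: divided,
 * 1: no divisor, 2: DDR, 3: HS400) and CKDIV the divider value. The
 * source clock (or its parent mux) is gated while the divider is
 * reprogrammed, and the code waits for MSDC_CFG_CKSTB before setting
 * MSDC_CFG_CKPDN again. The turn-around counters in MSDC_PATCH_BIT1 are
 * then adjusted depending on whether sclk is above 52 MHz.
 */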
233static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
234{
235 u32 mode;
236 u32 flags;
237 u32 div;
238 u32 sclk;
239 unsigned long irq_flags;
240
241 if (!hz) {
242 dev_info(host->dev, "set mclk to 0\n");
243 host->mclk = 0;
244 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
245 return;
246 }
247
248 if (hz >= 100 * 1000 * 1000 && sdio_online_tune_fail)
249 hz = 50 * 1000 * 1000;
250
251 spin_lock_irqsave(&host->irqlock, irq_flags);
252 flags = readl(host->base + MSDC_INTEN);
253 sdr_clr_bits(host->base + MSDC_INTEN, flags);
254 spin_unlock_irqrestore(&host->irqlock, irq_flags);
255
256 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
257 if (timing == MMC_TIMING_UHS_DDR50 ||
258 timing == MMC_TIMING_MMC_DDR52 ||
259 timing == MMC_TIMING_MMC_HS400) {
260 if (timing == MMC_TIMING_MMC_HS400)
261 mode = 0x3;
262 else
263 mode = 0x2; /* ddr mode and use divisor */
264
265 if (hz >= (host->src_clk_freq >> 2)) {
266 div = 0; /* mean div = 1/4 */
267 sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
268 } else {
269 div = (host->src_clk_freq + ((hz << 2) - 1)) /
270 (hz << 2);
271 sclk = (host->src_clk_freq >> 2) / div;
272 div = (div >> 1);
273 }
274
275 if (timing == MMC_TIMING_MMC_HS400 &&
276 hz >= (host->src_clk_freq >> 1)) {
277 sdr_set_bits(host->base + MSDC_CFG,
278 MSDC_CFG_HS400_CK_MODE);
279 sclk = host->src_clk_freq >> 1;
280 div = 0; /* div is ignored when bit18 is set */
281 }
282 } else if (hz >= host->src_clk_freq) {
283 mode = 0x1; /* no divisor */
284 div = 0;
285 sclk = host->src_clk_freq;
286 } else {
287 mode = 0x0; /* use divisor */
288 if (hz >= (host->src_clk_freq >> 1)) {
289 div = 0; /* mean div = 1/2 */
290 sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
291 } else {
292 div = (host->src_clk_freq + ((hz << 2) - 1)) /
293 (hz << 2);
294 sclk = (host->src_clk_freq >> 2) / div;
295 }
296 }
297 /*
298 * As src_clk/HCLK share the same gate/ungate bit, gating only
299 * src_clk requires gating its parent (the mux).
300 */
301 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
302 if (host->src_clk_cg)
303 clk_disable_unprepare(host->src_clk_cg);
304 else
305 clk_disable_unprepare(clk_get_parent(host->src_clk));
306 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
307 (mode << 12) | div);
308 if (host->src_clk_cg)
309 clk_prepare_enable(host->src_clk_cg);
310 else
311 clk_prepare_enable(clk_get_parent(host->src_clk));
312 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
313 cpu_relax();
314 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
315 host->sclk = sclk;
316 host->mclk = hz;
317 host->timing = timing;
318 /* needed because the clock has changed */
319 msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
320
321 spin_lock_irqsave(&host->irqlock, irq_flags);
322 sdr_set_bits(host->base + MSDC_INTEN, flags);
323 spin_unlock_irqrestore(&host->irqlock, irq_flags);
324
325 if (host->sclk <= 52000000) {
326 sdr_set_field(host->base + MSDC_PATCH_BIT1,
327 MSDC_PB1_WRDAT_CRCS_TA_CNTR, 0x1);
328 sdr_set_field(host->base + MSDC_PATCH_BIT1,
329 MSDC_PB1_CMD_RSP_TA_CNTR, 0x1);
330 } else {
331 sdr_set_field(host->base + MSDC_PATCH_BIT1,
332 MSDC_PB1_WRDAT_CRCS_TA_CNTR, 0x2);
333 sdr_set_field(host->base + MSDC_PATCH_BIT1,
334 MSDC_PB1_CMD_RSP_TA_CNTR, 0x4);
335 }
336
337 dev_info(host->dev, "sclk: %d, timing: %d hz:%d cfg:0x%x\n", host->sclk,
338 timing, hz, readl(host->base + MSDC_CFG));
339}
340
341static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
342 struct mmc_request *mrq, struct mmc_command *cmd)
343{
344 u32 resp;
345
346 switch (mmc_resp_type(cmd)) {
347 /* Actually, R1, R5, R6, R7 are the same */
348 case MMC_RSP_R1:
349 resp = 0x1;
350 break;
351 case MMC_RSP_R1B:
352 resp = 0x7;
353 break;
354 case MMC_RSP_R2:
355 resp = 0x2;
356 break;
357 case MMC_RSP_R3:
358 resp = 0x3;
359 break;
360 case MMC_RSP_NONE:
361 default:
362 resp = 0x0;
363 break;
364 }
365
366 return resp;
367}
368
369static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
370 struct mmc_request *mrq, struct mmc_command *cmd)
371{
372 /* rawcmd :
373 * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
374 * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
375 */
376 u32 opcode = cmd->opcode;
377 u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
378 u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
379
380 host->cmd_rsp = resp;
381
382 if ((opcode == SD_IO_RW_DIRECT &&
383 ((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT) ||
384 opcode == MMC_STOP_TRANSMISSION)
385 rawcmd |= (0x1 << 14);
386 else if (opcode == SD_SWITCH_VOLTAGE)
387 rawcmd |= (0x1 << 30);
388 else if (opcode == SD_APP_SEND_SCR ||
389 opcode == SD_APP_SEND_NUM_WR_BLKS ||
390 (opcode == SD_SWITCH &&
391 mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
392 (opcode == SD_APP_SD_STATUS &&
393 mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
394 (opcode == MMC_SEND_EXT_CSD &&
395 mmc_cmd_type(cmd) == MMC_CMD_ADTC))
396 rawcmd |= (0x1 << 11);
397
398 if (cmd->data) {
399 struct mmc_data *data = cmd->data;
400
401 if (mmc_op_multi(opcode)) {
402 if (mmc_card_mmc(host->mmc->card) && mrq->sbc &&
403 !(mrq->sbc->arg & 0xFFFF0000))
404 rawcmd |= 0x2 << 28; /* AutoCMD23 */
405 }
406
407 rawcmd |= ((data->blksz & 0xFFF) << 16);
408 if (data->flags & MMC_DATA_WRITE)
409 rawcmd |= (0x1 << 13);
410 if (data->blocks > 1)
411 rawcmd |= (0x2 << 11);
412 else
413 rawcmd |= (0x1 << 11);
414 /* Always use dma mode */
415 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
416
417 if (host->timeout_ns != data->timeout_ns ||
418 host->timeout_clks != data->timeout_clks)
419 msdc_set_timeout(host, data->timeout_ns,
420 data->timeout_clks);
421
422 writel(data->blocks, host->base + SDC_BLK_NUM);
423 }
424 return rawcmd;
425}
426
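/*
 * Kick off a data transfer: arm the request timeout, program the GPD/BD
 * descriptors, then enable the data interrupts and set the DMA start bit
 * under the irq lock.
 */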
427static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
428 struct mmc_command *cmd, struct mmc_data *data)
429{
430 unsigned long flags;
431 bool read;
432
433 WARN_ON(host->data);
434 host->data = data;
435 read = data->flags & MMC_DATA_READ;
436
437 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
438 msdc_dma_setup(host, &host->dma, data);
439
440 spin_lock_irqsave(&host->irqlock, flags);
441 sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
442 sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
443 spin_unlock_irqrestore(&host->irqlock, flags);
444
445 dev_dbg(host->dev, "DMA start\n");
446 dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
447 __func__, cmd->opcode, data->blocks, read);
448}
449
450static int msdc_auto_cmd_done(struct msdc_host *host, int events,
451 struct mmc_command *cmd)
452{
453 u32 *rsp = cmd->resp;
454
455 rsp[0] = readl(host->base + SDC_ACMD_RESP);
456
457 if (events & MSDC_INT_ACMDRDY) {
458 cmd->error = 0;
459 } else {
460 msdc_reset_hw(host);
461 if (events & MSDC_INT_ACMDCRCERR) {
462 cmd->error = -EILSEQ;
463 host->error |= REQ_STOP_EIO;
464 } else if (events & MSDC_INT_ACMDTMO) {
465 cmd->error = -ETIMEDOUT;
466 host->error |= REQ_STOP_TMO;
467 }
468 dev_info(host->dev,
469 "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
470 __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
471 }
472 return cmd->error;
473}
474
475static void msdc_track_cmd_data(struct msdc_host *host,
476 struct mmc_command *cmd, struct mmc_data *data)
477{
478 if (host->error)
479 dev_info(host->dev, "cmd=%d arg=%08X; err=0x%08X\n",
480 cmd->opcode, cmd->arg, host->error);
481}
482
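/*
 * Complete a request. If the timeout work could not be cancelled and we
 * are in interrupt context, the timeout handler is already running and
 * will finish the request itself, so bail out to avoid a double
 * completion.
 */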
483static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
484{
485 unsigned long flags;
486 bool ret;
487
488 ret = cancel_delayed_work(&host->req_timeout);
489 if (!ret && in_interrupt()) {
490 /* delay work already running */
491 return;
492 }
493 spin_lock_irqsave(&host->lock, flags);
494 host->mrq = NULL;
495 spin_unlock_irqrestore(&host->lock, flags);
496
497 msdc_track_cmd_data(host, mrq->cmd, mrq->data);
498 if (mrq->data)
499 msdc_unprepare_data(host, mrq);
500 mmc_request_done(host->mmc, mrq);
501#ifndef SUPPORT_LEGACY_SDIO
502 msdc_recheck_sdio_irq(host);
503#endif
504}
505
506/* returns true if command is fully handled; returns false otherwise */
507static bool msdc_cmd_done(struct msdc_host *host, int events,
508 struct mmc_request *mrq, struct mmc_command *cmd)
509{
510 bool done = false;
511 bool sbc_error;
512 unsigned long flags;
513 u32 *rsp = cmd->resp;
514
515 if (mrq->sbc && cmd == mrq->cmd &&
516 (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
517 | MSDC_INT_ACMDTMO)))
518 msdc_auto_cmd_done(host, events, mrq->sbc);
519
520 sbc_error = mrq->sbc && mrq->sbc->error;
521
522 if (!sbc_error && !(events & (MSDC_INT_CMDRDY
523 | MSDC_INT_RSPCRCERR
524 | MSDC_INT_CMDTMO)))
525 return done;
526
527 done = !host->cmd;
528 spin_lock_irqsave(&host->lock, flags);
529 host->cmd = NULL;
530 spin_unlock_irqrestore(&host->lock, flags);
531
532 if (done)
533 return true;
534
535 spin_lock_irqsave(&host->irqlock, flags);
536 sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
537 spin_unlock_irqrestore(&host->irqlock, flags);
538
539 if (cmd->flags & MMC_RSP_PRESENT) {
540 if (cmd->flags & MMC_RSP_136) {
541 rsp[0] = readl(host->base + SDC_RESP3);
542 rsp[1] = readl(host->base + SDC_RESP2);
543 rsp[2] = readl(host->base + SDC_RESP1);
544 rsp[3] = readl(host->base + SDC_RESP0);
545 } else {
546 rsp[0] = readl(host->base + SDC_RESP0);
547 }
548 }
549
550 if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
551 if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
552 cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
553 /*
554 * should not clear fifo/interrupt as the tune data
555 * may have already come.
556 */
557 msdc_reset_hw(host);
558 if (events & MSDC_INT_RSPCRCERR) {
559 cmd->error = -EILSEQ;
560 host->error |= REQ_CMD_EIO;
561 } else if (events & MSDC_INT_CMDTMO) {
562 cmd->error = -ETIMEDOUT;
563 host->error |= REQ_CMD_TMO;
564 }
565 }
566 if (cmd->error && cmd->opcode != MMC_SEND_TUNING_BLOCK)
567 dev_dbg(host->dev,
568 "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
569 __func__, cmd->opcode, cmd->arg, rsp[0],
570 cmd->error);
571
572 msdc_cmd_next(host, mrq, cmd);
573 return true;
574}
575
576static int msdc_card_busy(struct mmc_host *mmc)
577{
578 struct msdc_host *host = mmc_priv(mmc);
579 u32 status = readl(host->base + MSDC_PS);
580
581 /* check if data0 is low */
582 return !(status & BIT(16));
583}
584
585/* It is the core layer's responsibility to ensure the card status
586 * is correct before issuing a request, but the checks below are
587 * recommended by the host design.
588 */
589static inline bool msdc_cmd_is_ready(struct msdc_host *host,
590 struct mmc_request *mrq, struct mmc_command *cmd)
591{
592 /* The max busy time we can endure is 20ms */
593 unsigned long tmo = jiffies + msecs_to_jiffies(20);
594 u32 count = 0;
595
596 if (in_interrupt()) {
597 while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
598 (count < 1000)) {
599 udelay(1);
600 count++;
601 }
602 } else {
603 while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
604 time_before(jiffies, tmo))
605 cpu_relax();
606 }
607
608 if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) {
609 dev_info(host->dev, "CMD bus busy detected\n");
610 host->error |= REQ_CMD_BUSY;
611 msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
612 return false;
613 }
614
615 if (cmd->opcode != MMC_SEND_STATUS) {
616 count = 0;
617 /* Consider a CMD6 CRC error before card init is done:
618 * mmc_retune() returns directly as host->card is NULL, and
619 * CMD6 is retried 3 times, so the card must be back in the
620 * transfer state before the retry.
621 */
622 tmo = jiffies + msecs_to_jiffies(60 * 1000);
623 while (1) {
624 if (msdc_card_busy(host->mmc)) {
625 if (in_interrupt()) {
626 udelay(1);
627 count++;
628 } else {
629 msleep_interruptible(10);
630 }
631 } else {
632 break;
633 }
634 /* Timeout if the device never
635 * leaves the program state.
636 */
637 if (count > 1000 || time_after(jiffies, tmo)) {
638 pr_info("%s: Card is in programming state!\n",
639 mmc_hostname(host->mmc));
640 host->error |= REQ_CMD_BUSY;
641 msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
642 return false;
643 }
644 }
645 }
646 return true;
647}
648
649static void msdc_start_command(struct msdc_host *host,
650 struct mmc_request *mrq, struct mmc_command *cmd)
651{
652 unsigned long flags;
653 u32 rawcmd;
654
655 WARN_ON(host->cmd);
656 host->cmd = cmd;
657
658 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
659 if (!msdc_cmd_is_ready(host, mrq, cmd))
660 return;
661
662 if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 ||
663 readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) {
664 dev_info(host->dev,
665 "TX/RX FIFO non-empty before start of IO. Reset\n");
666 msdc_reset_hw(host);
667 }
668
669 cmd->error = 0;
670 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
671
672 spin_lock_irqsave(&host->irqlock, flags);
673 sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
674 spin_unlock_irqrestore(&host->irqlock, flags);
675
676 writel(cmd->arg, host->base + SDC_ARG);
677 writel(rawcmd, host->base + SDC_CMD);
678
679}
680
681static void msdc_cmd_next(struct msdc_host *host,
682 struct mmc_request *mrq, struct mmc_command *cmd)
683{
684 if ((cmd->error &&
685 !(cmd->error == -EILSEQ &&
686 (cmd->opcode == MMC_SEND_TUNING_BLOCK ||
687 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
688 (mrq->sbc && mrq->sbc->error))
689 msdc_request_done(host, mrq);
690 else if (cmd == mrq->sbc)
691 msdc_start_command(host, mrq, mrq->cmd);
692 else if (!cmd->data)
693 msdc_request_done(host, mrq);
694 else
695 msdc_start_data(host, mrq, cmd, cmd->data);
696}
697
698static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
699{
700 struct msdc_host *host = mmc_priv(mmc);
701
702 host->error = 0;
703 WARN_ON(host->mrq);
704 host->mrq = mrq;
705
706 if (mrq->data)
707 msdc_prepare_data(host, mrq);
708
709 /* If SBC is required, there is a HW option and a SW option.
710 * If the HW option is enabled and the SBC does not carry "special"
711 * flags, use the HW option; otherwise use the SW option.
712 */
713 if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
714 (mrq->sbc->arg & 0xFFFF0000)))
715 msdc_start_command(host, mrq, mrq->sbc);
716 else
717 msdc_start_command(host, mrq, mrq->cmd);
718}
719
720static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
721{
722 struct msdc_host *host = mmc_priv(mmc);
723 struct mmc_data *data = mrq->data;
724
725 if (!data)
726 return;
727
728 msdc_prepare_data(host, mrq);
729 data->host_cookie |= MSDC_ASYNC_FLAG;
730}
731
732static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
733 int err)
734{
735 struct msdc_host *host = mmc_priv(mmc);
736 struct mmc_data *data;
737
738 data = mrq->data;
739 if (!data)
740 return;
741 if (data->host_cookie) {
742 data->host_cookie &= ~MSDC_ASYNC_FLAG;
743 msdc_unprepare_data(host, mrq);
744 }
745}
746
747static void msdc_data_xfer_next(struct msdc_host *host,
748 struct mmc_request *mrq, struct mmc_data *data)
749{
750 if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
751 !mrq->sbc)
752 msdc_start_command(host, mrq, mrq->stop);
753 else
754 msdc_request_done(host, mrq);
755}
756
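/*
 * Data-phase completion: on any tracked event (or a stop-command error)
 * stop the DMA engine, mask the data interrupts, translate the event bits
 * into an error code, then either issue the stop command or finish the
 * request via msdc_data_xfer_next().
 */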
757static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
758 struct mmc_request *mrq, struct mmc_data *data)
759{
760 struct mmc_command *stop = data->stop;
761 unsigned long flags;
762 bool done;
763 unsigned int check_data = events &
764 (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
765 | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
766 | MSDC_INT_DMA_PROTECT);
767
768 done = !host->data;
769 spin_lock_irqsave(&host->lock, flags);
770 if (check_data)
771 host->data = NULL;
772 spin_unlock_irqrestore(&host->lock, flags);
773
774 if (done)
775 return true;
776
777 if (check_data || (stop && stop->error)) {
778 dev_dbg(host->dev, "DMA status: 0x%8X\n",
779 readl(host->base + MSDC_DMA_CFG));
780 sdr_set_field(host->base + MSDC_DMA_CTRL,
781 MSDC_DMA_CTRL_STOP, 1);
782 while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
783 cpu_relax();
784
785 spin_lock_irqsave(&host->irqlock, flags);
786 sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
787 spin_unlock_irqrestore(&host->irqlock, flags);
788
789 dev_dbg(host->dev, "DMA stop\n");
790
791 if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
792 data->bytes_xfered = data->blocks * data->blksz;
793 } else {
794 dev_info(host->dev, "interrupt events: %x\n", events);
795 msdc_reset_hw(host);
796 host->error |= REQ_DAT_ERR;
797 data->bytes_xfered = 0;
798
799 if (events & MSDC_INT_DATTMO)
800 data->error = -ETIMEDOUT;
801 else if (events & MSDC_INT_DATCRCERR)
802 data->error = -EILSEQ;
803
804 if (mrq->cmd->opcode != MMC_SEND_TUNING_BLOCK) {
805 dev_info(host->dev, "%s: cmd=%d; blocks=%d",
806 __func__, mrq->cmd->opcode, data->blocks);
807 dev_info(host->dev, "data_error=%d xfer_size=%d\n",
808 (int)data->error, data->bytes_xfered);
809 }
810 }
811
812 msdc_data_xfer_next(host, mrq, data);
813 done = true;
814 }
815 return done;
816}
817
818static void msdc_set_buswidth(struct msdc_host *host, u32 width)
819{
820 u32 val = readl(host->base + SDC_CFG);
821
822 val &= ~SDC_CFG_BUSWIDTH;
823
824 switch (width) {
825 default:
826 case MMC_BUS_WIDTH_1:
827 val |= (MSDC_BUS_1BITS << 16);
828 break;
829 case MMC_BUS_WIDTH_4:
830 val |= (MSDC_BUS_4BITS << 16);
831 break;
832 case MMC_BUS_WIDTH_8:
833 val |= (MSDC_BUS_8BITS << 16);
834 break;
835 }
836
837 writel(val, host->base + SDC_CFG);
838 dev_dbg(host->dev, "Bus Width = %d", width);
839}
840
841static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
842{
843
844 struct msdc_host *host = mmc_priv(mmc);
845 int min_uv, max_uv;
846 int ret = 0;
847
848 if (!IS_ERR(mmc->supply.vqmmc)) {
849 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
850 min_uv = 3300000;
851 max_uv = 3300000;
852 } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
853 min_uv = 1800000;
854 max_uv = 1800000;
855 } else {
856 dev_info(host->dev, "Unsupported signal voltage!\n");
857 return -EINVAL;
858 }
859
860 ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
861 if (ret) {
862 dev_dbg(host->dev, "Regulator set error %d (%d)\n",
863 ret, ios->signal_voltage);
864 } else {
865 /* Apply different pinctrl settings
866 * for different signal voltage
867 */
868 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
869 pinctrl_select_state(host->pinctrl,
870 host->pins_uhs);
871 else
872 pinctrl_select_state(host->pinctrl,
873 host->pins_default);
874 }
875 }
876 return ret;
877}
878
879static void msdc_request_timeout(struct work_struct *work)
880{
881 struct msdc_host *host = container_of(work, struct msdc_host,
882 req_timeout.work);
883
884 /* simulate HW timeout status */
885 dev_info(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
886 if (host->mrq) {
887 dev_info(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
888 host->mrq, host->mrq->cmd->opcode);
889 if (host->cmd) {
890 dev_info(host->dev,
891 "%s: aborting cmd=%d, arg=0x%x\n", __func__,
892 host->cmd->opcode, host->cmd->arg);
893 msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
894 host->cmd);
895 } else if (host->data) {
896 dev_info(host->dev,
897 "%s: aborting data: cmd%d; %d blocks\n",
898 __func__, host->mrq->cmd->opcode,
899 host->data->blocks);
900 msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
901 host->data);
902 }
903 }
904}
905
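/*
 * Interrupt handler: read and acknowledge the enabled status bits under
 * the irq lock, signal a pending SDIO card interrupt, then dispatch the
 * remaining events to command or data completion handling.
 */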
906static irqreturn_t msdc_irq(int irq, void *dev_id)
907{
908 unsigned long flags;
909 struct msdc_host *host = (struct msdc_host *) dev_id;
910 struct mmc_request *mrq;
911 struct mmc_command *cmd;
912 struct mmc_data *data;
913 u32 events, event_mask;
914
915 spin_lock_irqsave(&host->irqlock, flags);
916 events = readl(host->base + MSDC_INT);
917 event_mask = readl(host->base + MSDC_INTEN);
918 /* clear interrupts */
919 writel(events & event_mask, host->base + MSDC_INT);
920
921 mrq = host->mrq;
922 cmd = host->cmd;
923 data = host->data;
924 spin_unlock_irqrestore(&host->irqlock, flags);
925
926 if ((events & event_mask) & MSDC_INT_SDIOIRQ) {
927 mmc_signal_sdio_irq(host->mmc);
928 if (!mrq)
929 return IRQ_HANDLED;
930 }
931
932 if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
933 return IRQ_HANDLED;
934
935 if (!mrq) {
936 dev_info(host->dev,
937 "%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
938 __func__, events, event_mask);
939 WARN_ON(1);
940 return IRQ_HANDLED;
941 }
942
943 if (cmd)
944 msdc_cmd_done(host, events, mrq, cmd);
945 else if (data)
946 msdc_data_xfer_done(host, events, mrq, data);
947
948 return IRQ_HANDLED;
949}
950
951static struct msdc_host *sdio_host;
952
953static void sdio_status_notify_cb(int card_present, void *dev_id)
954{
955 struct msdc_host *host = (struct msdc_host *)dev_id;
956
957 pr_info("%s: card_present %d\n", mmc_hostname(host->mmc), card_present);
958
959 if (card_present == 1) {
960 host->mmc->rescan_disable = 0;
961 mmc_detect_change(host->mmc, 0);
962 } else if (card_present == 0) {
963 host->mmc->detect_change = 0;
964 host->mmc->rescan_disable = 1;
965 }
966}
967
968void sdio_card_detect(int card_present)
969{
970 pr_info("%s: enter present:%d\n", __func__, card_present);
971 if (sdio_host)
972 sdio_status_notify_cb(card_present, sdio_host);
973
974}
975EXPORT_SYMBOL(sdio_card_detect);
976
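/*
 * One-time controller setup: select MMC/SD mode, reset the IP, disable
 * card detection, mask and clear all interrupts, apply the patch-bit
 * defaults, enable SDIO mode (and the SDIO device interrupt if
 * supported), set the default data timeout, and save the default tuning
 * parameters.
 */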
977static void msdc_init_hw(struct msdc_host *host)
978{
979 u32 val;
980 unsigned long flags;
981
982 /* Configure to MMC/SD mode, clock free running */
983 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE);
984
985 /* Reset */
986 msdc_reset_hw(host);
987
988 /* Disable card detection */
989 sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
990
991 /* Disable and clear all interrupts */
992 spin_lock_irqsave(&host->irqlock, flags);
993 writel(0, host->base + MSDC_INTEN);
994 val = readl(host->base + MSDC_INT);
995 writel(val, host->base + MSDC_INT);
996 spin_unlock_irqrestore(&host->irqlock, flags);
997
998 writel(0, host->base + MSDC_IOCON);
999 sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
1000 writel(0x403c0046, host->base + MSDC_PATCH_BIT0);
1001 sdr_set_field(host->base + MSDC_PATCH_BIT0, MSDC_CKGEN_MSDC_DLY_SEL, 1);
1002 writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
1003
1004 /* For SDIO3.0+ IP, this bit should be set to 0 */
1005 if (host->dev_comp->v3_plus)
1006 sdr_clr_bits(host->base + MSDC_PATCH_BIT1,
1007 MSDC_PB1_SINGLE_BURST);
1008
1009 sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
1010
1011/* Configure to enable SDIO mode. This is mandatory,
1012 * otherwise SDIO CMD5 will fail.
1013 */
1014 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
1015
1016 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
1017 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
1018 else
1019 /* disable detect SDIO device interrupt function */
1020 sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
1021 /* Configure to default data timeout */
1022 sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
1023
1024 host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
1025 host->def_tune_para.pad_tune0 = readl(host->base + MSDC_PAD_TUNE0);
1026 host->def_tune_para.pad_tune1 = readl(host->base + MSDC_PAD_TUNE1);
1027 dev_dbg(host->dev, "init hardware done!");
1028}
1029
1030static void msdc_deinit_hw(struct msdc_host *host)
1031{
1032 u32 val;
1033 unsigned long flags;
1034
1035 /* Disable and clear all interrupts */
1036 spin_lock_irqsave(&host->irqlock, flags);
1037 writel(0, host->base + MSDC_INTEN);
1038
1039 val = readl(host->base + MSDC_INT);
1040 writel(val, host->base + MSDC_INT);
1041 spin_unlock_irqrestore(&host->irqlock, flags);
1042}
1043
1044/* init gpd and bd list in msdc_drv_probe */
1045static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
1046{
1047 struct mt_gpdma_desc *gpd = dma->gpd;
1048 struct mt_bdma_desc *bd = dma->bd;
1049 int i;
1050
1051 memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
1052
1053 gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
1054 gpd->ptr = (u32)dma->bd_addr; /* physical address */
1055 /* gpd->next must be set for descriptor DMA;
1056 * that is why two gpd structures are allocated.
1057 */
1058 gpd->next = (u32)dma->gpd_addr + sizeof(struct mt_gpdma_desc);
1059 memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
1060 for (i = 0; i < (MAX_BD_NUM - 1); i++)
1061 bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1);
1062}
1063
1064static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1065{
1066 int ret;
1067 struct msdc_host *host = mmc_priv(mmc);
1068
1069 msdc_set_buswidth(host, ios->bus_width);
1070
1071 /* Suspend/Resume will do power off/on */
1072 switch (ios->power_mode) {
1073 case MMC_POWER_UP:
1074 if (!IS_ERR(mmc->supply.vmmc)) {
1075 msdc_init_hw(host);
1076 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1077 ios->vdd);
1078 if (ret) {
1079 dev_info(host->dev, "Failed to set vmmc power!\n");
1080 return;
1081 }
1082 }
1083 break;
1084 case MMC_POWER_ON:
1085 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1086 ret = regulator_enable(mmc->supply.vqmmc);
1087 if (ret)
1088 dev_info(host->dev, "Failed to set vqmmc power!\n");
1089 else
1090 host->vqmmc_enabled = true;
1091 }
1092 break;
1093 case MMC_POWER_OFF:
1094 /* power always on */
1095 if (!IS_ERR(mmc->supply.vmmc))
1096 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1097
1098 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1099 regulator_disable(mmc->supply.vqmmc);
1100 host->vqmmc_enabled = false;
1101 }
1102 break;
1103 default:
1104 break;
1105 }
1106
1107 if (host->mclk != ios->clock || host->timing != ios->timing)
1108 msdc_set_mclk(host, ios->timing, ios->clock);
1109}
1110
1111/*************** SDIO AUTOK ******************/
1112#define MSDC_FIFO_THD_1K (1024)
1113#define TUNE_TX_CNT (100)
1114#define MSDC_FIFO_SZ (128)
1115/*#define TUNE_DATA_TX_ADDR (0x358000)*/
1116/* Use negative value to represent address from end of device,
1117 * 33 blocks used by SGPT at end of device,
1118 * 32768 blocks used by flashinfo immediately before SGPT
1119 */
1120#define TUNE_DATA_TX_ADDR (-33-32768)
1121#define CMDQ
1122#define AUTOK_LATCH_CK_EMMC_TUNE_TIMES (10) /* 5.0 IP eMMC 1KB FIFO size */
1123#define AUTOK_LATCH_CK_SDIO_TUNE_TIMES (20) /* 4.5 IP SDIO 128B FIFO size */
1124#define AUTOK_LATCH_CK_SD_TUNE_TIMES (3) /* 4.5 IP SD 128B FIFO size */
1125#define AUTOK_CMD_TIMES (20)
1126#define AUTOK_TUNING_INACCURACY (3) /* scan result may find xxxooxxx */
1127#define AUTOK_MARGIN_THOLD (5)
1128#define AUTOK_BD_WIDTH_REF (3)
1129
1130#define AUTOK_READ 0
1131#define AUTOK_WRITE 1
1132
1133#define AUTOK_FINAL_CKGEN_SEL (0)
1134#define SCALE_TA_CNTR (8)
1135#define SCALE_CMD_RSP_TA_CNTR (8)
1136#define SCALE_WDAT_CRC_TA_CNTR (8)
1137#define SCALE_INT_DAT_LATCH_CK_SEL (8)
1138#define SCALE_INTERNAL_DLY_CNTR (32)
1139#define SCALE_PAD_DAT_DLY_CNTR (32)
1140
1141#define TUNING_INACCURACY (2)
1142
1143/* autok platform specific setting */
1144#define AUTOK_CKGEN_VALUE (0)
1145#define AUTOK_CMD_LATCH_EN_HS400_VALUE (3)
1146#define AUTOK_CMD_LATCH_EN_NON_HS400_VALUE (2)
1147#define AUTOK_CRC_LATCH_EN_HS400_VALUE (0)
1148#define AUTOK_CRC_LATCH_EN_NON_HS400_VALUE (2)
1149#define AUTOK_LATCH_CK_VALUE (1)
1150#define AUTOK_CMD_TA_VALUE (0)
1151#define AUTOK_CRC_TA_VALUE (0)
1152#define AUTOK_CRC_MA_VALUE (1)
1153#define AUTOK_BUSY_MA_VALUE (1)
1154
1155#define AUTOK_FAIL -1
1156
1157#define E_RESULT_PASS (0)
1158#define E_RESULT_CMD_TMO (1<<0)
1159#define E_RESULT_RSP_CRC (1<<1)
1160#define E_RESULT_DAT_CRC (1<<2)
1161#define E_RESULT_DAT_TMO (1<<3)
1162#define E_RESULT_W_CRC (1<<4)
1163#define E_RESULT_ERR (1<<5)
1164#define E_RESULT_START (1<<6)
1165#define E_RESULT_PW_SMALL (1<<7)
1166#define E_RESULT_KEEP_OLD (1<<8)
1167#define E_RESULT_CMP_ERR (1<<9)
1168#define E_RESULT_FATAL_ERR (1<<10)
1169
1170#define E_RESULT_MAX
1171
1172#ifndef NULL
1173#define NULL 0
1174#endif
1175#ifndef TRUE
1176#define TRUE (0 == 0)
1177#endif
1178#ifndef FALSE
1179#define FALSE (0 != 0)
1180#endif
1181
1182#define ATK_OFF 0
1183#define ATK_ERROR 1
1184#define ATK_RES 2
1185#define ATK_WARN 3
1186#define ATK_TRACE 4
1187#define ATK_LOUD 5
1188
1189static unsigned int autok_debug_level = ATK_RES;
1190
1191#define ATK_DBG(_level, _fmt ...) \
1192({ \
1193 if (autok_debug_level >= _level) { \
1194 pr_info(_fmt); \
1195 } \
1196})
1197
1198#define ATK_ERR(_fmt ...) \
1199({ \
1200 pr_info(_fmt); \
1201})
1202
1203enum AUTOK_PARAM {
1204 /* command response sample selection
1205 * (MSDC_SMPL_RISING, MSDC_SMPL_FALLING)
1206 */
1207 CMD_EDGE,
1208
1209 /* read data sample selection (MSDC_SMPL_RISING, MSDC_SMPL_FALLING) */
1210 RDATA_EDGE,
1211
1212 /* read data async fifo out edge select */
1213 RD_FIFO_EDGE,
1214
1215 /* write data crc status async fifo out edge select */
1216 WD_FIFO_EDGE,
1217
1218 /* [Data Tune]CMD Pad RX Delay Line1 Control.
1219 * This register is used to fine-tune CMD pad macro response
1220 * latch timing. Total 32 stages[Data Tune]
1221 */
1222 CMD_RD_D_DLY1,
1223
1224 /* [Data Tune]CMD Pad RX Delay Line1 Sel-> delay cell1 enable */
1225 CMD_RD_D_DLY1_SEL,
1226
1227 /* [Data Tune]CMD Pad RX Delay Line2 Control. This register is used to
1228 * fine-tune CMD pad macro response latch timing.
1229 * Total 32 stages[Data Tune]
1230 */
1231 CMD_RD_D_DLY2,
1232
1233 /* [Data Tune]CMD Pad RX Delay Line1 Sel-> delay cell2 enable */
1234 CMD_RD_D_DLY2_SEL,
1235
1236 /* [Data Tune]DAT Pad RX Delay Line1 Control (for MSDC RD),
1237 * Total 32 stages [Data Tune]
1238 */
1239 DAT_RD_D_DLY1,
1240
1241 /* [Data Tune]DAT Pad RX Delay Line1 Sel-> delay cell1 enable */
1242 DAT_RD_D_DLY1_SEL,
1243
1244 /* [Data Tune]DAT Pad RX Delay Line2 Control (for MSDC RD),
1245 * Total 32 stages [Data Tune]
1246 */
1247 DAT_RD_D_DLY2,
1248
1249 /* [Data Tune]DAT Pad RX Delay Line2 Sel-> delay cell2 enable */
1250 DAT_RD_D_DLY2_SEL,
1251
1252 /* Internal MSDC clock phase selection. Total 8 stages,
1253 * each stage can delay 1 clock period of msdc_src_ck
1254 */
1255 INT_DAT_LATCH_CK,
1256
1257 /* DS Pad Z clk delay count, range: 0~63, Z dly1(0~31)+Z dly2(0~31) */
1258 EMMC50_DS_Z_DLY1,
1259
1260 /* DS Pad Z clk del sel: [dly2_sel:dly1_sel]
1261 * -> [0,1]: dly1 enable [1,2]:dl2 & dly1 enable ,else :no dly enable
1262 */
1263 EMMC50_DS_Z_DLY1_SEL,
1264
1265 /* DS Pad Z clk delay count, range: 0~63, Z dly1(0~31)+Z dly2(0~31) */
1266 EMMC50_DS_Z_DLY2,
1267
1268 /* DS Pad Z clk del sel: [dly2_sel:dly1_sel]
1269 * -> [0,1]: dly1 enable [1,2]:dl2 & dly1 enable ,else :no dly enable
1270 */
1271 EMMC50_DS_Z_DLY2_SEL,
1272
1273 /* DS Pad Z_DLY clk delay count, range: 0~31 */
1274 EMMC50_DS_ZDLY_DLY,
1275 TUNING_PARAM_COUNT,
1276
1277 /* Data line rising/falling latch fine tune selection
1278 * in read transaction.
1279 * 1'b0: All data line share one value
1280 * indicated by MSDC_IOCON.R_D_SMPL.
1281 * 1'b1: Each data line has its own selection value
1282 * indicated by Data line (x): MSDC_IOCON.R_D(x)_SMPL
1283 */
1284 READ_DATA_SMPL_SEL,
1285
1286 /* Data line rising/falling latch fine tune selection
1287 * in write transaction.
1288 * 1'b0: All data line share one value indicated
1289 * by MSDC_IOCON.W_D_SMPL.
1290 * 1'b1: Each data line has its own selection value indicated
1291 * by Data line (x): MSDC_IOCON.W_D(x)_SMPL
1292 */
1293 WRITE_DATA_SMPL_SEL,
1294
1295 /* Data line delay line fine tune selection.
1296 * 1'b0: All data line share one delay
1297 * selection value indicated by PAD_TUNE.PAD_DAT_RD_RXDLY.
1298 * 1'b1: Each data line has its own delay selection value indicated by
1299 * Data line (x): DAT_RD_DLY(x).DAT0_RD_DLY
1300 */
1301 DATA_DLYLINE_SEL,
1302
1303 /* [Data Tune]CMD & DATA Pin tune Data Selection[Data Tune Sel] */
1304 MSDC_DAT_TUNE_SEL,
1305
1306 /* [Async_FIFO Mode Sel For Write Path] */
1307 MSDC_WCRC_ASYNC_FIFO_SEL,
1308
1309 /* [Async_FIFO Mode Sel For CMD Path] */
1310 MSDC_RESP_ASYNC_FIFO_SEL,
1311
1312 /* Write Path Mux for emmc50 function & emmc45 function ,
1313 * Only emmc50 design valid,[1-eMMC50, 0-eMMC45]
1314 */
1315 EMMC50_WDATA_MUX_EN,
1316
1317 /* CMD Path Mux for emmc50 function & emmc45 function ,
1318 * Only emmc50 design valid,[1-eMMC50, 0-eMMC45]
1319 */
1320 EMMC50_CMD_MUX_EN,
1321
1322 /* write data crc status async fifo output edge select */
1323 EMMC50_WDATA_EDGE,
1324
1325 /* CKBUF in CKGEN Delay Selection. Total 32 stages */
1326 CKGEN_MSDC_DLY_SEL,
1327
1328 /* CMD response turn around period.
1329 * The turn around cycle = CMD_RSP_TA_CNTR + 2,
1330 * Only for UHS104 mode, this register should be
1331 * set to 0 in non-UHS104 mode
1332 */
1333 CMD_RSP_TA_CNTR,
1334
1335 /* Write data and CRC status turn around period.
1336 * The turn around cycle = WRDAT_CRCS_TA_CNTR + 2,
1337 * Only for UHS104 mode, this register should be
1338 * set to 0 in non-UHS104 mode
1339 */
1340 WRDAT_CRCS_TA_CNTR,
1341
1342 /* CLK Pad TX Delay Control.
1343 * This register is used to add delay to CLK phase.
1344 * Total 32 stages
1345 */
1346 PAD_CLK_TXDLY,
1347 TOTAL_PARAM_COUNT
1348};
1349
1350/*
1351 *********************************************************
1352 * Feature Control Definition *
1353 *********************************************************
1354 */
1355#define AUTOK_OFFLINE_TUNE_TX_ENABLE 1
1356#define AUTOK_OFFLINE_TUNE_ENABLE 0
1357#define HS400_OFFLINE_TUNE_ENABLE 0
1358#define HS200_OFFLINE_TUNE_ENABLE 0
1359#define HS400_DSCLK_NEED_TUNING 0
1360#define AUTOK_PARAM_DUMP_ENABLE 0
1361/* #define CHIP_DENALI_3_DAT_TUNE */
1362/* #define SDIO_TUNE_WRITE_PATH */
1363
1364enum TUNE_TYPE {
1365 TUNE_CMD = 0,
1366 TUNE_DATA,
1367 TUNE_LATCH_CK,
1368};
1369
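/*
 * Poll 'expr' until it becomes false, giving up after 'retry' rounds of
 * 'cnt' iterations each; WARN_ON() fires if all retries are exhausted.
 */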
1370#define autok_msdc_retry(expr, retry, cnt) \
1371 do { \
1372 int backup = cnt; \
1373 while (retry) { \
1374 if (!(expr)) \
1375 break; \
1376 if (cnt-- == 0) { \
1377 retry--; cnt = backup; \
1378 } \
1379 } \
1380 WARN_ON(retry == 0); \
1381} while (0)
1382
1383#define autok_msdc_reset() \
1384 do { \
1385 int retry = 3, cnt = 1000; \
1386 sdr_set_bits(base + MSDC_CFG, MSDC_CFG_RST); \
1387 /* ensure reset operation be sequential */ \
1388 mb(); \
1389 autok_msdc_retry(readl(base + MSDC_CFG) & \
1390 MSDC_CFG_RST, retry, cnt); \
1391 } while (0)
1392
1393#define msdc_rxfifocnt() \
1394 ((readl(base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0)
1395#define msdc_txfifocnt() \
1396 ((readl(base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16)
1397
1398#define wait_cond(cond, tmo, left) \
1399 do { \
1400 u32 t = tmo; \
1401 while (1) { \
1402 if ((cond) || (t == 0)) \
1403 break; \
1404 if (t > 0) { \
1405 ndelay(1); \
1406 t--; \
1407 } \
1408 } \
1409 left = t; \
1410 } while (0)
1411
1412
1413#define msdc_clear_fifo() \
1414 do { \
1415 int retry = 5, cnt = 1000; \
1416 sdr_set_bits(base + MSDC_FIFOCS, MSDC_FIFOCS_CLR); \
1417 /* ensure fifo clear operation be sequential */ \
1418 mb(); \
1419 autok_msdc_retry(readl(base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR, \
1420 retry, cnt); \
1421 } while (0)
1422
1423struct AUTOK_PARAM_RANGE {
1424 unsigned int start;
1425 unsigned int end;
1426};
1427
1428struct AUTOK_PARAM_INFO {
1429 struct AUTOK_PARAM_RANGE range;
1430 char *param_name;
1431};
1432
1433struct BOUND_INFO {
1434 unsigned int Bound_Start;
1435 unsigned int Bound_End;
1436 unsigned int Bound_width;
1437 bool is_fullbound;
1438};
1439
1440#define BD_MAX_CNT 4 /* Max Allowed Boundary Number */
1441struct AUTOK_SCAN_RES {
1442 /* Bound info record; currently at most 2 bounds are expected to
1443 * exist, but in extreme cases there may be 4 bounds
1444 */
1445 struct BOUND_INFO bd_info[BD_MAX_CNT];
1446 /* Bound cnt record, must be in range [0,3] */
1447 unsigned int bd_cnt;
1448 /* Full boundary cnt record */
1449 unsigned int fbd_cnt;
1450};
1451
1452struct AUTOK_REF_INFO {
1453 /* inf[0] - rising edge res, inf[1] - falling edge res */
1454 struct AUTOK_SCAN_RES scan_info[2];
1455 /* optimised sample edge select */
1456 unsigned int opt_edge_sel;
1457 /* optimised dly cnt sel */
1458 unsigned int opt_dly_cnt;
1459 /* how many delay cells one clock cycle spans; if cycle_cnt is 0,
1460 * it cannot be calculated from the current boundary info
1461 */
1462 unsigned int cycle_cnt;
1463};
1464
1465unsigned int do_autok_offline_tune_tx;
1466u8 sdio_autok_res[TUNING_PARAM_COUNT];
1467
1468static const struct AUTOK_PARAM_INFO autok_param_info[] = {
1469 {{0, 1}, "CMD_EDGE"},
1470 /* async fifo mode Pad dat edge must fix to 0 */
1471 {{0, 1}, "RDATA_EDGE"},
1472 {{0, 1}, "RD_FIFO_EDGE"},
1473 {{0, 1}, "WD_FIFO_EDGE"},
1474
1475 /* Cmd Pad Tune Data Phase */
1476 {{0, 31}, "CMD_RD_D_DLY1"},
1477 {{0, 1}, "CMD_RD_D_DLY1_SEL"},
1478 {{0, 31}, "CMD_RD_D_DLY2"},
1479 {{0, 1}, "CMD_RD_D_DLY2_SEL"},
1480
1481 /* Data Pad Tune Data Phase */
1482 {{0, 31}, "DAT_RD_D_DLY1"},
1483 {{0, 1}, "DAT_RD_D_DLY1_SEL"},
1484 {{0, 31}, "DAT_RD_D_DLY2"},
1485 {{0, 1}, "DAT_RD_D_DLY2_SEL"},
1486
1487 /* Latch CK Delay for data read when clock stop */
1488 {{0, 7}, "INT_DAT_LATCH_CK"},
1489
1490 /* eMMC50 Related tuning param */
1491 {{0, 31}, "EMMC50_DS_Z_DLY1"},
1492 {{0, 1}, "EMMC50_DS_Z_DLY1_SEL"},
1493 {{0, 31}, "EMMC50_DS_Z_DLY2"},
1494 {{0, 1}, "EMMC50_DS_Z_DLY2_SEL"},
1495 {{0, 31}, "EMMC50_DS_ZDLY_DLY"},
1496
1497 /* ================================================= */
1498 /* Timing Related Mux & Common Setting Config */
1499 /* all data line path share sample edge */
1500 {{0, 1}, "READ_DATA_SMPL_SEL"},
1501 {{0, 1}, "WRITE_DATA_SMPL_SEL"},
1502 /* clK tune all data Line share dly */
1503 {{0, 1}, "DATA_DLYLINE_SEL"},
1504 /* data tune mode select */
1505 {{0, 1}, "MSDC_WCRC_ASYNC_FIFO_SEL"},
1506 /* data tune mode select */
1507 {{0, 1}, "MSDC_RESP_ASYNC_FIFO_SEL"},
1508
1509 /* eMMC50 Function Mux */
1510 /* write path switch to emmc45 */
1511 {{0, 1}, "EMMC50_WDATA_MUX_EN"},
1512 /* response path switch to emmc45 */
1513 {{0, 1}, "EMMC50_CMD_MUX_EN"},
1514 {{0, 1}, "EMMC50_WDATA_EDGE"},
1515 /* Common Setting Config */
1516 {{0, 31}, "CKGEN_MSDC_DLY_SEL"},
1517 {{1, 7}, "CMD_RSP_TA_CNTR"},
1518 {{1, 7}, "WRDAT_CRCS_TA_CNTR"},
1519 /* tx clk dly fix to 0 for HQA res */
1520 {{0, 31}, "PAD_CLK_TXDLY"},
1521};
1522
1523static int autok_send_tune_cmd(struct msdc_host *host, unsigned int opcode,
1524 enum TUNE_TYPE tune_type_value)
1525{
1526 void __iomem *base = host->base;
1527 unsigned int value;
1528 unsigned int rawcmd = 0;
1529 unsigned int arg = 0;
1530 unsigned int sts = 0;
1531 unsigned int wints = 0;
1532 unsigned int tmo = 0;
1533 unsigned int left = 0;
1534 unsigned int fifo_have = 0;
1535 unsigned int fifo_1k_cnt = 0;
1536 unsigned int i = 0;
1537 int ret = E_RESULT_PASS;
1538
1539 switch (opcode) {
1540 case MMC_SEND_EXT_CSD:
1541 rawcmd = (512 << 16) | (0 << 13) | (1 << 11) | (1 << 7) | (8);
1542 arg = 0;
1543 if (tune_type_value == TUNE_LATCH_CK)
1544 writel(host->tune_latch_ck_cnt, base + SDC_BLK_NUM);
1545 else
1546 writel(1, base + SDC_BLK_NUM);
1547 break;
1548 case MMC_STOP_TRANSMISSION:
1549 rawcmd = (1 << 14) | (7 << 7) | (12);
1550 arg = 0;
1551 break;
1552 case MMC_SEND_STATUS:
1553 rawcmd = (1 << 7) | (13);
1554 arg = (1 << 16);
1555 break;
1556 case MMC_READ_SINGLE_BLOCK:
1557 left = 512;
1558 rawcmd = (512 << 16) | (0 << 13) | (1 << 11) | (1 << 7) | (17);
1559 arg = 0;
1560 if (tune_type_value == TUNE_LATCH_CK)
1561 writel(host->tune_latch_ck_cnt, base + SDC_BLK_NUM);
1562 else
1563 writel(1, base + SDC_BLK_NUM);
1564 break;
1565 case MMC_SEND_TUNING_BLOCK:
1566 left = 64;
1567 rawcmd = (64 << 16) | (0 << 13) | (1 << 11) | (1 << 7) | (19);
1568 arg = 0;
1569 if (tune_type_value == TUNE_LATCH_CK)
1570 writel(host->tune_latch_ck_cnt, base + SDC_BLK_NUM);
1571 else
1572 writel(1, base + SDC_BLK_NUM);
1573 break;
1574 case MMC_SEND_TUNING_BLOCK_HS200:
1575 left = 128;
1576 rawcmd = (128 << 16) | (0 << 13) | (1 << 11) | (1 << 7) | (21);
1577 arg = 0;
1578 if (tune_type_value == TUNE_LATCH_CK)
1579 writel(host->tune_latch_ck_cnt, base + SDC_BLK_NUM);
1580 else
1581 writel(1, base + SDC_BLK_NUM);
1582 break;
1583 case MMC_WRITE_BLOCK:
1584 rawcmd = (512 << 16) | (1 << 13) | (1 << 11) | (1 << 7) | (24);
1585 if (TUNE_DATA_TX_ADDR >= 0)
1586 arg = TUNE_DATA_TX_ADDR;
1587 else
1588 arg = host->mmc->card->ext_csd.sectors
1589 + TUNE_DATA_TX_ADDR;
1590 break;
1591 case SD_IO_RW_DIRECT:
1592 break;
1593 case SD_IO_RW_EXTENDED:
1594 break;
1595 }
1596
1597 while ((readl(base + SDC_STS) & SDC_STS_SDCBUSY))
1598 ;
1599
1600 /* clear fifo */
1601 if ((tune_type_value == TUNE_CMD) || (tune_type_value == TUNE_DATA)) {
1602 autok_msdc_reset();
1603 msdc_clear_fifo();
1604 writel(0xffffffff, base + MSDC_INT);
1605 }
1606
1607 /* start command */
1608 writel(arg, base + SDC_ARG);
1609 writel(rawcmd, base + SDC_CMD);
1610
1611 /* wait interrupt status */
1612 wints = MSDC_INT_CMDTMO | MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR;
1613 tmo = 0x3FFFFF;
1614 wait_cond(((sts = readl(base + MSDC_INT)) & wints), tmo, tmo);
1615 if (tmo == 0) {
1616 ret = E_RESULT_CMD_TMO;
1617 goto end;
1618 }
1619
1620 writel((sts & wints), base + MSDC_INT);
1621 if (sts == 0) {
1622 ret = E_RESULT_CMD_TMO;
1623 goto end;
1624 }
1625
1626 if (sts & MSDC_INT_CMDRDY) {
1627 if (tune_type_value == TUNE_CMD) {
1628 ret = E_RESULT_PASS;
1629 goto end;
1630 }
1631 } else if (sts & MSDC_INT_RSPCRCERR) {
1632 ret = E_RESULT_RSP_CRC;
1633 goto end;
1634 } else if (sts & MSDC_INT_CMDTMO) {
1635 ret = E_RESULT_CMD_TMO;
1636 goto end;
1637 }
1638 if ((tune_type_value != TUNE_LATCH_CK) &&
1639 (tune_type_value != TUNE_DATA))
1640 goto skip_tune_latch_ck_and_tune_data;
1641
1642 while ((readl(base + SDC_STS) & SDC_STS_SDCBUSY)) {
1643 if (tune_type_value == TUNE_LATCH_CK) {
1644 fifo_have = msdc_rxfifocnt();
1645 if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
1646 (opcode == MMC_READ_SINGLE_BLOCK) ||
1647 (opcode == MMC_SEND_EXT_CSD) ||
1648 (opcode == MMC_SEND_TUNING_BLOCK)) {
1649 sdr_set_field(base + MSDC_DBG_SEL,
1650 0xffff << 0, 0x0b);
1651 sdr_get_field(base + MSDC_DBG_OUT,
1652 0x7ff << 0, &fifo_1k_cnt);
1653 if ((fifo_1k_cnt >= MSDC_FIFO_THD_1K) &&
1654 (fifo_have >= MSDC_FIFO_SZ)) {
1655 value = readl(base + MSDC_RXDATA);
1656 value = readl(base + MSDC_RXDATA);
1657 value = readl(base + MSDC_RXDATA);
1658 value = readl(base + MSDC_RXDATA);
1659 }
1660 }
1661 } else if ((tune_type_value == TUNE_DATA) &&
1662 (opcode == MMC_WRITE_BLOCK)) {
1663 for (i = 0; i < 64; i++) {
1664 writel(0x5af00fa5, base + MSDC_TXDATA);
1665 writel(0x33cc33cc, base + MSDC_TXDATA);
1666 }
1667
1668 while ((readl(base + SDC_STS) & SDC_STS_SDCBUSY))
1669 ;
1670 }
1671 }
1672
1673 sts = readl(base + MSDC_INT);
1674 wints = MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO;
1675 if (sts) {
1676 /* clear status */
1677 writel((sts & wints), base + MSDC_INT);
1678 if (sts & MSDC_INT_XFER_COMPL)
1679 ret = E_RESULT_PASS;
1680 if (MSDC_INT_DATCRCERR & sts)
1681 ret = E_RESULT_DAT_CRC;
1682 if (MSDC_INT_DATTMO & sts)
1683 ret = E_RESULT_DAT_TMO;
1684 }
1685
1686skip_tune_latch_ck_and_tune_data:
1687 while ((readl(base + SDC_STS) & SDC_STS_SDCBUSY))
1688 ;
1689 if ((tune_type_value == TUNE_CMD) || (tune_type_value == TUNE_DATA))
1690 msdc_clear_fifo();
1691
1692end:
1693 if (opcode == MMC_STOP_TRANSMISSION) {
1694 while ((readl(base + MSDC_PS) & 0x10000) != 0x10000)
1695 ;
1696 }
1697
1698 return ret;
1699}
1700
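/*
 * Render a 64-bit scan result as a string of 'O' (pass) / 'X' (fail)
 * characters and return the length of the longest run of passing samples.
 */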
1701static int autok_simple_score64(char *res_str64, u64 result64)
1702{
1703 unsigned int bit = 0;
1704 unsigned int num = 0;
1705 unsigned int old = 0;
1706
1707 if (result64 == 0) {
1708 /* maybe result is 0 */
1709 strcpy(res_str64,
1710 "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO");
1711 return 64;
1712 }
1713 if (result64 == 0xFFFFFFFFFFFFFFFF) {
1714 strcpy(res_str64,
1715 "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX");
1716 return 0;
1717 }
1718
1719 /* find the longest run of consecutive passing (zero) bits */
1720 while (bit < 64) {
1721 if (result64 & ((u64) (1LL << bit))) {
1722 res_str64[bit] = 'X';
1723 bit++;
1724 if (old < num)
1725 old = num;
1726 num = 0;
1727 continue;
1728 }
1729 res_str64[bit] = 'O';
1730 bit++;
1731 num++;
1732 }
1733 if (num > old)
1734 old = num;
1735
1736 return old;
1737}
1738
1739enum {
1740 RD_SCAN_NONE,
1741 RD_SCAN_PAD_BOUND_S,
1742 RD_SCAN_PAD_BOUND_E,
1743 RD_SCAN_PAD_MARGIN,
1744};
1745
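/*
 * Walk the 64-bit raw scan result with a small state machine and record
 * each failure boundary (start, end, width, full/partial) in scan_res,
 * merging holes no wider than AUTOK_TUNING_INACCURACY into the previous
 * boundary (at most four times).
 */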
1746static int autok_check_scan_res64(u64 rawdat, struct AUTOK_SCAN_RES *scan_res)
1747{
1748 unsigned int bit;
1749 unsigned int filter = 4;
1750 struct BOUND_INFO *pBD = (struct BOUND_INFO *)scan_res->bd_info;
1751 unsigned int RawScanSta = RD_SCAN_NONE;
1752
1753 for (bit = 0; bit < 64; bit++) {
1754 if (rawdat & (1LL << bit)) {
1755 switch (RawScanSta) {
1756 case RD_SCAN_NONE:
1757 RawScanSta = RD_SCAN_PAD_BOUND_S;
1758 pBD->Bound_Start = 0;
1759 pBD->Bound_width = 1;
1760 scan_res->bd_cnt += 1;
1761 break;
1762 case RD_SCAN_PAD_MARGIN:
1763 RawScanSta = RD_SCAN_PAD_BOUND_S;
1764 pBD->Bound_Start = bit;
1765 pBD->Bound_width = 1;
1766 scan_res->bd_cnt += 1;
1767 break;
1768 case RD_SCAN_PAD_BOUND_E:
1769 if ((filter) && ((bit - pBD->Bound_End) <=
1770 AUTOK_TUNING_INACCURACY)) {
1771 ATK_DBG(ATK_TRACE,
1772 "[AUTOK]WARN: Try to filter the holes\n");
1773 RawScanSta = RD_SCAN_PAD_BOUND_S;
1774
1775 pBD->Bound_width += (bit -
1776 pBD->Bound_End);
1777 pBD->Bound_End = 0;
1778 filter--;
1779
1780 /* update full bound info */
1781 if (pBD->is_fullbound) {
1782 pBD->is_fullbound = 0;
1783 scan_res->fbd_cnt -= 1;
1784 }
1785 } else {
1786 /* No filtering; check and get the next
1787 * boundary information
1788 */
1789 RawScanSta = RD_SCAN_PAD_BOUND_S;
1790 pBD++;
1791 pBD->Bound_Start = bit;
1792 pBD->Bound_width = 1;
1793 scan_res->bd_cnt += 1;
1794 if (scan_res->bd_cnt > BD_MAX_CNT) {
1795 ATK_ERR(
1796 "[AUTOK]Error: more than %d Boundary Exist\n",
1797 BD_MAX_CNT);
1798 return -1;
1799 }
1800 }
1801 break;
1802 case RD_SCAN_PAD_BOUND_S:
1803 pBD->Bound_width++;
1804 break;
1805 default:
1806 break;
1807 }
1808 } else {
1809 switch (RawScanSta) {
1810 case RD_SCAN_NONE:
1811 RawScanSta = RD_SCAN_PAD_MARGIN;
1812 break;
1813 case RD_SCAN_PAD_BOUND_S:
1814 RawScanSta = RD_SCAN_PAD_BOUND_E;
1815 pBD->Bound_End = bit - 1;
1816 /* update full bound info */
1817 if (pBD->Bound_Start > 0) {
1818 pBD->is_fullbound = 1;
1819 scan_res->fbd_cnt += 1;
1820 }
1821 break;
1822 case RD_SCAN_PAD_MARGIN:
1823 case RD_SCAN_PAD_BOUND_E:
1824 default:
1825 break;
1826 }
1827 }
1828 }
1829 if ((pBD->Bound_End == 0) && (pBD->Bound_width != 0))
1830 pBD->Bound_End = pBD->Bound_Start + pBD->Bound_width - 1;
1831
1832 return 0;
1833}
1834
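/*
 * Handle the corner cases where one or both sample edges show no usable
 * window: when a full 1T calculation is impossible, pick the opposite
 * edge and place the delay in the middle of its widest passing region.
 */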
1835static int autok_pad_dly_corner_check(struct AUTOK_REF_INFO *pInfo)
1836{
1837 /* scan result @ rising edge */
1838 struct AUTOK_SCAN_RES *pBdInfo_R = NULL;
1839 /* scan result @ falling edge */
1840 struct AUTOK_SCAN_RES *pBdInfo_F = NULL;
1841 struct AUTOK_SCAN_RES *p_Temp[2] = {NULL,};
1842 unsigned int i, j, k, l;
1843 unsigned int pass_bd_size[BD_MAX_CNT + 1];
1844 unsigned int max_pass = 0;
1845 unsigned int max_size = 0;
1846 unsigned int bd_max_size = 0;
1847 unsigned int bd_overlap = 0;
1848 unsigned int corner_case_flag = 0;
1849
1850 pBdInfo_R = &(pInfo->scan_info[0]);
1851 pBdInfo_F = &(pInfo->scan_info[1]);
1852 /*
1853 * for corner case
1854 * oooooooooooooooooo rising has no fail bound
1855 * oooooooooooooooooo falling has no fail bound
1856 */
1857 if ((pBdInfo_R->bd_cnt == 0) && (pBdInfo_F->bd_cnt == 0)) {
1858 ATK_ERR("[AUTOK]Warn: can't find boundary on either edge\r\n");
1859 pInfo->opt_dly_cnt = 31;
1860 pInfo->opt_edge_sel = 0;
1861 return AUTOK_RECOVERABLE_ERROR;
1862 }
1863 /*
1864 * for corner case
1865 * xxxxxxxxxxxxxxxxxxxx rising only has one boundary,but all fail
1866 * oooooooooxxooooooo falling has normal boundary
1867 * or
1868 * ooooooooooooxooooo rising has normal boundary
1869 * xxxxxxxxxxxxxxxxxxxx falling only has one boundary,but all fail
1870 */
1871 if ((pBdInfo_R->bd_cnt == 1) && (pBdInfo_F->bd_cnt == 1)
1872 && (pBdInfo_R->bd_info[0].Bound_Start == 0)
1873 && (pBdInfo_R->bd_info[0].Bound_End == 63)
1874 && (pBdInfo_F->bd_info[0].Bound_Start == 0)
1875 && (pBdInfo_F->bd_info[0].Bound_End == 63)) {
1876 ATK_ERR("[AUTOK]Err: can't find a window on either edge\r\n");
1877 return AUTOK_NONE_RECOVERABLE_ERROR;
1878 }
1879 for (j = 0; j < ARRAY_SIZE(p_Temp); j++) {
1880 if (j == 0) {
1881 p_Temp[0] = pBdInfo_R;
1882 p_Temp[1] = pBdInfo_F;
1883 } else {
1884 p_Temp[0] = pBdInfo_F;
1885 p_Temp[1] = pBdInfo_R;
1886 }
1887 /* check boundary overlap */
1888 for (k = 0; k < p_Temp[0]->bd_cnt; k++) {
1889 for (l = 0; l < p_Temp[1]->bd_cnt; l++)
1890 if (((p_Temp[0]->bd_info[k].Bound_Start
1891 >= p_Temp[1]->bd_info[l].Bound_Start)
1892 && (p_Temp[0]->bd_info[k].Bound_Start
1893 <= p_Temp[1]->bd_info[l].Bound_End))
1894 || ((p_Temp[0]->bd_info[k].Bound_End
1895 <= p_Temp[1]->bd_info[l].Bound_End)
1896 && (p_Temp[0]->bd_info[k].Bound_End
1897 >= p_Temp[1]->bd_info[l].Bound_Start))
1898 || ((p_Temp[1]->bd_info[l].Bound_Start
1899 >= p_Temp[0]->bd_info[k].Bound_Start)
1900 && (p_Temp[1]->bd_info[l].Bound_Start
1901 <= p_Temp[0]->bd_info[k].Bound_End)))
1902 bd_overlap = 1;
1903 }
1904 /* check max boundary size */
1905 for (k = 0; k < p_Temp[0]->bd_cnt; k++) {
1906 if ((p_Temp[0]->bd_info[k].Bound_End
1907 - p_Temp[0]->bd_info[k].Bound_Start)
1908 >= 20)
1909 bd_max_size = 1;
1910 }
1911 if (((bd_overlap == 1)
1912 && (bd_max_size == 1))
1913 || ((p_Temp[1]->bd_cnt == 0)
1914 && (bd_max_size == 1))) {
1915 corner_case_flag = 1;
1916 }
1917 if (((p_Temp[0]->bd_cnt == 1)
1918 && (p_Temp[0]->bd_info[0].Bound_Start == 0)
1919 && (p_Temp[0]->bd_info[0].Bound_End == 63))
1920 || (corner_case_flag == 1)) {
1921 if (j == 0)
1922 pInfo->opt_edge_sel = 1;
1923 else
1924 pInfo->opt_edge_sel = 0;
1925 /* 1T calc failed; find the largest pass window and select its midpoint */
1926 switch (p_Temp[1]->bd_cnt) {
1927 case 4:
1928 pass_bd_size[0] =
1929 p_Temp[1]->bd_info[0].Bound_Start - 0;
1930 pass_bd_size[1] =
1931 p_Temp[1]->bd_info[1].Bound_Start
1932 - p_Temp[1]->bd_info[0].Bound_End;
1933 pass_bd_size[2] =
1934 p_Temp[1]->bd_info[2].Bound_Start
1935 - p_Temp[1]->bd_info[1].Bound_End;
1936 pass_bd_size[3] =
1937 p_Temp[1]->bd_info[3].Bound_Start
1938 - p_Temp[1]->bd_info[2].Bound_End;
1939 pass_bd_size[4] =
1940 63 - p_Temp[1]->bd_info[3].Bound_End;
1941 max_size = pass_bd_size[0];
1942 max_pass = 0;
1943 for (i = 0; i < 5; i++) {
1944 if (pass_bd_size[i] >= max_size) {
1945 max_size = pass_bd_size[i];
1946 max_pass = i;
1947 }
1948 }
1949 if (max_pass == 0)
1950 pInfo->opt_dly_cnt =
1951 p_Temp[1]->bd_info[0].Bound_Start
1952 / 2;
1953 else if (max_pass == 4)
1954 pInfo->opt_dly_cnt =
1955 (63 +
1956 p_Temp[1]->bd_info[3].Bound_End)
1957 / 2;
1958 else {
1959 pInfo->opt_dly_cnt =
1960 (p_Temp[1]->bd_info[max_pass].Bound_Start
1961 +
1962 p_Temp[1]->bd_info[max_pass - 1].Bound_End)
1963 / 2;
1964 }
1965 break;
1966 case 3:
1967 pass_bd_size[0] =
1968 p_Temp[1]->bd_info[0].Bound_Start - 0;
1969 pass_bd_size[1] =
1970 p_Temp[1]->bd_info[1].Bound_Start
1971 - p_Temp[1]->bd_info[0].Bound_End;
1972 pass_bd_size[2] =
1973 p_Temp[1]->bd_info[2].Bound_Start
1974 - p_Temp[1]->bd_info[1].Bound_End;
1975 pass_bd_size[3] =
1976 63 - p_Temp[1]->bd_info[2].Bound_End;
1977 max_size = pass_bd_size[0];
1978 max_pass = 0;
1979 for (i = 0; i < 4; i++) {
1980 if (pass_bd_size[i] >= max_size) {
1981 max_size = pass_bd_size[i];
1982 max_pass = i;
1983 }
1984 }
1985 if (max_pass == 0)
1986 pInfo->opt_dly_cnt =
1987 p_Temp[1]->bd_info[0].Bound_Start / 2;
1988 else if (max_pass == 3)
1989 pInfo->opt_dly_cnt =
1990 (63 + p_Temp[1]->bd_info[2].Bound_End) / 2;
1991 else {
1992 pInfo->opt_dly_cnt =
1993 (p_Temp[1]->bd_info[max_pass].Bound_Start
1994 +
1995 p_Temp[1]->bd_info[max_pass - 1].Bound_End)
1996 / 2;
1997 }
1998 break;
1999 case 2:
2000 pass_bd_size[0] =
2001 p_Temp[1]->bd_info[0].Bound_Start - 0;
2002 pass_bd_size[1] =
2003 p_Temp[1]->bd_info[1].Bound_Start
2004 - p_Temp[1]->bd_info[0].Bound_End;
2005 pass_bd_size[2] =
2006 63 - p_Temp[1]->bd_info[1].Bound_End;
2007 max_size = pass_bd_size[0];
2008 max_pass = 0;
2009 for (i = 0; i < 3; i++) {
2010 if (pass_bd_size[i] >= max_size) {
2011 max_size = pass_bd_size[i];
2012 max_pass = i;
2013 }
2014 }
2015 if (max_pass == 0)
2016 pInfo->opt_dly_cnt =
2017 p_Temp[1]->bd_info[0].Bound_Start / 2;
2018 else if (max_pass == 2)
2019 pInfo->opt_dly_cnt =
2020 (63 + p_Temp[1]->bd_info[1].Bound_End) / 2;
2021 else {
2022 pInfo->opt_dly_cnt =
2023 (p_Temp[1]->bd_info[max_pass].Bound_Start
2024 +
2025 p_Temp[1]->bd_info[max_pass - 1].Bound_End)
2026 / 2;
2027 }
2028 break;
2029 case 1:
2030 pass_bd_size[0] =
2031 p_Temp[1]->bd_info[0].Bound_Start - 0;
2032 pass_bd_size[1] =
2033 63 -
2034 p_Temp[1]->bd_info[0].Bound_End;
2035 max_size = pass_bd_size[0];
2036 max_pass = 0;
2037 for (i = 0; i < 2; i++) {
2038 if (pass_bd_size[i] >= max_size) {
2039 max_size = pass_bd_size[i];
2040 max_pass = i;
2041 }
2042 }
2043 if (max_pass == 0)
2044 pInfo->opt_dly_cnt =
2045 p_Temp[1]->bd_info[0].Bound_Start
2046 / 2;
2047 else if (max_pass == 1)
2048 pInfo->opt_dly_cnt =
2049 (63 +
2050 p_Temp[1]->bd_info[0].Bound_End)
2051 / 2;
2052 break;
2053 case 0:
2054 pInfo->opt_dly_cnt = 31;
2055 break;
2056 default:
2057 break;
2058 }
2059 return AUTOK_RECOVERABLE_ERROR;
2060 }
2061 }
2062 return 0;
2063}
2064
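/*
 * autok_pad_dly_sel - pick the sampling edge and pad delay from scan results
 *
 * scan_info[0]/[1] hold the fail-boundary layout seen while sweeping the
 * 64-step pad delay with rising/falling sampling.  From the boundary
 * positions the code estimates one clock period (cycle_cnt, in delay-cell
 * units) and derives a candidate delay for each edge; the edge with the
 * smaller candidate delay is stored in pInfo->opt_edge_sel / opt_dly_cnt.
 * Returns 0 on success and a negative value when the boundary layout
 * cannot be analysed.
 *
 * Illustrative example (assumed values): two full boundaries on the rising
 * edge with midpoints 20 and 52 give cycle_cnt = 32, so the rising-edge
 * candidate delay is 20 - 32 / 2 = 4.
 */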
2065static int autok_pad_dly_sel(struct AUTOK_REF_INFO *pInfo)
2066{
2067 /* scan result @ rising edge */
2068 struct AUTOK_SCAN_RES *pBdInfo_R = NULL;
2069 /* scan result @ falling edge */
2070 struct AUTOK_SCAN_RES *pBdInfo_F = NULL;
2071 /* Save the first boundary info for calc optimised dly count */
2072 struct BOUND_INFO *pBdPrev = NULL;
2073 /* Save the second boundary info for calc optimised dly count */
2074 struct BOUND_INFO *pBdNext = NULL;
2075 struct BOUND_INFO *pBdTmp = NULL;
2076 /* Full Boundary count */
2077 unsigned int FBound_Cnt_R = 0;
2078 unsigned int Bound_Cnt_R = 0;
2079 unsigned int Bound_Cnt_F = 0;
2080 unsigned int cycle_cnt = 64;
2081 int uBD_mid_prev = 0;
2082 int uBD_mid_next = 0;
2083 int uBD_width = 3;
2084 int uDlySel_F = 0;
2085 int uDlySel_R = 0;
2086 /* for falling edge margin compress */
2087 int uMgLost_F = 0;
2088 /* for rising edge margin compress */
2089 int uMgLost_R = 0;
2090 unsigned int i;
2091 unsigned int ret = 0;
2092 int corner_res = 0;
2093
2094 pBdInfo_R = &(pInfo->scan_info[0]);
2095 pBdInfo_F = &(pInfo->scan_info[1]);
2096 FBound_Cnt_R = pBdInfo_R->fbd_cnt;
2097 Bound_Cnt_R = pBdInfo_R->bd_cnt;
2098 Bound_Cnt_F = pBdInfo_F->bd_cnt;
2099
2100 corner_res = autok_pad_dly_corner_check(pInfo);
2101 if (corner_res == -1)
2102 return 0;
2103 else if (corner_res == -2)
2104 return -2;
2105
2106 switch (FBound_Cnt_R) {
2107 case 4: /* SSSS Corner may cover 2~3T */
2108 case 3:
2109		ATK_ERR("[AUTOK]Warning: Too many full boundaries:%d\r\n",
2110 FBound_Cnt_R);
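		/* fall through */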
2111	case 2: /* mode_1 : 2 full boundaries */
2112 for (i = 0; i < BD_MAX_CNT; i++) {
2113 if (pBdInfo_R->bd_info[i].is_fullbound) {
2114 if (pBdPrev == NULL) {
2115 pBdPrev = &(pBdInfo_R->bd_info[i]);
2116 } else {
2117 pBdNext = &(pBdInfo_R->bd_info[i]);
2118 break;
2119 }
2120 }
2121 }
2122
2123 if (pBdPrev && pBdNext) {
2124 uBD_mid_prev = (pBdPrev->Bound_Start +
2125 pBdPrev->Bound_End) / 2;
2126 uBD_mid_next = (pBdNext->Bound_Start +
2127 pBdNext->Bound_End) / 2;
2128 /* while in 2 full bound case, bd_width calc */
2129 uBD_width = (pBdPrev->Bound_width +
2130 pBdNext->Bound_width) / 2;
2131 cycle_cnt = uBD_mid_next - uBD_mid_prev;
2132 /* delay count sel at rising edge */
2133 if (uBD_mid_prev >= cycle_cnt / 2) {
2134 uDlySel_R = uBD_mid_prev - cycle_cnt / 2;
2135 uMgLost_R = 0;
2136 } else if ((cycle_cnt / 2 - uBD_mid_prev) >
2137 AUTOK_MARGIN_THOLD) {
2138 uDlySel_R = uBD_mid_prev + cycle_cnt / 2;
2139 uMgLost_R = 0;
2140 } else {
2141 uDlySel_R = 0;
2142 uMgLost_R = cycle_cnt / 2 - uBD_mid_prev;
2143 }
2144 /* delay count sel at falling edge */
2145 pBdTmp = &(pBdInfo_R->bd_info[0]);
2146 if (pBdTmp->is_fullbound) {
2147 /* ooooxxxooooooxxxooo */
2148 uDlySel_F = uBD_mid_prev;
2149 uMgLost_F = 0;
2150 } else {
2151 /* xooooooxxxoooooooxxxoo */
2152 if (pBdTmp->Bound_End > uBD_width / 2) {
2153 uDlySel_F = (pBdTmp->Bound_End) -
2154 (uBD_width / 2);
2155 uMgLost_F = 0;
2156 } else {
2157 uDlySel_F = 0;
2158 uMgLost_F = (uBD_width / 2) -
2159 (pBdTmp->Bound_End);
2160 }
2161 }
2162 } else {
2163			/* error: cannot find 2 full boundaries */
2164			ATK_ERR("[AUTOK] cannot find 2 full boundaries @Mode1\n");
2165 return -1;
2166 }
2167 break;
2168
2169 case 1: /* rising edge find one full boundary */
2170 if (Bound_Cnt_R > 1) {
2171 /* mode_2: 1 full boundary and boundary count > 1 */
2172 pBdPrev = &(pBdInfo_R->bd_info[0]);
2173 pBdNext = &(pBdInfo_R->bd_info[1]);
2174
2175 if (pBdPrev->is_fullbound)
2176 uBD_width = pBdPrev->Bound_width;
2177 else
2178 uBD_width = pBdNext->Bound_width;
2179
2180 if ((pBdPrev->is_fullbound) ||
2181 (pBdNext->is_fullbound)) {
2182 if (pBdPrev->Bound_Start > 0)
2183 cycle_cnt = pBdNext->Bound_Start -
2184 pBdPrev->Bound_Start;
2185 else
2186 cycle_cnt = pBdNext->Bound_End -
2187 pBdPrev->Bound_End;
2188
2189 /* delay count sel@rising & falling edge */
2190 if (pBdPrev->is_fullbound) {
2191 uBD_mid_prev = (pBdPrev->Bound_Start +
2192 pBdPrev->Bound_End) / 2;
2193 uDlySel_F = uBD_mid_prev;
2194 uMgLost_F = 0;
2195 if (uBD_mid_prev >= cycle_cnt / 2) {
2196 uDlySel_R = uBD_mid_prev -
2197 cycle_cnt / 2;
2198 uMgLost_R = 0;
2199 } else if ((cycle_cnt / 2 -
2200 uBD_mid_prev) >
2201 AUTOK_MARGIN_THOLD) {
2202 uDlySel_R = uBD_mid_prev +
2203 cycle_cnt / 2;
2204 uMgLost_R = 0;
2205 } else {
2206 uDlySel_R = 0;
2207 uMgLost_R = cycle_cnt / 2 -
2208 uBD_mid_prev;
2209 }
2210 } else {
2211				/* first boundary is not a full boundary */
2212 uBD_mid_next = (pBdNext->Bound_Start +
2213 pBdNext->Bound_End) / 2;
2214 uDlySel_R = uBD_mid_next -
2215 cycle_cnt / 2;
2216 uMgLost_R = 0;
2217 if (pBdPrev->Bound_End >
2218 uBD_width / 2) {
2219 uDlySel_F = pBdPrev->Bound_End -
2220 (uBD_width / 2);
2221 uMgLost_F = 0;
2222 } else {
2223 uDlySel_F = 0;
2224 uMgLost_F = (uBD_width / 2) -
2225 (pBdPrev->Bound_End);
2226 }
2227 }
2228 } else {
2229				/* full bound must be in first 2 boundaries */
2230 return -1;
2231 }
2232 } else if (Bound_Cnt_F > 0) {
2233			/* mode_3: 1 full boundary and only
2234			 * one boundary exists @ rising edge
2235 */
2236 /* this boundary is full bound */
2237 pBdPrev = &(pBdInfo_R->bd_info[0]);
2238 pBdNext = &(pBdInfo_F->bd_info[0]);
2239 uBD_mid_prev = (pBdPrev->Bound_Start +
2240 pBdPrev->Bound_End) / 2;
2241 uBD_width = pBdPrev->Bound_width;
2242
2243 if (pBdNext->Bound_Start == 0) {
2244 cycle_cnt = (pBdPrev->Bound_End -
2245 pBdNext->Bound_End) * 2;
2246 } else if (pBdNext->Bound_End == 63) {
2247 cycle_cnt = (pBdNext->Bound_Start -
2248 pBdPrev->Bound_Start) * 2;
2249 } else {
2250 uBD_mid_next = (pBdNext->Bound_Start +
2251 pBdNext->Bound_End) / 2;
2252
2253 if (uBD_mid_next > uBD_mid_prev)
2254 cycle_cnt = (uBD_mid_next -
2255 uBD_mid_prev) * 2;
2256 else
2257 cycle_cnt = (uBD_mid_prev -
2258 uBD_mid_next) * 2;
2259 }
2260
2261 uDlySel_F = uBD_mid_prev;
2262 uMgLost_F = 0;
2263
2264 if (uBD_mid_prev >= cycle_cnt / 2) {
2265 /* case 1 */
2266 uDlySel_R = uBD_mid_prev - cycle_cnt / 2;
2267 uMgLost_R = 0;
2268 } else if (cycle_cnt / 2 - uBD_mid_prev <=
2269 AUTOK_MARGIN_THOLD) {
2270 /* case 2 */
2271 uDlySel_R = 0;
2272 uMgLost_R = cycle_cnt / 2 - uBD_mid_prev;
2273 } else if (cycle_cnt / 2 + uBD_mid_prev <= 63) {
2274 /* case 3 */
2275 uDlySel_R = cycle_cnt / 2 + uBD_mid_prev;
2276 uMgLost_R = 0;
2277 } else if (32 - uBD_mid_prev <= AUTOK_MARGIN_THOLD) {
2278 /* case 4 */
2279 uDlySel_R = 0;
2280 uMgLost_R = cycle_cnt / 2 - uBD_mid_prev;
2281 } else { /* case 5 */
2282 uDlySel_R = 63;
2283 uMgLost_R = uBD_mid_prev + cycle_cnt / 2 - 63;
2284 }
2285 } else {
2286			/* mode_4: no boundary found on the falling edge &
2287			 * only one full boundary exists on the rising edge
2288 */
2289 /* this boundary is full bound */
2290 pBdPrev = &(pBdInfo_R->bd_info[0]);
2291 uBD_mid_prev = (pBdPrev->Bound_Start +
2292 pBdPrev->Bound_End) / 2;
2293 uBD_width = pBdPrev->Bound_width;
2294
2295 if (pBdPrev->Bound_End > (64 - pBdPrev->Bound_Start))
2296 cycle_cnt = 2 * (pBdPrev->Bound_End + 1);
2297 else
2298 cycle_cnt = 2 * (64 - pBdPrev->Bound_Start);
2299
2300 uDlySel_R = (uBD_mid_prev >= 32) ? 0 : 63;
2301			/* Margin is enough, don't care about margin lost */
2302 uMgLost_R = 0xFF;
2303 uDlySel_F = uBD_mid_prev;
2304			/* Margin is enough, don't care about margin lost */
2305 uMgLost_F = 0xFF;
2306
2307 ATK_ERR("[AUTOK]Warning: 1T > %d\n", cycle_cnt);
2308 }
2309 break;
2310
2311	case 0: /* rising edge cannot find a full boundary */
2312 if (Bound_Cnt_R == 2) {
2313 pBdPrev = &(pBdInfo_R->bd_info[0]);
2314 /* this boundary is full bound */
2315 pBdNext = &(pBdInfo_F->bd_info[0]);
2316
2317 if (pBdNext->is_fullbound) {
2318				/* mode_5: rising edge has 2 boundaries
2319				 * (not full), falling edge has
2320				 * one full boundary
2321 */
2322 uBD_width = pBdNext->Bound_width;
2323 cycle_cnt = 2 * (pBdNext->Bound_End -
2324 pBdPrev->Bound_End);
2325 uBD_mid_next = (pBdNext->Bound_Start +
2326 pBdNext->Bound_End) / 2;
2327 uDlySel_R = uBD_mid_next;
2328 uMgLost_R = 0;
2329 if (pBdPrev->Bound_End >= uBD_width / 2) {
2330 uDlySel_F = pBdPrev->Bound_End -
2331 uBD_width / 2;
2332 uMgLost_F = 0;
2333 } else {
2334 uDlySel_F = 0;
2335 uMgLost_F = uBD_width / 2 -
2336 pBdPrev->Bound_End;
2337 }
2338 } else {
2339				/* for the falling edge there must be one full
2340				 * boundary between two boundary mids at rising
2341 */
2342 return -1;
2343 }
2344 } else if (Bound_Cnt_R == 1) {
2345 if (Bound_Cnt_F > 1) {
2346				/* when the rising edge has only one boundary
2347				 * (not full bound), the falling edge must not
2348				 * have more than 1 boundary
2349 */
2350 return -1;
2351 } else if (Bound_Cnt_F == 1) {
2352 /* mode_6: rising edge only 1 boundary
2353 * (not full Bound)
2354				 * & falling edge has only 1 boundary too
2355 */
2356 pBdPrev = &(pBdInfo_R->bd_info[0]);
2357 pBdNext = &(pBdInfo_F->bd_info[0]);
2358 if (pBdNext->is_fullbound) {
2359 uBD_width = pBdNext->Bound_width;
2360 } else {
2361 if (pBdNext->Bound_width >
2362 pBdPrev->Bound_width)
2363 uBD_width = pBdNext->Bound_width
2364 + 1;
2365 else
2366 uBD_width = pBdPrev->Bound_width
2367 + 1;
2368
2369 if (uBD_width < AUTOK_BD_WIDTH_REF)
2370 uBD_width = AUTOK_BD_WIDTH_REF;
2371 } /* Boundary width calc done */
2372
2373 if (pBdPrev->Bound_Start == 0) {
2374					/* Current design does not allow this */
2375 if (pBdNext->Bound_Start == 0)
2376 return -1;
2377
2378 cycle_cnt = (pBdNext->Bound_Start -
2379 pBdPrev->Bound_End +
2380 uBD_width) * 2;
2381 } else if (pBdPrev->Bound_End == 63) {
2382					/* Current design does not allow this */
2383 if (pBdNext->Bound_End == 63)
2384 return -1;
2385
2386 cycle_cnt = (pBdPrev->Bound_Start -
2387 pBdNext->Bound_End +
2388 uBD_width) * 2;
2389 } /* cycle count calc done */
2390
2391 /* calc optimise delay count */
2392 if (pBdPrev->Bound_Start == 0) {
2393 /* falling edge sel */
2394 if (pBdPrev->Bound_End >=
2395 uBD_width / 2) {
2396 uDlySel_F = pBdPrev->Bound_End -
2397 uBD_width / 2;
2398 uMgLost_F = 0;
2399 } else {
2400 uDlySel_F = 0;
2401 uMgLost_F = uBD_width / 2 -
2402 pBdPrev->Bound_End;
2403 }
2404
2405 /* rising edge sel */
2406 if (pBdPrev->Bound_End - uBD_width / 2 +
2407 cycle_cnt / 2 > 63) {
2408 uDlySel_R = 63;
2409 uMgLost_R =
2410 pBdPrev->Bound_End -
2411 uBD_width / 2 +
2412 cycle_cnt / 2 - 63;
2413 } else {
2414 uDlySel_R =
2415 pBdPrev->Bound_End -
2416 uBD_width / 2 +
2417 cycle_cnt / 2;
2418 uMgLost_R = 0;
2419 }
2420 } else if (pBdPrev->Bound_End == 63) {
2421 /* falling edge sel */
2422 if (pBdPrev->Bound_Start +
2423 uBD_width / 2 < 63) {
2424 uDlySel_F =
2425 pBdPrev->Bound_Start +
2426 uBD_width / 2;
2427 uMgLost_F = 0;
2428 } else {
2429 uDlySel_F = 63;
2430 uMgLost_F =
2431 pBdPrev->Bound_Start +
2432 uBD_width / 2 - 63;
2433 }
2434
2435 /* rising edge sel */
2436 if (pBdPrev->Bound_Start +
2437 uBD_width / 2 - cycle_cnt / 2 < 0) {
2438 uDlySel_R = 0;
2439 uMgLost_R =
2440 cycle_cnt / 2 -
2441 (pBdPrev->Bound_Start +
2442 uBD_width / 2);
2443 } else {
2444 uDlySel_R =
2445 pBdPrev->Bound_Start +
2446 uBD_width / 2 -
2447 cycle_cnt / 2;
2448 uMgLost_R = 0;
2449 }
2450 } else {
2451 return -1;
2452 }
2453 } else if (Bound_Cnt_F == 0) {
2454				/* mode_7: rising edge has only one boundary
2455				 * (not full), falling edge has no boundary
2456 */
2457 cycle_cnt = 128;
2458 pBdPrev = &(pBdInfo_R->bd_info[0]);
2459 if (pBdPrev->Bound_Start == 0) {
2460 uDlySel_F = 0;
2461 uDlySel_R = 63;
2462 } else if (pBdPrev->Bound_End == 63) {
2463 uDlySel_F = 63;
2464 uDlySel_R = 0;
2465 } else {
2466 return -1;
2467 }
2468 uMgLost_F = 0xFF;
2469 uMgLost_R = 0xFF;
2470
2471 ATK_ERR("[AUTOK]Warning: 1T > %d\n", cycle_cnt);
2472 }
2473 } else if (Bound_Cnt_R == 0) { /* Rising Edge No Boundary */
2474 if (Bound_Cnt_F > 1) {
2475				/* the falling edge is not allowed to have two
2476				 * boundaries in this case
2477 */
2478 return -1;
2479 } else if (Bound_Cnt_F > 0) {
2480 /* mode_8: falling edge has one Boundary */
2481 pBdPrev = &(pBdInfo_F->bd_info[0]);
2482
2483 /* this boundary is full bound */
2484 if (pBdPrev->is_fullbound) {
2485 uBD_mid_prev =
2486 (pBdPrev->Bound_Start +
2487 pBdPrev->Bound_End) / 2;
2488
2489 if (pBdPrev->Bound_End >
2490 (64 - pBdPrev->Bound_Start))
2491 cycle_cnt =
2492 2 * (pBdPrev->Bound_End + 1);
2493 else
2494 cycle_cnt =
2495 2 * (64 - pBdPrev->Bound_Start);
2496
2497 uDlySel_R = uBD_mid_prev;
2498 uMgLost_R = 0xFF;
2499 uDlySel_F =
2500 (uBD_mid_prev >= 32) ? 0 : 63;
2501 uMgLost_F = 0xFF;
2502 } else {
2503 cycle_cnt = 128;
2504
2505 uDlySel_R = (pBdPrev->Bound_Start ==
2506 0) ? 0 : 63;
2507 uMgLost_R = 0xFF;
2508 uDlySel_F = (pBdPrev->Bound_Start ==
2509 0) ? 63 : 0;
2510 uMgLost_F = 0xFF;
2511 }
2512
2513 ATK_ERR("[AUTOK]Warning: 1T > %d\n", cycle_cnt);
2514 } else {
2515			/* falling edge has no boundary, no tuning needed */
2516 cycle_cnt = 128;
2517 uDlySel_F = 0;
2518 uMgLost_F = 0xFF;
2519 uDlySel_R = 0;
2520 uMgLost_R = 0xFF;
2521 ATK_ERR("[AUTOK]Warning: 1T > %d\n", cycle_cnt);
2522 }
2523 } else {
2524		/* Error: if bound_cnt > 3 there must be
2525		 * at least one full boundary
2526 */
2527 return -1;
2528 }
2529 break;
2530
2531 default:
2532 /* warning if boundary count > 4
2533 * (from current hw design, this case cannot happen)
2534 */
2535 return -1;
2536 }
2537
2538 /* Select Optimised Sample edge & delay count (the small one) */
2539 pInfo->cycle_cnt = cycle_cnt;
2540 if (uDlySel_R <= uDlySel_F) {
2541 pInfo->opt_edge_sel = 0;
2542 pInfo->opt_dly_cnt = uDlySel_R;
2543 } else {
2544 pInfo->opt_edge_sel = 1;
2545 pInfo->opt_dly_cnt = uDlySel_F;
2547 }
2548	ATK_ERR("[AUTOK]Analysis Result: 1T = %d\n", cycle_cnt);
2549 return ret;
2550}
2551
2552/*
2553 ************************************************************************
2554 * FUNCTION
2555 * autok_adjust_param
2556 *
2557 * DESCRIPTION
2558 *	This function is used by auto-K to adjust an msdc parameter
2559 *
2560 * PARAMETERS
2561 * host: msdc host manipulator pointer
2562 * param: enum of msdc parameter
2563 * value: value of msdc parameter
2564 * rw: AUTOK_READ/AUTOK_WRITE
2565 *
2566 * RETURN VALUES
2567 *	error code: 0 success,
2568 *		   -1 parameter input error,
2569 *		      value out of range,
2570 *		      or invalid read/write direction
2571 *************************************************************************
2572 */
2573static int autok_adjust_param(struct msdc_host *host,
2574 enum AUTOK_PARAM param,
2575 u32 *value,
2576 int rw)
2577{
2578 void __iomem *base = host->base;
2579 void __iomem *base_top = host->base_top;
2580 u32 *reg;
2581 u32 field = 0;
2582
2583 switch (param) {
2584 case READ_DATA_SMPL_SEL:
2585 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2586 pr_debug("READ_DATA_SMPL_SEL(%d) is out of [0~1]\n",
2587 *value);
2588 return -1;
2589 }
2590
2591 reg = (u32 *) (base + MSDC_IOCON);
2592 field = (u32) (MSDC_IOCON_R_D_SMPL_SEL);
2593 break;
2594 case WRITE_DATA_SMPL_SEL:
2595 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2596 pr_debug("WRITE_DATA_SMPL_SEL(%d) is out of [0~1]\n",
2597 *value);
2598 return -1;
2599 }
2600
2601 reg = (u32 *) (base + MSDC_IOCON);
2602 field = (u32) (MSDC_IOCON_W_D_SMPL_SEL);
2603 break;
2604 case DATA_DLYLINE_SEL:
2605 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2606 pr_debug("DATA_DLYLINE_SEL(%d) is out of [0~1]\n",
2607 *value);
2608 return -1;
2609 }
2610 if (host->base_top) {
2611 reg = (u32 *) (base_top + MSDC_TOP_CONTROL);
2612 field = (u32) (DATA_K_VALUE_SEL);
2613 } else {
2614 reg = (u32 *) (base + MSDC_IOCON);
2615 field = (u32) (MSDC_IOCON_DDLSEL);
2616 }
2617 break;
2618 case MSDC_DAT_TUNE_SEL: /* 0-Dat tune 1-CLk tune ; */
2619 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2620 pr_debug("DATA_TUNE_SEL(%d) is out of [0~1]\n",
2621 *value);
2622 return -1;
2623 }
2624 if (host->base_top) {
2625 reg = (u32 *) (base_top + MSDC_TOP_CONTROL);
2626 field = (u32) (PAD_RXDLY_SEL);
2627 } else {
2628 reg = (u32 *) (base + MSDC_PAD_TUNE0);
2629 field = (u32) (MSDC_PAD_TUNE0_RXDLYSEL);
2630 }
2631 break;
2632 case MSDC_WCRC_ASYNC_FIFO_SEL:
2633 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2634 pr_debug("WCRC_ASYNC_FIFO_SEL(%d) is out of [0~1]\n",
2635 *value);
2636 return -1;
2637 }
2638 reg = (u32 *) (base + MSDC_PATCH_BIT2);
2639 field = (u32) (MSDC_PB2_CFGCRCSTS);
2640 break;
2641 case MSDC_RESP_ASYNC_FIFO_SEL:
2642 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2643 pr_debug("RESP_ASYNC_FIFO_SEL(%d) is out of [0~1]\n",
2644 *value);
2645 return -1;
2646 }
2647 reg = (u32 *) (base + MSDC_PATCH_BIT2);
2648 field = (u32) (MSDC_PB2_CFGRESP);
2649 break;
2650 case CMD_EDGE:
2651 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2652 pr_debug("CMD_EDGE(%d) is out of [0~1]\n", *value);
2653 return -1;
2654 }
2655 reg = (u32 *) (base + MSDC_IOCON);
2656 field = (u32) (MSDC_IOCON_RSPL);
2657 break;
2658 case RDATA_EDGE:
2659 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2660 pr_debug("RDATA_EDGE(%d) is out of [0~1]\n", *value);
2661 return -1;
2662 }
2663 reg = (u32 *) (base + MSDC_IOCON);
2664 field = (u32) (MSDC_IOCON_R_D_SMPL);
2665 break;
2666 case RD_FIFO_EDGE:
2667 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2668 pr_debug("RD_FIFO_EDGE(%d) is out of [0~1]\n", *value);
2669 return -1;
2670 }
2671 reg = (u32 *) (base + MSDC_PATCH_BIT0);
2672 field = (u32) (MSDC_PB0_RD_DAT_SEL);
2673 break;
2674 case WD_FIFO_EDGE:
2675 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2676 pr_debug("WD_FIFO_EDGE(%d) is out of [0~1]\n", *value);
2677 return -1;
2678 }
2679 reg = (u32 *) (base + MSDC_PATCH_BIT2);
2680 field = (u32) (MSDC_PB2_CFGCRCSTSEDGE);
2681 break;
2682 case CMD_RD_D_DLY1:
2683 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2684 pr_debug("CMD_RD_D_DLY1(%d) is out of [0~31]\n",
2685 *value);
2686 return -1;
2687 }
2688 if (host->base_top) {
2689 reg = (u32 *) (base_top + MSDC_TOP_CMD);
2690 field = (u32) (PAD_CMD_RXDLY);
2691 } else {
2692 reg = (u32 *) (base + MSDC_PAD_TUNE0);
2693 field = (u32) (MSDC_PAD_TUNE0_CMDRDLY);
2694 }
2695 break;
2696 case CMD_RD_D_DLY1_SEL:
2697 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2698 pr_debug("CMD_RD_D_DLY1_SEL(%d) is out of [0~1]\n",
2699 *value);
2700 return -1;
2701 }
2702 if (host->base_top) {
2703 reg = (u32 *) (base_top + MSDC_TOP_CMD);
2704 field = (u32) (PAD_CMD_RD_RXDLY_SEL);
2705 } else {
2706 reg = (u32 *) (base + MSDC_PAD_TUNE0);
2707 field = (u32) (MSDC_PAD_TUNE0_CMDRRDLYSEL);
2708 }
2709 break;
2710 case CMD_RD_D_DLY2:
2711 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2712 pr_debug("CMD_RD_D_DLY2(%d) is out of [0~31]\n",
2713 *value);
2714 return -1;
2715 }
2716 if (host->base_top) {
2717 reg = (u32 *) (base_top + MSDC_TOP_CMD);
2718 field = (u32) (PAD_CMD_RXDLY2);
2719 } else {
2720 reg = (u32 *) (base + MSDC_PAD_TUNE1);
2721 field = (u32) (MSDC_PAD_TUNE1_CMDRDLY2);
2722 }
2723 break;
2724 case CMD_RD_D_DLY2_SEL:
2725 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2726 pr_debug("CMD_RD_D_DLY2_SEL(%d) is out of [0~1]\n",
2727 *value);
2728 return -1;
2729 }
2730 if (host->base_top) {
2731 reg = (u32 *) (base_top + MSDC_TOP_CMD);
2732 field = (u32) (PAD_CMD_RD_RXDLY2_SEL);
2733 } else {
2734 reg = (u32 *) (base + MSDC_PAD_TUNE1);
2735 field = (u32) (MSDC_PAD_TUNE1_CMDRRDLY2SEL);
2736 }
2737 break;
2738 case DAT_RD_D_DLY1:
2739 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2740 pr_debug("DAT_RD_D_DLY1(%d) is out of [0~31]\n",
2741 *value);
2742 return -1;
2743 }
2744 if (host->base_top) {
2745 reg = (u32 *) (base_top + MSDC_TOP_CONTROL);
2746 field = (u32) (PAD_DAT_RD_RXDLY);
2747 } else {
2748 reg = (u32 *) (base + MSDC_PAD_TUNE0);
2749 field = (u32) (MSDC_PAD_TUNE0_DATRRDLY);
2750 }
2751 break;
2752 case DAT_RD_D_DLY1_SEL:
2753 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2754 pr_debug("DAT_RD_D_DLY1_SEL(%d) is out of [0~1]\n",
2755 *value);
2756 return -1;
2757 }
2758 if (host->base_top) {
2759 reg = (u32 *) (base_top + MSDC_TOP_CONTROL);
2760 field = (u32) (PAD_DAT_RD_RXDLY_SEL);
2761 } else {
2762 reg = (u32 *) (base + MSDC_PAD_TUNE0);
2763 field = (u32) (MSDC_PAD_TUNE0_DATRRDLYSEL);
2764 }
2765 break;
2766 case DAT_RD_D_DLY2:
2767 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2768 pr_debug("DAT_RD_D_DLY2(%d) is out of [0~31]\n",
2769 *value);
2770 return -1;
2771 }
2772 if (host->base_top) {
2773 reg = (u32 *) (base_top + MSDC_TOP_CONTROL);
2774 field = (u32) (PAD_DAT_RD_RXDLY2);
2775 } else {
2776 reg = (u32 *) (base + MSDC_PAD_TUNE1);
2777 field = (u32) (MSDC_PAD_TUNE1_DATRRDLY2);
2778 }
2779 break;
2780 case DAT_RD_D_DLY2_SEL:
2781 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2782 pr_debug("DAT_RD_D_DLY2_SEL(%d) is out of [0~1]\n",
2783 *value);
2784 return -1;
2785 }
2786 if (host->base_top) {
2787 reg = (u32 *) (base_top + MSDC_TOP_CONTROL);
2788 field = (u32) (PAD_DAT_RD_RXDLY2_SEL);
2789 } else {
2790 reg = (u32 *) (base + MSDC_PAD_TUNE1);
2791 field = (u32) (MSDC_PAD_TUNE1_DATRRDLY2SEL);
2792 }
2793 break;
2794 case INT_DAT_LATCH_CK:
2795 if ((rw == AUTOK_WRITE) && (*value > 7)) {
2796 pr_debug("INT_DAT_LATCH_CK(%d) is out of [0~7]\n",
2797 *value);
2798 return -1;
2799 }
2800 reg = (u32 *) (base + MSDC_PATCH_BIT0);
2801 field = (u32) (MSDC_PB0_INT_DAT_LATCH_CK_SEL);
2802 break;
2803 case CKGEN_MSDC_DLY_SEL:
2804 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2805 pr_debug("CKGEN_MSDC_DLY_SEL(%d) is out of [0~31]\n",
2806 *value);
2807 return -1;
2808 }
2809 reg = (u32 *) (base + MSDC_PATCH_BIT0);
2810 field = (u32) (MSDC_PB0_CKGEN_MSDC_DLY_SEL);
2811 break;
2812 case CMD_RSP_TA_CNTR:
2813 if ((rw == AUTOK_WRITE) && (*value > 7)) {
2814 pr_debug("CMD_RSP_TA_CNTR(%d) is out of [0~7]\n",
2815 *value);
2816 return -1;
2817 }
2818 reg = (u32 *) (base + MSDC_PATCH_BIT1);
2819 field = (u32) (MSDC_PB1_CMD_RSP_TA_CNTR);
2820 break;
2821 case WRDAT_CRCS_TA_CNTR:
2822 if ((rw == AUTOK_WRITE) && (*value > 7)) {
2823 pr_debug("WRDAT_CRCS_TA_CNTR(%d) is out of [0~7]\n",
2824 *value);
2825 return -1;
2826 }
2827 reg = (u32 *) (base + MSDC_PATCH_BIT1);
2828 field = (u32) (MSDC_PB1_WRDAT_CRCS_TA_CNTR);
2829 break;
2830 case PAD_CLK_TXDLY:
2831 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2832 pr_debug("PAD_CLK_TXDLY(%d) is out of [0~31]\n",
2833 *value);
2834 return -1;
2835 }
2836 if (host->base_top) {
2837 reg = (u32 *) (base_top + MSDC_TOP_PAD_CTRL0);
2838 field = (u32) (MSDC_PAD_CLK_TXDLY);
2839 } else {
2840 reg = (u32 *) (base + MSDC_PAD_TUNE0);
2841 field = (u32) (MSDC_PAD_TUNE0_CLKTXDLY);
2842 }
2843 break;
2844 case EMMC50_WDATA_MUX_EN:
2845 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2846 pr_debug("EMMC50_WDATA_MUX_EN(%d) is out of [0~1]\n",
2847 *value);
2848 return -1;
2849 }
2850 reg = (u32 *) (base + EMMC50_CFG0);
2851 field = (u32) (MSDC_EMMC50_CFG_CRC_STS_SEL);
2852 break;
2853 case EMMC50_CMD_MUX_EN:
2854 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2855 pr_debug("EMMC50_CMD_MUX_EN(%d) is out of [0~1]\n",
2856 *value);
2857 return -1;
2858 }
2859 reg = (u32 *) (base + EMMC50_CFG0);
2860 field = (u32) (MSDC_EMMC50_CFG_CMD_RESP_SEL);
2861 break;
2862 case EMMC50_WDATA_EDGE:
2863 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2864 pr_debug("EMMC50_WDATA_EDGE(%d) is out of [0~1]\n",
2865 *value);
2866 return -1;
2867 }
2868 reg = (u32 *) (base + EMMC50_CFG0);
2869 field = (u32) (MSDC_EMMC50_CFG_CRC_STS_EDGE);
2870 break;
2871 case EMMC50_DS_Z_DLY1:
2872 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2873 pr_debug("EMMC50_DS_Z_DLY1(%d) is out of [0~31]\n",
2874 *value);
2875 return -1;
2876 }
2877 if (host->base_top) {
2878 reg = (u32 *) (base_top + MSDC_TOP_PAD_DS_TUNE);
2879 field = (u32) (PAD_DS_DLY1);
2880 } else {
2881 reg = (u32 *) (base + EMMC50_PAD_DS_TUNE);
2882 field = (u32) (MSDC_EMMC50_PAD_DS_TUNE_DLY1);
2883 }
2884 break;
2885 case EMMC50_DS_Z_DLY1_SEL:
2886 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2887 pr_debug("EMMC50_DS_Z_DLY1_SEL(%d) is out of [0~1]\n",
2888 *value);
2889 return -1;
2890 }
2891 if (host->base_top) {
2892 reg = (u32 *) (base_top + MSDC_TOP_PAD_DS_TUNE);
2893 field = (u32) (PAD_DS_DLY_SEL);
2894 } else {
2895 reg = (u32 *) (base + EMMC50_PAD_DS_TUNE);
2896 field = (u32) (MSDC_EMMC50_PAD_DS_TUNE_DLYSEL);
2897 }
2898 break;
2899 case EMMC50_DS_Z_DLY2:
2900 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2901 pr_debug("EMMC50_DS_Z_DLY2(%d) is out of [0~31]\n",
2902 *value);
2903 return -1;
2904 }
2905 if (host->base_top) {
2906 reg = (u32 *) (base_top + MSDC_TOP_PAD_DS_TUNE);
2907 field = (u32) (PAD_DS_DLY2);
2908 } else {
2909 reg = (u32 *) (base + EMMC50_PAD_DS_TUNE);
2910 field = (u32) (MSDC_EMMC50_PAD_DS_TUNE_DLY2);
2911 }
2912 break;
2913 case EMMC50_DS_Z_DLY2_SEL:
2914 if ((rw == AUTOK_WRITE) && (*value > 1)) {
2915 pr_debug("EMMC50_DS_Z_DLY2_SEL(%d) is out of [0~1]\n",
2916 *value);
2917 return -1;
2918 }
2919 if (host->base_top) {
2920 reg = (u32 *) (base_top + MSDC_TOP_PAD_DS_TUNE);
2921 field = (u32) (PAD_DS_DLY2_SEL);
2922 } else {
2923 reg = (u32 *) (base + EMMC50_PAD_DS_TUNE);
2924 field = (u32) (MSDC_EMMC50_PAD_DS_TUNE_DLY2SEL);
2925 }
2926 break;
2927 case EMMC50_DS_ZDLY_DLY:
2928 if ((rw == AUTOK_WRITE) && (*value > 31)) {
2929 pr_debug("EMMC50_DS_Z_DLY(%d) is out of [0~31]\n",
2930 *value);
2931 return -1;
2932 }
2933 if (host->base_top) {
2934 reg = (u32 *) (base_top + MSDC_TOP_PAD_DS_TUNE);
2935 field = (u32) (PAD_DS_DLY3);
2936 } else {
2937 reg = (u32 *) (base + EMMC50_PAD_DS_TUNE);
2938 field = (u32) (MSDC_EMMC50_PAD_DS_TUNE_DLY3);
2939 }
2940 break;
2941 default:
2942 pr_debug("Value of [enum AUTOK_PARAM param] is wrong\n");
2943 return -1;
2944 }
2945
2946 if (rw == AUTOK_READ)
2947 sdr_get_field(reg, field, value);
2948 else if (rw == AUTOK_WRITE) {
2949 sdr_set_field(reg, field, *value);
2950
2951 if (param == CKGEN_MSDC_DLY_SEL)
2952 mdelay(1);
2953 } else {
2954 pr_debug("Value of [int rw] is wrong\n");
2955 return -1;
2956 }
2957
2958 return 0;
2959}
2960
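/*
 * Cache one tuning result into autok_tune_res[] after checking it against
 * the legal range recorded in autok_param_info[].
 */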
2961static int autok_param_update(enum AUTOK_PARAM param_id,
2962 unsigned int result, u8 *autok_tune_res)
2963{
2964 if (param_id < TUNING_PARAM_COUNT) {
2965 if ((result > autok_param_info[param_id].range.end) ||
2966 (result < autok_param_info[param_id].range.start)) {
2967			ATK_ERR("[AUTOK]param value %d out of range [%d,%d]\n",
2968 result,
2969 autok_param_info[param_id].range.start,
2970 autok_param_info[param_id].range.end);
2971 return -1;
2972 }
2973 autok_tune_res[param_id] = (u8) result;
2974 return 0;
2975 }
2976 ATK_ERR("[AUTOK]param not found\r\n");
2977
2978 return -1;
2979}
2980
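/*
 * Write every cached tuning parameter in autok_tune_res[] back to the
 * controller through autok_adjust_param().
 */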
2981static int autok_param_apply(struct msdc_host *host, u8 *autok_tune_res)
2982{
2983 unsigned int i = 0;
2984 unsigned int value = 0;
2985
2986 for (i = 0; i < TUNING_PARAM_COUNT; i++) {
2987 value = (u8) autok_tune_res[i];
2988 autok_adjust_param(host, i, &value, AUTOK_WRITE);
2989 }
2990
2991 return 0;
2992}
2993
2994static void autok_tuning_parameter_init(struct msdc_host *host, u8 *res)
2995{
2996 unsigned int ret = 0;
2997 /* void __iomem *base = host->base; */
2998
2999 /* MSDC_SET_FIELD(MSDC_PATCH_BIT2, 7<<29, 2); */
3000 /* MSDC_SET_FIELD(MSDC_PATCH_BIT2, 7<<16, 4); */
3001
3002 ret = autok_param_apply(host, res);
3003}
3004
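/*
 * Log the tuned CMD/DAT/DS settings.  The array indices follow the
 * enum AUTOK_PARAM ordering used by autok_param_apply().
 */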
3005static int autok_result_dump(struct msdc_host *host, u8 *autok_tune_res)
3006{
3007 ATK_ERR("[AUTOK]CMD [EDGE:%d DLY1:%d DLY2:%d ]\n",
3008 autok_tune_res[0], autok_tune_res[4], autok_tune_res[6]);
3009 ATK_ERR("[AUTOK]DAT [RDAT_EDGE:%d RD_FIFO_EDGE:%d WD_FIFO_EDGE:%d]\n",
3010 autok_tune_res[1], autok_tune_res[2], autok_tune_res[3]);
3011 ATK_ERR("[AUTOK]DAT [LATCH_CK:%d DLY1:%d DLY2:%d ]\n",
3012 autok_tune_res[12], autok_tune_res[8], autok_tune_res[10]);
3013 ATK_ERR("[AUTOK]DS [DLY1:%d DLY2:%d DLY3:%d]\n",
3014 autok_tune_res[13], autok_tune_res[15], autok_tune_res[17]);
3015
3016 return 0;
3017}
3018
3019/* online tuning for latch ck */
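/*
 * Sweep MSDC_PB0_INT_DAT_LATCH_CK_SEL upwards from the given start value
 * (stepping by src_clk_freq / sclk) and send tuning transfers at each step;
 * the first setting that survives AUTOK_LATCH_CK_SDIO_TUNE_TIMES transfers
 * without CMD/DAT errors is kept.  Returns that setting, or 0 if none of
 * the candidates passed.
 */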
3020int autok_execute_tuning_latch_ck(struct msdc_host *host, unsigned int opcode,
3021				  unsigned int latch_ck_initial_value)
3022{
3023 unsigned int ret = 0;
3024 unsigned int j, k;
3025 void __iomem *base = host->base;
3026 unsigned int tune_time;
3027
3028 writel(0xffffffff, base + MSDC_INT);
3029 tune_time = AUTOK_LATCH_CK_SDIO_TUNE_TIMES;
3030	for (j = latch_ck_initial_value; j < 8;
3031 j += (host->src_clk_freq / host->sclk)) {
3032 host->tune_latch_ck_cnt = 0;
3033 msdc_clear_fifo();
3034 sdr_set_field(base + MSDC_PATCH_BIT0,
3035 MSDC_PB0_INT_DAT_LATCH_CK_SEL, j);
3036 for (k = 0; k < tune_time; k++) {
3037 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
3038 switch (k) {
3039 case 0:
3040 host->tune_latch_ck_cnt = 1;
3041 break;
3042 default:
3043 host->tune_latch_ck_cnt = k;
3044 break;
3045 }
3046 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
3047 switch (k) {
3048 case 0:
3049 case 1:
3050 case 2:
3051 host->tune_latch_ck_cnt = 1;
3052 break;
3053 default:
3054 host->tune_latch_ck_cnt = k - 1;
3055 break;
3056 }
3057 } else if (opcode == MMC_SEND_EXT_CSD) {
3058 host->tune_latch_ck_cnt = k + 1;
3059 } else
3060 host->tune_latch_ck_cnt++;
3061 ret = autok_send_tune_cmd(host, opcode, TUNE_LATCH_CK);
3062 if ((ret &
3063 (E_RESULT_CMD_TMO | E_RESULT_RSP_CRC)) != 0) {
3064 ATK_ERR("[AUTOK]CMD Fail when tune LATCH CK\n");
3065 break;
3066 } else if ((ret &
3067 (E_RESULT_DAT_CRC |
3068 E_RESULT_DAT_TMO)) != 0) {
3069 ATK_ERR("[AUTOK]Tune LATCH_CK error %d\r\n", j);
3070 break;
3071 }
3072 }
3073 if (ret == 0) {
3074 sdr_set_field(base + MSDC_PATCH_BIT0,
3075 MSDC_PB0_INT_DAT_LATCH_CK_SEL, j);
3076 break;
3077 }
3078 }
3079 host->tune_latch_ck_cnt = 0;
3080 return (j >= 8) ? 0 : j;
3081}
3082
3083/*
3084 ******************************************************
3085 * Function: msdc_autok_adjust_paddly *
3086 * Param : value - delay cnt from 0 to 63 *
3087 * pad_sel - 0 for cmd pad and 1 for data pad *
3088 ******************************************************
3089 */
3090#define CMD_PAD_RDLY 0
3091#define DAT_PAD_RDLY 1
3092#define DS_PAD_RDLY 2
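/*
 * A 0~63 delay value is split across the two delay stages: 0~31 uses only
 * DLY1, larger values saturate DLY1 at 31 and put the remainder
 * (value - 32) into DLY2, with the matching *_SEL bits enabling each stage
 * that is in use (e.g. 40 -> DLY1 = 31, DLY2 = 8).
 */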
3093static void msdc_autok_adjust_paddly(struct msdc_host *host,
3094 unsigned int *value,
3095 unsigned int pad_sel)
3096{
3097 unsigned int uCfgL = 0;
3098 unsigned int uCfgLSel = 0;
3099 unsigned int uCfgH = 0;
3100 unsigned int uCfgHSel = 0;
3101 unsigned int dly_cnt = *value;
3102
3103 uCfgL = (dly_cnt > 31) ? (31) : dly_cnt;
3104 uCfgH = (dly_cnt > 31) ? (dly_cnt - 32) : 0;
3105
3106 uCfgLSel = (uCfgL > 0) ? 1 : 0;
3107 uCfgHSel = (uCfgH > 0) ? 1 : 0;
3108 switch (pad_sel) {
3109 case CMD_PAD_RDLY:
3110 autok_adjust_param(host, CMD_RD_D_DLY1, &uCfgL, AUTOK_WRITE);
3111 autok_adjust_param(host, CMD_RD_D_DLY2, &uCfgH, AUTOK_WRITE);
3112
3113 autok_adjust_param(host, CMD_RD_D_DLY1_SEL,
3114 &uCfgLSel, AUTOK_WRITE);
3115 autok_adjust_param(host, CMD_RD_D_DLY2_SEL,
3116 &uCfgHSel, AUTOK_WRITE);
3117 break;
3118 case DAT_PAD_RDLY:
3119 autok_adjust_param(host, DAT_RD_D_DLY1, &uCfgL, AUTOK_WRITE);
3120 autok_adjust_param(host, DAT_RD_D_DLY2, &uCfgH, AUTOK_WRITE);
3121
3122 autok_adjust_param(host, DAT_RD_D_DLY1_SEL,
3123 &uCfgLSel, AUTOK_WRITE);
3124 autok_adjust_param(host, DAT_RD_D_DLY2_SEL,
3125 &uCfgHSel, AUTOK_WRITE);
3126 break;
3127 case DS_PAD_RDLY:
3128 autok_adjust_param(host, EMMC50_DS_Z_DLY1, &uCfgL, AUTOK_WRITE);
3129 autok_adjust_param(host, EMMC50_DS_Z_DLY2, &uCfgH, AUTOK_WRITE);
3130
3131 autok_adjust_param(host, EMMC50_DS_Z_DLY1_SEL,
3132 &uCfgLSel, AUTOK_WRITE);
3133 autok_adjust_param(host, EMMC50_DS_Z_DLY2_SEL,
3134 &uCfgHSel, AUTOK_WRITE);
3135 break;
3136 }
3137}
3138
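/*
 * Same DLY1/DLY2 split as msdc_autok_adjust_paddly(), but the values are
 * recorded into the tuning-result array instead of being written to the
 * hardware.
 */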
3139static void autok_paddly_update(unsigned int pad_sel,
3140 unsigned int dly_cnt,
3141 u8 *autok_tune_res)
3142{
3143 unsigned int uCfgL = 0;
3144 unsigned int uCfgLSel = 0;
3145 unsigned int uCfgH = 0;
3146 unsigned int uCfgHSel = 0;
3147
3148 uCfgL = (dly_cnt > 31) ? (31) : dly_cnt;
3149 uCfgH = (dly_cnt > 31) ? (dly_cnt - 32) : 0;
3150
3151 uCfgLSel = (uCfgL > 0) ? 1 : 0;
3152 uCfgHSel = (uCfgH > 0) ? 1 : 0;
3153 switch (pad_sel) {
3154 case CMD_PAD_RDLY:
3155 autok_param_update(CMD_RD_D_DLY1, uCfgL, autok_tune_res);
3156 autok_param_update(CMD_RD_D_DLY2, uCfgH, autok_tune_res);
3157
3158 autok_param_update(CMD_RD_D_DLY1_SEL, uCfgLSel, autok_tune_res);
3159 autok_param_update(CMD_RD_D_DLY2_SEL, uCfgHSel, autok_tune_res);
3160 break;
3161 case DAT_PAD_RDLY:
3162 autok_param_update(DAT_RD_D_DLY1, uCfgL, autok_tune_res);
3163 autok_param_update(DAT_RD_D_DLY2, uCfgH, autok_tune_res);
3164
3165 autok_param_update(DAT_RD_D_DLY1_SEL, uCfgLSel, autok_tune_res);
3166 autok_param_update(DAT_RD_D_DLY2_SEL, uCfgHSel, autok_tune_res);
3167 break;
3168 case DS_PAD_RDLY:
3169 autok_param_update(EMMC50_DS_Z_DLY1, uCfgL, autok_tune_res);
3170 autok_param_update(EMMC50_DS_Z_DLY2, uCfgH, autok_tune_res);
3171
3172 autok_param_update(EMMC50_DS_Z_DLY1_SEL,
3173 uCfgLSel, autok_tune_res);
3174 autok_param_update(EMMC50_DS_Z_DLY2_SEL,
3175 uCfgHSel, autok_tune_res);
3176 break;
3177 }
3178}
3179
3180/*
3181 ******************************************************
3182 * Execute tuning IF implementation                    *
3183 ******************************************************
3184 */
3185static int autok_write_param(struct msdc_host *host,
3186 enum AUTOK_PARAM param, u32 value)
3187{
3188 autok_adjust_param(host, param, &value, AUTOK_WRITE);
3189
3190 return 0;
3191}
3192
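/*
 * Baseline path configuration applied before any scan: sample-edge and
 * delay-line source selection, async FIFO selection, the eMMC5.0 muxes set
 * back to the eMMC4.5 path, and the common CKGEN / turn-around settings.
 */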
3193static int autok_path_sel(struct msdc_host *host)
3194{
3195 void __iomem *base = host->base;
3196
3197 autok_write_param(host, READ_DATA_SMPL_SEL, 0);
3198 autok_write_param(host, WRITE_DATA_SMPL_SEL, 0);
3199
3200	/* clk tune: all data lines share the same dly */
3201 autok_write_param(host, DATA_DLYLINE_SEL, 0);
3202
3203 /* data tune mode select */
3204#if defined(CHIP_DENALI_3_DAT_TUNE)
3205 autok_write_param(host, MSDC_DAT_TUNE_SEL, 1);
3206#else
3207 autok_write_param(host, MSDC_DAT_TUNE_SEL, 0);
3208#endif
3209 autok_write_param(host, MSDC_WCRC_ASYNC_FIFO_SEL, 1);
3210 autok_write_param(host, MSDC_RESP_ASYNC_FIFO_SEL, 0);
3211
3212 /* eMMC50 Function Mux */
3213 /* write path switch to emmc45 */
3214 autok_write_param(host, EMMC50_WDATA_MUX_EN, 0);
3215
3216 /* response path switch to emmc45 */
3217 autok_write_param(host, EMMC50_CMD_MUX_EN, 0);
3218 autok_write_param(host, EMMC50_WDATA_EDGE, 0);
3219
3220 /* Common Setting Config */
3221 autok_write_param(host, CKGEN_MSDC_DLY_SEL, AUTOK_CKGEN_VALUE);
3222 autok_write_param(host, CMD_RSP_TA_CNTR, AUTOK_CMD_TA_VALUE);
3223 autok_write_param(host, WRDAT_CRCS_TA_CNTR, AUTOK_CRC_TA_VALUE);
3224
3225 sdr_set_field(base + MSDC_PATCH_BIT1, MSDC_PB1_GET_BUSY_MA,
3226 AUTOK_BUSY_MA_VALUE);
3227 sdr_set_field(base + MSDC_PATCH_BIT1, MSDC_PB1_GET_CRC_MA,
3228 AUTOK_CRC_MA_VALUE);
3229
3230 return 0;
3231}
3232
3233static int autok_init_sdr104(struct msdc_host *host)
3234{
3235 void __iomem *base = host->base;
3236
3237 /* driver may miss data tune path setting in the interim */
3238 autok_path_sel(host);
3239
3240 /* if any specific config need modify add here */
3241 /* LATCH_TA_EN Config for WCRC Path non_HS400 */
3242 sdr_set_field(base + MSDC_PATCH_BIT2, MSDC_PB2_CRCSTSENSEL,
3243 AUTOK_CRC_LATCH_EN_NON_HS400_VALUE);
3244
3245 /* LATCH_TA_EN Config for CMD Path non_HS400 */
3246 sdr_set_field(base + MSDC_PATCH_BIT2, MSDC_PB2_RESPSTENSEL,
3247 AUTOK_CMD_LATCH_EN_NON_HS400_VALUE);
3248
3249 return 0;
3250}
3251
3252/* online tuning for SDIO/SD */
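/*
 * Three passes: (1) scan all 64 CMD pad delays on both response sample
 * edges and pick the best edge/delay, (2) repeat the scan for the read-data
 * path, (3) tune the internal data latch clock.  The chosen parameters are
 * returned through *res and also left applied to the controller.
 */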
3253static int execute_online_tuning(struct msdc_host *host, u8 *res)
3254{
3255 unsigned int ret = 0;
3256 unsigned int uCmdEdge = 0;
3257 unsigned int uDatEdge = 0;
3258 u64 RawData64 = 0LL;
3259 unsigned int score = 0;
3260 unsigned int j, k;
3261 unsigned int opcode = MMC_SEND_TUNING_BLOCK;
3262 struct AUTOK_REF_INFO uCmdDatInfo;
3263 struct AUTOK_SCAN_RES *pBdInfo;
3264 char tune_result_str64[65];
3265 u8 p_autok_tune_res[TUNING_PARAM_COUNT];
3266
3267 autok_init_sdr104(host);
3268 memset((void *)p_autok_tune_res, 0,
3269 sizeof(p_autok_tune_res) / sizeof(u8));
3270
3271 /* Step1 : Tuning Cmd Path */
3272 autok_tuning_parameter_init(host, p_autok_tune_res);
3273 memset(&uCmdDatInfo, 0, sizeof(struct AUTOK_REF_INFO));
3274
3275 uCmdEdge = 0;
3276 do {
3277 pBdInfo = (struct AUTOK_SCAN_RES *)&
3278 (uCmdDatInfo.scan_info[uCmdEdge]);
3279 autok_adjust_param(host, CMD_EDGE, &uCmdEdge, AUTOK_WRITE);
3280 RawData64 = 0LL;
3281 for (j = 0; j < 64; j++) {
3282 msdc_autok_adjust_paddly(host, &j, CMD_PAD_RDLY);
3283 for (k = 0; k < AUTOK_CMD_TIMES / 2; k++) {
3284 ret = autok_send_tune_cmd(host,
3285 opcode, TUNE_CMD);
3286 if ((ret & (E_RESULT_CMD_TMO |
3287 E_RESULT_RSP_CRC)) != 0) {
3288 RawData64 |= (u64) (1LL << j);
3289 break;
3290 }
3291 }
3292 }
3293 score = autok_simple_score64(tune_result_str64, RawData64);
3294 //ATK_DBG(ATK_RES, "[AUTOK]CMD %d \t %d \t %s\r\n",
3295 // uCmdEdge, score, tune_result_str64);
3296 if (autok_check_scan_res64(RawData64, pBdInfo) != 0) {
3297 host->autok_error = AUTOK_FAIL;
3298 msdc_dump_all_register(host);
3299 return AUTOK_FAIL;
3300 }
3301 #if 0
3302 ATK_DBG(ATK_RES,
3303 "[AUTOK]Edge:%d \t BoundaryCnt:%d \t FullBoundaryCnt:%d \t\n",
3304 uCmdEdge, pBdInfo->bd_cnt, pBdInfo->fbd_cnt);
3305
3306 for (i = 0; i < BD_MAX_CNT; i++) {
3307 ATK_DBG(ATK_RES,
3308 "[AUTOK]BoundInf[%d]: S:%d \t E:%d \t W:%d \t FullBound:%d\n",
3309 i, pBdInfo->bd_info[i].Bound_Start,
3310 pBdInfo->bd_info[i].Bound_End, pBdInfo->bd_info[i].Bound_width,
3311 pBdInfo->bd_info[i].is_fullbound);
3312 }
3313 #endif
3314
3315 uCmdEdge ^= 0x1;
3316 } while (uCmdEdge);
3317
3318 if (autok_pad_dly_sel(&uCmdDatInfo) == 0) {
3319 autok_param_update(CMD_EDGE, uCmdDatInfo.opt_edge_sel,
3320 p_autok_tune_res);
3321 autok_paddly_update(CMD_PAD_RDLY, uCmdDatInfo.opt_dly_cnt,
3322 p_autok_tune_res);
3323 } else {
3324 ATK_DBG(ATK_RES, "[AUTOK]======Analysis Fail!!=======\n");
3325 host->autok_error = AUTOK_FAIL;
3326 msdc_dump_all_register(host);
3327 return AUTOK_FAIL;
3328 }
3329
3330 /* Step2 : Tuning Data Path */
3331 autok_tuning_parameter_init(host, p_autok_tune_res);
3332 memset(&uCmdDatInfo, 0, sizeof(struct AUTOK_REF_INFO));
3333
3334 uDatEdge = 0;
3335 do {
3336 pBdInfo = (struct AUTOK_SCAN_RES *)&
3337 (uCmdDatInfo.scan_info[uDatEdge]);
3338 autok_adjust_param(host, RD_FIFO_EDGE, &uDatEdge, AUTOK_WRITE);
3339 RawData64 = 0LL;
3340 for (j = 0; j < 64; j++) {
3341 msdc_autok_adjust_paddly(host, &j, DAT_PAD_RDLY);
3342 for (k = 0; k < AUTOK_CMD_TIMES / 2; k++) {
3343 ret = autok_send_tune_cmd(host, opcode,
3344 TUNE_DATA);
3345 if ((ret & (E_RESULT_CMD_TMO |
3346 E_RESULT_RSP_CRC)) != 0) {
3347 ATK_ERR("[AUTOK]Tune read CMD Fail\n");
3348 host->autok_error = -1;
3349 return -1;
3350 } else if ((ret & (E_RESULT_DAT_CRC |
3351 E_RESULT_DAT_TMO)) != 0) {
3352 RawData64 |= (u64) (1LL << j);
3353 break;
3354 }
3355 }
3356 }
3357 score = autok_simple_score64(tune_result_str64, RawData64);
3358 //ATK_DBG(ATK_RES, "[AUTOK]DAT %d \t %d \t %s\r\n",
3359 // uDatEdge, score, tune_result_str64);
3360 if (autok_check_scan_res64(RawData64, pBdInfo) != 0) {
3361 host->autok_error = AUTOK_FAIL;
3362 msdc_dump_all_register(host);
3363 return AUTOK_FAIL;
3364 }
3365 #if 0
3366 ATK_DBG(ATK_RES,
3367 "[AUTOK]Edge:%d \t BoundaryCnt:%d \t FullBoundaryCnt:%d \t\n",
3368 uDatEdge, pBdInfo->bd_cnt, pBdInfo->fbd_cnt);
3369
3370 for (i = 0; i < BD_MAX_CNT; i++) {
3371 ATK_DBG(ATK_RES,
3372 "[AUTOK]BoundInf[%d]: S:%d \t E:%d \t W:%d \t FullBound:%d\r\n",
3373 i, pBdInfo->bd_info[i].Bound_Start,
3374 pBdInfo->bd_info[i].Bound_End, pBdInfo->bd_info[i].Bound_width,
3375 pBdInfo->bd_info[i].is_fullbound);
3376 }
3377 #endif
3378
3379 uDatEdge ^= 0x1;
3380 } while (uDatEdge);
3381
3382 if (autok_pad_dly_sel(&uCmdDatInfo) == 0) {
3383 autok_param_update(RD_FIFO_EDGE, uCmdDatInfo.opt_edge_sel,
3384 p_autok_tune_res);
3385 autok_paddly_update(DAT_PAD_RDLY, uCmdDatInfo.opt_dly_cnt,
3386 p_autok_tune_res);
3387 autok_param_update(WD_FIFO_EDGE, uCmdDatInfo.opt_edge_sel,
3388 p_autok_tune_res);
3389 } else {
3390 ATK_DBG(ATK_RES, "[AUTOK][Error]=====Analysis Fail!!=======\n");
3391 msdc_dump_all_register(host);
3392 host->autok_error = AUTOK_FAIL;
3393 return AUTOK_FAIL;
3394 }
3395
3396 autok_tuning_parameter_init(host, p_autok_tune_res);
3397
3398 /* Step3 : Tuning LATCH CK */
3399 p_autok_tune_res[INT_DAT_LATCH_CK] = autok_execute_tuning_latch_ck(host,
3400 opcode, p_autok_tune_res[INT_DAT_LATCH_CK]);
3401
3402 autok_result_dump(host, p_autok_tune_res);
3403#if AUTOK_PARAM_DUMP_ENABLE
3404 autok_register_dump(host);
3405#endif
3406 if (res != NULL) {
3407 memcpy((void *)res, (void *)p_autok_tune_res,
3408 sizeof(p_autok_tune_res) / sizeof(u8));
3409 }
3410 host->autok_error = 0;
3411
3412 return 0;
3413}
3414
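/*
 * Top-level tuning entry: save MSDC_INTEN and MSDC_CFG_CKPDN, mask the
 * interrupts and set CKPDN for the scan, retry execute_online_tuning() up
 * to three times, then restore both registers and log the elapsed time.
 */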
3415static int autok_execute_tuning(struct msdc_host *host, u8 *res)
3416{
3417 int ret = 0;
3418 struct timeval tm_s, tm_e;
3419 unsigned int tm_val = 0;
3420 unsigned int clk_pwdn = 0;
3421 unsigned int int_en = 0;
3422 unsigned int retry_cnt = 3;
3423 void __iomem *base = host->base;
3424
3425 do_gettimeofday(&tm_s);
3426
3427 do {
3428 autok_msdc_reset();
3429 msdc_clear_fifo();
3430 int_en = readl(base + MSDC_INTEN);
3431 writel(0, base + MSDC_INTEN);
3432 sdr_get_field(base + MSDC_CFG, MSDC_CFG_CKPDN, &clk_pwdn);
3433 sdr_set_field(base + MSDC_CFG, MSDC_CFG_CKPDN, 1);
3434 ret = execute_online_tuning(host, res);
3435 if (!ret)
3436 break;
3437 retry_cnt--;
3438 } while (retry_cnt);
3439
3440 autok_msdc_reset();
3441 msdc_clear_fifo();
3442 writel(0xffffffff, base + MSDC_INT);
3443 writel(int_en, base + MSDC_INTEN);
3444 sdr_set_field(base + MSDC_CFG, MSDC_CFG_CKPDN, clk_pwdn);
3445
3446 do_gettimeofday(&tm_e);
3447 tm_val = (tm_e.tv_sec - tm_s.tv_sec) * 1000 +
3448 (tm_e.tv_usec - tm_s.tv_usec) / 1000;
3449 ATK_ERR("[AUTOK]=========Time Cost:%d ms========\n", tm_val);
3450
3451 return ret;
3452}
3453
3454static void msdc_dump_all_register(struct msdc_host *host)
3455{
3456 void __iomem *base = host->base;
3457 int i;
3458 unsigned int left_cnt;
3459 unsigned int byte16_align_cnt;
3460
3461 byte16_align_cnt = MAX_REGISTER_ADDR / 16;
3462 for (i = 0; i < byte16_align_cnt; i++)
3463 pr_info("SDIO reg[%.2x]=0x%.8x reg[%.2x]=0x%.8x reg[%.2x]=0x%.8x reg[%.2x]=0x%.8x\n",
3464 i * 16, readl(base + i * 16),
3465 i * 16 + 4, readl(base + i * 16 + 4),
3466 i * 16 + 8, readl(base + i * 16 + 8),
3467 i * 16 + 12, readl(base + i * 16 + 12));
3468
3469 left_cnt = (MAX_REGISTER_ADDR - byte16_align_cnt * 16) / 4 + 1;
3470 for (i = 0; i < left_cnt; i++)
3471 pr_info("SDIO reg[%.2x]=0x%.8x\n",
3472 byte16_align_cnt * 16 + i * 4,
3473 readl(base + byte16_align_cnt * 16 + i * 4));
3474}
3475
3476static void msdc_dump_register(struct msdc_host *host)
3477{
3478 void __iomem *base = host->base;
3479
3480 pr_info("SDIO MSDC_CFG=0x%.8x\n", readl(base + MSDC_CFG));
3481 pr_info("SDIO MSDC_IOCON=0x%.8x\n", readl(base + MSDC_IOCON));
3482 pr_info("SDIO MSDC_PATCH_BIT0=0x%.8x\n", readl(base + MSDC_PATCH_BIT0));
3483 pr_info("SDIO MSDC_PATCH_BIT1=0x%.8x\n", readl(base + MSDC_PATCH_BIT1));
3484 pr_info("SDIO MSDC_PATCH_BIT2=0x%.8x\n", readl(base + MSDC_PATCH_BIT2));
3485 pr_info("SDIO MSDC_PAD_TUNE0=0x%.8x\n", readl(base + MSDC_PAD_TUNE0));
3486 pr_info("SDIO MSDC_PAD_TUNE1=0x%.8x\n", readl(base + MSDC_PAD_TUNE1));
3487}
3488
3489static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
3490{
3491 struct msdc_host *host = mmc_priv(mmc);
3492
3493 if (host->autok_done) {
3494 autok_init_sdr104(host);
3495 autok_param_apply(host, sdio_autok_res);
3496 } else {
3497 autok_execute_tuning(host, sdio_autok_res);
3498 host->autok_done = true;
3499 }
3500
3501 msdc_dump_register(host);
3502 return 0;
3503}
3504
3505static void msdc_hw_reset(struct mmc_host *mmc)
3506{
3507 struct msdc_host *host = mmc_priv(mmc);
3508
3509 sdr_set_bits(host->base + EMMC_IOCON, 1);
3510 udelay(10); /* 10us is enough */
3511 sdr_clr_bits(host->base + EMMC_IOCON, 1);
3512}
3513
3514/*
3515 * msdc_recheck_sdio_irq - recheck whether the SDIO IRQ is lost
3516 * @host: The host to check.
3517 *
3518 * The host controller may lose an interrupt in some special cases.
3519 * Add an SDIO IRQ recheck mechanism to make sure all interrupts
3520 * can be processed immediately.
3521 *
3522 */
3523#ifndef SUPPORT_LEGACY_SDIO
3524static void msdc_recheck_sdio_irq(struct msdc_host *host)
3525{
3526 u32 reg_int, reg_ps, reg_inten;
3527
3528 reg_inten = readl(host->base + MSDC_INTEN);
3529 if (host->clock_on && (host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
3530 (reg_inten & MSDC_INTEN_SDIOIRQ) &&
3531 host->irq_thread_alive) {
3532 reg_int = readl(host->base + MSDC_INT);
3533 reg_ps = readl(host->base + MSDC_PS);
3534 if (!((reg_int & MSDC_INT_SDIOIRQ) || (reg_ps & MSDC_PS_DATA1)))
3535 mmc_signal_sdio_irq(host->mmc);
3536 }
3537}
3538#endif
3539
3540static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enable)
3541{
3542 unsigned long flags;
3543 struct msdc_host *host = mmc_priv(mmc);
3544
3545 host->irq_thread_alive = true;
3546
3547#ifdef SUPPORT_LEGACY_SDIO
3548 if (host->cap_eirq) {
3549 if (enable)
3550 host->enable_sdio_eirq(); /* combo_sdio_enable_eirq */
3551 else
3552 host->disable_sdio_eirq(); /* combo_sdio_disable_eirq */
3553 }
3554 return;
3555#endif
3556
3557 if (enable) {
3558 pm_runtime_get_sync(host->dev);
3559
3560 spin_lock_irqsave(&host->irqlock, flags);
3561 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
3562 sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
3563 spin_unlock_irqrestore(&host->irqlock, flags);
3564 pm_runtime_mark_last_busy(host->dev);
3565 pm_runtime_put_autosuspend(host->dev);
3566 } else {
3567 spin_lock_irqsave(&host->irqlock, flags);
3568 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
3569 /*
3570 * if no msdc_recheck_sdio_irq(), then
3571 * no race condition of disable_irq
3572 * twice and only enable_irq once time.
3573 */
3574 if (likely(host->sdio_irq_cnt > 0)) {
3575 disable_irq_nosync(host->eint_irq);
3576 host->sdio_irq_cnt--;
3577 if (mmc->card && (mmc->card->cccr.eai == 0))
3578 pm_runtime_put_noidle(host->dev);
3579 }
3580 spin_unlock_irqrestore(&host->irqlock, flags);
3581 }
3582}
3583
3584static struct mmc_host_ops mt_msdc_ops = {
3585 .post_req = msdc_post_req,
3586 .pre_req = msdc_pre_req,
3587 .request = msdc_ops_request,
3588 .set_ios = msdc_ops_set_ios,
3589 .get_ro = mmc_gpio_get_ro,
3590 .start_signal_voltage_switch = msdc_ops_switch_volt,
3591 .card_busy = msdc_card_busy,
3592 .execute_tuning = msdc_execute_tuning,
3593 .hw_reset = msdc_hw_reset,
3594 .enable_sdio_irq = msdc_enable_sdio_irq,
3595};
3596
3597#ifndef SUPPORT_LEGACY_SDIO
3598static irqreturn_t sdio_eint_irq(int irq, void *dev_id)
3599{
3600 struct msdc_host *host = (struct msdc_host *)dev_id;
3601
3602 mmc_signal_sdio_irq(host->mmc);
3603
3604 return IRQ_HANDLED;
3605}
3606
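/*
 * Map the "eint" gpio (DAT1 wake line) to an interrupt and register a
 * threaded low-level-trigger handler that forwards it as an SDIO IRQ.
 * IRQ_NOAUTOEN keeps it disabled until the driver explicitly enables it.
 */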
3607static int request_dat1_eint_irq(struct msdc_host *host)
3608{
3609 struct gpio_desc *desc;
3610 int ret = 0;
3611 int irq;
3612
3613 desc = devm_gpiod_get_index(host->dev, "eint", 0, GPIOD_IN);
3614 if (IS_ERR(desc))
3615 return PTR_ERR(desc);
3616
3617 irq = gpiod_to_irq(desc);
3618 if (irq >= 0) {
3619 irq_set_status_flags(irq, IRQ_NOAUTOEN);
3620 ret = devm_request_threaded_irq(host->dev, irq,
3621 NULL, sdio_eint_irq,
3622 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3623 "sdio-eint", host);
3624 } else {
3625 ret = irq;
3626 }
3627
3628 host->eint_irq = irq;
3629 return ret;
3630}
3631
3632#else
3633/* For backward compatibility, remove later */
3634int wait_sdio_autok_ready(void *data)
3635{
3636 return 0;
3637}
3638EXPORT_SYMBOL(wait_sdio_autok_ready);
3639
3640static void register_legacy_sdio_apis(struct msdc_host *host)
3641{
3642 host->request_sdio_eirq = mt_sdio_ops[SDIO_USE_PORT].sdio_request_eirq;
3643 host->enable_sdio_eirq = mt_sdio_ops[SDIO_USE_PORT].sdio_enable_eirq;
3644 host->disable_sdio_eirq = mt_sdio_ops[SDIO_USE_PORT].sdio_disable_eirq;
3645 host->register_pm = mt_sdio_ops[SDIO_USE_PORT].sdio_register_pm;
3646}
3647
3648static void msdc_eirq_sdio(void *data)
3649{
3650 struct msdc_host *host = (struct msdc_host *)data;
3651
3652 mmc_signal_sdio_irq(host->mmc);
3653}
3654
3655static void msdc_pm(pm_message_t state, void *data)
3656{
3657 struct msdc_host *host = (struct msdc_host *)data;
3658
3659 int evt = state.event;
3660
3661 if ((evt == PM_EVENT_SUSPEND) || (evt == PM_EVENT_USER_SUSPEND)) {
3662 if (host->suspend != 0)
3663 return;
3664
3665 pr_info("msdc%d -> %s Suspend\n", SDIO_USE_PORT,
3666 evt == PM_EVENT_SUSPEND ? "PM" : "USR");
3667 host->suspend = 1;
3668 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
3669 mmc_remove_host(host->mmc);
3670 }
3671
3672 if ((evt == PM_EVENT_RESUME) || (evt == PM_EVENT_USER_RESUME)) {
3673 if (host->suspend == 0)
3674 return;
3675
3676 pr_info("msdc%d -> %s Resume\n", SDIO_USE_PORT,
3677 evt == PM_EVENT_RESUME ? "PM" : "USR");
3678 host->suspend = 0;
3679 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
3680 host->mmc->pm_flags |= MMC_PM_KEEP_POWER;
3681 host->mmc->rescan_entered = 0;
3682 mmc_add_host(host->mmc);
3683 }
3684}
3685#endif
3686
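/*
 * Exported hook that lets code outside this driver set or clear
 * MSDC_CFG_CKPDN (card clock power-down control) on the SDIO host.
 */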
3687void sdio_set_card_clkpd(int on)
3688{
3689 if (!on)
3690 sdr_clr_bits(sdio_host->base + MSDC_CFG,
3691 MSDC_CFG_CKPDN);
3692 else
3693 sdr_set_bits(sdio_host->base + MSDC_CFG,
3694 MSDC_CFG_CKPDN);
3695}
3696EXPORT_SYMBOL(sdio_set_card_clkpd);
3697
3698static const struct mt81xx_sdio_compatible mt8183_compat = {
3699 .v3_plus = true,
3700 .top_reg = true,
3701};
3702
3703static const struct mt81xx_sdio_compatible mt8167_compat = {
3704 .v3_plus = false,
3705 .top_reg = false,
3706};
3707
3708static const struct mt81xx_sdio_compatible mt2712_compat = {
3709 .v3_plus = false,
3710 .top_reg = false,
3711};
3712
3713static const struct mt81xx_sdio_compatible mt2735_compat = {
3714 .v3_plus = false,
3715 .top_reg = true,
3716};
3717
3718static const struct mt81xx_sdio_compatible mt8695_compat = {
3719 .v3_plus = true,
3720 .top_reg = false,
3721};
3722
3723static const struct of_device_id msdc_of_ids[] = {
3724 { .compatible = "mediatek,mt8183-sdio", .data = &mt8183_compat},
3725 { .compatible = "mediatek,mt8167-sdio", .data = &mt8167_compat},
3726 { .compatible = "mediatek,mt2712-sdio", .data = &mt2712_compat},
3727 { .compatible = "mediatek,mt2735-sdio", .data = &mt2735_compat},
3728 { .compatible = "mediatek,mt8695-sdio", .data = &mt8695_compat},
3729 {}
3730};
3731
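/*
 * Probe flow: map the MSDC (and, depending on the SoC, top or infra-reset)
 * register ranges, acquire clocks, regulators, pinctrl states and the MSDC
 * interrupt, allocate the GPDMA/BDMA descriptors, initialise the hardware,
 * hook up either the DAT1 eint interrupt or the legacy SDIO callbacks, and
 * finally register the mmc host.
 */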
3732static int msdc_drv_probe(struct platform_device *pdev)
3733{
3734 struct mmc_host *mmc;
3735 struct msdc_host *host;
3736 struct resource *res;
3737 struct resource *res_top;
3738 const struct of_device_id *of_id;
3739 int ret;
3740 u32 val;
3741
3742 if (!pdev->dev.of_node) {
3743 dev_info(&pdev->dev, "No DT found\n");
3744 return -EINVAL;
3745 }
3746
3747 of_id = of_match_node(msdc_of_ids, pdev->dev.of_node);
3748 if (!of_id)
3749 return -EINVAL;
3750 /* Allocate MMC host for this device */
3751 mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
3752 if (!mmc)
3753 return -ENOMEM;
3754
3755 host = mmc_priv(mmc);
3756 ret = mmc_of_parse(mmc);
3757 if (ret)
3758 goto host_free;
3759
3760 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3761 host->base = devm_ioremap_resource(&pdev->dev, res);
3762 if (IS_ERR(host->base)) {
3763 ret = PTR_ERR(host->base);
3764 goto host_free;
3765 }
3766
3767 host->dev_comp = of_id->data;
3768 if (host->dev_comp->top_reg) {
3769 res_top = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3770 host->base_top = devm_ioremap_resource(&pdev->dev, res_top);
3771 if (IS_ERR(host->base_top)) {
3772 ret = PTR_ERR(host->base_top);
3773 goto host_free;
3774 }
3775 } else {
3776 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3777 host->infra_reset = devm_ioremap_resource(&pdev->dev, res);
3778 if (IS_ERR(host->infra_reset)) {
3779 ret = PTR_ERR(host->infra_reset);
3780 goto host_free;
3781 }
3782
3783 if (!of_property_read_u32(pdev->dev.of_node,
3784 "module_reset_bit", &host->module_reset_bit))
3785 dev_dbg(&pdev->dev, "module_reset_bit: %x\n",
3786 host->module_reset_bit);
3787 }
3788
3789 ret = mmc_regulator_get_supply(mmc);
3790 if (ret == -EPROBE_DEFER)
3791 goto host_free;
3792
3793 host->src_clk = devm_clk_get(&pdev->dev, "source");
3794 if (IS_ERR(host->src_clk)) {
3795 ret = PTR_ERR(host->src_clk);
3796 goto host_free;
3797 }
3798
3799 host->h_clk = devm_clk_get(&pdev->dev, "hclk");
3800 if (IS_ERR(host->h_clk)) {
3801 ret = PTR_ERR(host->h_clk);
3802 goto host_free;
3803 }
3804
3805 host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
3806 if (IS_ERR(host->src_clk_cg))
3807 host->src_clk_cg = NULL;
3808
3809 host->irq = platform_get_irq(pdev, 0);
3810 if (host->irq < 0) {
3811 ret = -EINVAL;
3812 goto host_free;
3813 }
3814
3815 host->pinctrl = devm_pinctrl_get(&pdev->dev);
3816 if (IS_ERR(host->pinctrl)) {
3817 ret = PTR_ERR(host->pinctrl);
3818 dev_info(&pdev->dev, "Cannot find pinctrl!\n");
3819 goto host_free;
3820 }
3821
3822 host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
3823 if (IS_ERR(host->pins_default)) {
3824 ret = PTR_ERR(host->pins_default);
3825 dev_info(&pdev->dev, "Cannot find pinctrl default!\n");
3826 goto host_free;
3827 }
3828
3829 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
3830 if (IS_ERR(host->pins_uhs)) {
3831 ret = PTR_ERR(host->pins_uhs);
3832		dev_info(&pdev->dev, "Cannot find pinctrl uhs!\n");
3833 goto host_free;
3834 }
3835 pinctrl_select_state(host->pinctrl, host->pins_uhs);
3836
3837 host->pins_dat1 = pinctrl_lookup_state(host->pinctrl, "state_dat1");
3838 if (IS_ERR(host->pins_dat1)) {
3839 ret = PTR_ERR(host->pins_dat1);
3840 dev_info(&pdev->dev, "Cannot find pinctrl dat1!\n");
3841 goto host_free;
3842 }
3843
3844 host->pins_dat1_eint = pinctrl_lookup_state(host->pinctrl,
3845 "state_eint");
3846 if (IS_ERR(host->pins_dat1_eint)) {
3847 ret = PTR_ERR(host->pins_dat1_eint);
3848 dev_info(&pdev->dev, "Cannot find pinctrl dat1 eint!\n");
3849 goto host_free;
3850 }
3851
3852 if (!of_property_read_u32(pdev->dev.of_node,
3853 "hs400-ds-delay", &host->hs400_ds_delay))
3854 dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
3855 host->hs400_ds_delay);
3856
3857#ifdef SUPPORT_LEGACY_SDIO
	if (of_property_read_bool(pdev->dev.of_node, "cap-sdio-irq"))
		host->cap_eirq = false;
	else
		host->cap_eirq = true;
#endif

	host->dev = &pdev->dev;
	host->mmc = mmc;
	host->src_clk_freq = clk_get_rate(host->src_clk);
	if (host->src_clk_freq > 200000000)
		host->src_clk_freq = 200000000;
	/* Set host parameters to mmc */
#ifdef SUPPORT_LEGACY_SDIO
	if (host->cap_eirq)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
#endif
	mmc->ops = &mt_msdc_ops;
	mmc->f_min = host->src_clk_freq / (4 * 255);
	mmc->ocr_avail = MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 |
			 MMC_VDD_31_32 | MMC_VDD_32_33;

	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
	/* MMC core transfer sizes tunable parameters */
	mmc->max_segs = MAX_BD_NUM;
	mmc->max_seg_size = BDMA_DESC_BUFLEN;
	mmc->max_blk_size = 2048;
	mmc->max_req_size = 512 * 1024;
	mmc->max_blk_count = mmc->max_req_size / 512;
	host->dma_mask = DMA_BIT_MASK(32);
	mmc_dev(mmc)->dma_mask = &host->dma_mask;

	host->timeout_clks = 3 * 1048576;
	host->irq_thread_alive = false;
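	/*
	 * The GPD/BD DMA descriptors are allocated once here and reused for
	 * every request (see msdc_dma_setup()).
	 */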
	host->dma.gpd = dma_alloc_coherent(&pdev->dev,
			2 * sizeof(struct mt_gpdma_desc),
			&host->dma.gpd_addr, GFP_KERNEL);
	host->dma.bd = dma_alloc_coherent(&pdev->dev,
			MAX_BD_NUM * sizeof(struct mt_bdma_desc),
			&host->dma.bd_addr, GFP_KERNEL);
	if (!host->dma.gpd || !host->dma.bd) {
		ret = -ENOMEM;
		goto release_mem;
	}
	msdc_init_gpd_bd(host, &host->dma);
	INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
	spin_lock_init(&host->lock);
	spin_lock_init(&host->irqlock);

	platform_set_drvdata(pdev, mmc);
	msdc_ungate_clock(host);

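	/*
	 * Without a top_reg the MSDC module is reset through the infracfg
	 * registers: the module bit is written to infra_reset and then to
	 * infra_reset + 0x4 (presumably the set/clear pair) to complete
	 * the reset.
	 */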
	if (!host->dev_comp->top_reg) {
		/* Exercise the MSDC module reset function */
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_MODE);
		/* do MSDC module reset */
		val = readl(host->infra_reset);
		pr_debug("init 0x10001030: 0x%x, MSDC_CFG: 0x%x\n",
			 val, readl(host->base + MSDC_CFG));
		writel(0x1 << host->module_reset_bit, host->infra_reset);
		val = readl(host->infra_reset);
		udelay(1);
		pr_debug("msdc module resetting 0x10001030: 0x%x\n", val);
		writel(0x1 << host->module_reset_bit, host->infra_reset + 0x04);
		udelay(1);
		val = readl(host->infra_reset);
		pr_info("msdc module reset done 0x10001030: 0x%x, MSDC_CFG: 0x%x\n",
			val, readl(host->base + MSDC_CFG));
	}

	msdc_init_hw(host);

	ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
			IRQF_TRIGGER_LOW | IRQF_ONESHOT, pdev->name, host);
	if (ret)
		goto release;

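	/*
	 * SDIO interrupt delivery: without SUPPORT_LEGACY_SDIO a dedicated
	 * DAT1 EINT is requested; the legacy path instead registers the
	 * platform SDIO EINT and PM callbacks.
	 */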
#ifndef SUPPORT_LEGACY_SDIO
	ret = request_dat1_eint_irq(host);
	if (ret) {
		dev_info(host->dev, "failed to register data1 eint irq!\n");
		goto release;
	}

	pinctrl_select_state(host->pinctrl, host->pins_dat1);
#else
	host->suspend = 0;

	register_legacy_sdio_apis(host);
	if (host->request_sdio_eirq)
		host->request_sdio_eirq(msdc_eirq_sdio, (void *)host);
	if (host->register_pm) {
		host->register_pm(msdc_pm, (void *)host);

		/* pm not controlled by system but by client. */
		mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
	}
#endif

	pm_runtime_set_active(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);
	pm_runtime_enable(host->dev);

	if (!host->dev_comp->top_reg)
		mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
	host->mmc->caps |= MMC_CAP_NONREMOVABLE;
	host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
	host->mmc->pm_flags |= MMC_PM_KEEP_POWER;

	ret = mmc_add_host(mmc);
	pr_info("%s: add new sdio_host %s, index=%d, ret=%d\n", __func__,
		mmc_hostname(host->mmc), mmc->index, ret);

	sdio_host = host;
	if (ret)
		goto end;

	return 0;
end:
	pm_runtime_disable(host->dev);
release:
	platform_set_drvdata(pdev, NULL);
	msdc_deinit_hw(host);
	msdc_gate_clock(host);
release_mem:
	if (host->dma.gpd)
		dma_free_coherent(&pdev->dev,
			2 * sizeof(struct mt_gpdma_desc),
			host->dma.gpd, host->dma.gpd_addr);
	if (host->dma.bd)
		dma_free_coherent(&pdev->dev,
			MAX_BD_NUM * sizeof(struct mt_bdma_desc),
			host->dma.bd, host->dma.bd_addr);
host_free:
	mmc_free_host(mmc);

	return ret;
}

static int msdc_drv_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct msdc_host *host;

	mmc = platform_get_drvdata(pdev);
	host = mmc_priv(mmc);

	pm_runtime_get_sync(host->dev);

	platform_set_drvdata(pdev, NULL);
	mmc_remove_host(host->mmc);
	msdc_deinit_hw(host);
	msdc_gate_clock(host);

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		pm_runtime_put_sync(host->dev);

	pm_runtime_disable(host->dev);
	pm_runtime_put_noidle(host->dev);
	dma_free_coherent(&pdev->dev,
			2 * sizeof(struct mt_gpdma_desc),
			host->dma.gpd, host->dma.gpd_addr);
	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
			host->dma.bd, host->dma.bd_addr);

	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM
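/*
 * The tuning and interrupt-enable registers are saved before runtime
 * suspend and restored on resume.
 */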
static void msdc_save_reg(struct msdc_host *host)
{
	host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
	host->save_para.iocon = readl(host->base + MSDC_IOCON);
	host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
	host->save_para.pad_tune0 = readl(host->base + MSDC_PAD_TUNE0);
	host->save_para.pad_tune1 = readl(host->base + MSDC_PAD_TUNE1);
	host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT0);
	host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
	host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
	host->save_para.pad_ds_tune = readl(host->base + EMMC50_PAD_DS_TUNE);
	host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
	host->save_para.msdc_inten = readl(host->base + MSDC_INTEN);
}

static void msdc_restore_reg(struct msdc_host *host)
{
	writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
	writel(host->save_para.iocon, host->base + MSDC_IOCON);
	writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
	writel(host->save_para.pad_tune0, host->base + MSDC_PAD_TUNE0);
	writel(host->save_para.pad_tune1, host->base + MSDC_PAD_TUNE1);
	writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT0);
	writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
	writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
	writel(host->save_para.pad_ds_tune, host->base + EMMC50_PAD_DS_TUNE);
	writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
	writel(host->save_para.msdc_inten, host->base + MSDC_INTEN);
}

static int msdc_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

#ifdef SUPPORT_LEGACY_SDIO
	msdc_save_reg(host);
	msdc_gate_clock(host);
	return 0;
#else
	unsigned long flags;

	msdc_save_reg(host);
	disable_irq(host->irq);
	msdc_gate_clock(host);
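	/*
	 * Route DAT1 to the EINT and arm it as a wake source so SDIO
	 * interrupts can still be detected while the host is suspended.
	 */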
	pinctrl_select_state(host->pinctrl, host->pins_dat1_eint);
	spin_lock_irqsave(&host->irqlock, flags);
	if (host->sdio_irq_cnt == 0) {
		enable_irq(host->eint_irq);
		enable_irq_wake(host->eint_irq);
		host->sdio_irq_cnt++;
		/*
		 * If the SDIO card does not support async IRQ,
		 * keep the clock always on.
		 */
		if (mmc->card && (mmc->card->cccr.eai == 0))
			pm_runtime_get_noresume(host->dev);
	}
	spin_unlock_irqrestore(&host->irqlock, flags);
	return 0;
#endif
}

static int msdc_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

#ifdef SUPPORT_LEGACY_SDIO
	msdc_ungate_clock(host);
	msdc_restore_reg(host);
	return 0;
#else
	unsigned long flags;

	spin_lock_irqsave(&host->irqlock, flags);
	if (host->sdio_irq_cnt > 0) {
		disable_irq_nosync(host->eint_irq);
		disable_irq_wake(host->eint_irq);
		host->sdio_irq_cnt--;
		if (mmc->card && (mmc->card->cccr.eai == 0))
			pm_runtime_put_noidle(host->dev);
	}
	spin_unlock_irqrestore(&host->irqlock, flags);
	pinctrl_select_state(host->pinctrl, host->pins_dat1);
	msdc_ungate_clock(host);
	msdc_restore_reg(host);
	enable_irq(host->irq);
	return 0;
#endif
}
#endif

static const struct dev_pm_ops msdc_dev_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};

static struct platform_driver mt_sdio_driver = {
	.probe = msdc_drv_probe,
	.remove = msdc_drv_remove,
	.driver = {
		.name = "mtk-sdio",
		.of_match_table = msdc_of_ids,
		.pm = &msdc_dev_pm_ops,
	},
};

module_platform_driver(mt_sdio_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SDIO Driver");