// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

/* Register map of the MTK SPI controller */
#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030

/* SPI_CFG0_REG field offsets (legacy timing layout) */
#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
/*
 * Enhanced-timing layout: SCK high/low are programmed through
 * SPI_CFG2_REG and CS hold/setup occupy 16-bit fields in SPI_CFG0_REG
 * (see mtk_spi_prepare_transfer()).
 */
#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET	16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

/* SPI_CFG1_REG field offsets */
#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	30

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000

/* SPI_CMD_REG bits */
#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

/* driver-internal transfer state */
#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)

84struct mtk_spi_compatible {
85 bool need_pad_sel;
86 /* Must explicitly send dummy Tx bytes to do Rx only transfer */
87 bool must_tx;
88 /* some IC design adjust cfg register to enhance time accuracy */
89 bool enhance_timing;
90 /* some IC support DMA addr extension */
91 bool dma_ext;
92};
93
94struct mtk_spi {
95 void __iomem *base;
96 u32 state;
97 int pad_num;
98 u32 *pad_sel;
99 struct clk *parent_clk, *sel_clk, *spi_clk, *spare_clk;
100 struct spi_transfer *cur_transfer;
101 u32 xfer_len;
102 u32 num_xfered;
103 struct scatterlist *tx_sgl, *rx_sgl;
104 u32 tx_sgl_len, rx_sgl_len;
105 const struct mtk_spi_compatible *dev_comp;
106};
107
108static const struct mtk_spi_compatible mtk_common_compat;
109
110static const struct mtk_spi_compatible mt2712_compat = {
111 .must_tx = true,
112};
113
114static const struct mtk_spi_compatible mt6765_compat = {
115 .need_pad_sel = true,
116 .must_tx = true,
117 .enhance_timing = true,
118 .dma_ext = true,
119};
120
121static const struct mtk_spi_compatible mt7622_compat = {
122 .must_tx = true,
123 .enhance_timing = true,
124};
125
126static const struct mtk_spi_compatible mt8173_compat = {
127 .need_pad_sel = true,
128 .must_tx = true,
129};
130
131static const struct mtk_spi_compatible mt8183_compat = {
132 .need_pad_sel = true,
133 .must_tx = true,
134 .enhance_timing = true,
135};
136
137/*
138 * A piece of default chip info unless the platform
139 * supplies it.
140 */
141static const struct mtk_chip_config mtk_default_chip_info = {
142 .cs_pol = 0,
143 .sample_sel = 0,
144};
145
146static const struct of_device_id mtk_spi_of_match[] = {
147 { .compatible = "mediatek,mt2701-spi",
148 .data = (void *)&mtk_common_compat,
149 },
150 { .compatible = "mediatek,mt2712-spi",
151 .data = (void *)&mt2712_compat,
152 },
153 { .compatible = "mediatek,mt6589-spi",
154 .data = (void *)&mtk_common_compat,
155 },
156 { .compatible = "mediatek,mt6765-spi",
157 .data = (void *)&mt6765_compat,
158 },
159 { .compatible = "mediatek,mt7622-spi",
160 .data = (void *)&mt7622_compat,
161 },
162 { .compatible = "mediatek,mt7629-spi",
163 .data = (void *)&mt7622_compat,
164 },
165 { .compatible = "mediatek,mt8135-spi",
166 .data = (void *)&mtk_common_compat,
167 },
168 { .compatible = "mediatek,mt8173-spi",
169 .data = (void *)&mt8173_compat,
170 },
171 { .compatible = "mediatek,mt8183-spi",
172 .data = (void *)&mt8183_compat,
173 },
174 {}
175};
176MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
177
178static void mtk_spi_reset(struct mtk_spi *mdata)
179{
180 u32 reg_val;
181
182 /* set the software reset bit in SPI_CMD_REG. */
183 reg_val = readl(mdata->base + SPI_CMD_REG);
184 reg_val |= SPI_CMD_RST;
185 writel(reg_val, mdata->base + SPI_CMD_REG);
186
187 reg_val = readl(mdata->base + SPI_CMD_REG);
188 reg_val &= ~SPI_CMD_RST;
189 writel(reg_val, mdata->base + SPI_CMD_REG);
190}
191
192static int mtk_spi_prepare_message(struct spi_master *master,
193 struct spi_message *msg)
194{
195 u16 cpha, cpol;
196 u32 reg_val;
197 struct spi_device *spi = msg->spi;
198 struct mtk_chip_config *chip_config = spi->controller_data;
199 struct mtk_spi *mdata = spi_master_get_devdata(master);
200
201 cpha = spi->mode & SPI_CPHA ? 1 : 0;
202 cpol = spi->mode & SPI_CPOL ? 1 : 0;
203
204 reg_val = readl(mdata->base + SPI_CMD_REG);
205 if (cpha)
206 reg_val |= SPI_CMD_CPHA;
207 else
208 reg_val &= ~SPI_CMD_CPHA;
209 if (cpol)
210 reg_val |= SPI_CMD_CPOL;
211 else
212 reg_val &= ~SPI_CMD_CPOL;
213
214 /* set the mlsbx and mlsbtx */
215 if (spi->mode & SPI_LSB_FIRST) {
216 reg_val &= ~SPI_CMD_TXMSBF;
217 reg_val &= ~SPI_CMD_RXMSBF;
218 } else {
219 reg_val |= SPI_CMD_TXMSBF;
220 reg_val |= SPI_CMD_RXMSBF;
221 }
222
223 /* set the tx/rx endian */
224#ifdef __LITTLE_ENDIAN
225 reg_val &= ~SPI_CMD_TX_ENDIAN;
226 reg_val &= ~SPI_CMD_RX_ENDIAN;
227#else
228 reg_val |= SPI_CMD_TX_ENDIAN;
229 reg_val |= SPI_CMD_RX_ENDIAN;
230#endif
231
232 if (mdata->dev_comp->enhance_timing) {
233 if (chip_config->cs_pol)
234 reg_val |= SPI_CMD_CS_POL;
235 else
236 reg_val &= ~SPI_CMD_CS_POL;
237 if (chip_config->sample_sel)
238 reg_val |= SPI_CMD_SAMPLE_SEL;
239 else
240 reg_val &= ~SPI_CMD_SAMPLE_SEL;
241 }
242
243 /* set finish and pause interrupt always enable */
244 reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
245
246 /* disable dma mode */
247 reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
248
249 /* disable deassert mode */
250 reg_val &= ~SPI_CMD_DEASSERT;
251
252 writel(reg_val, mdata->base + SPI_CMD_REG);
253
254 /* pad select */
255 if (mdata->dev_comp->need_pad_sel)
256 writel(mdata->pad_sel[spi->chip_select],
257 mdata->base + SPI_PAD_SEL_REG);
258
259 return 0;
260}
261
262static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
263{
264 u32 reg_val;
265 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
266
267 reg_val = readl(mdata->base + SPI_CMD_REG);
268 if (!enable) {
269 reg_val |= SPI_CMD_PAUSE_EN;
270 writel(reg_val, mdata->base + SPI_CMD_REG);
271 } else {
272 reg_val &= ~SPI_CMD_PAUSE_EN;
273 writel(reg_val, mdata->base + SPI_CMD_REG);
274 mdata->state = MTK_SPI_IDLE;
275 mtk_spi_reset(mdata);
276 }
277}
278
279static void mtk_spi_prepare_transfer(struct spi_master *master,
280 struct spi_transfer *xfer)
281{
282 u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
283 struct mtk_spi *mdata = spi_master_get_devdata(master);
284
285 spi_clk_hz = clk_get_rate(mdata->spi_clk);
286
287 if (xfer->speed_hz < spi_clk_hz / 2)
288 div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
289 else
290 div = 1;
291
292 sck_time = (div + 1) / 2;
293 cs_time = sck_time * 2;
294
295 if (mdata->dev_comp->enhance_timing) {
296 reg_val |= (((sck_time - 1) & 0xffff)
297 << SPI_CFG0_SCK_HIGH_OFFSET);
298 reg_val |= (((sck_time - 1) & 0xffff)
299 << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
300 writel(reg_val, mdata->base + SPI_CFG2_REG);
301 reg_val |= (((cs_time - 1) & 0xffff)
302 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
303 reg_val |= (((cs_time - 1) & 0xffff)
304 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
305 writel(reg_val, mdata->base + SPI_CFG0_REG);
306 } else {
307 reg_val |= (((sck_time - 1) & 0xff)
308 << SPI_CFG0_SCK_HIGH_OFFSET);
309 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
310 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
311 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
312 writel(reg_val, mdata->base + SPI_CFG0_REG);
313 }
314
315 reg_val = readl(mdata->base + SPI_CFG1_REG);
316 reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
317 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
318 writel(reg_val, mdata->base + SPI_CFG1_REG);
319}
320
321static void mtk_spi_setup_packet(struct spi_master *master)
322{
323 u32 packet_size, packet_loop, reg_val;
324 struct mtk_spi *mdata = spi_master_get_devdata(master);
325
326 packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
327 packet_loop = mdata->xfer_len / packet_size;
328
329 reg_val = readl(mdata->base + SPI_CFG1_REG);
330 reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
331 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
332 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
333 writel(reg_val, mdata->base + SPI_CFG1_REG);
334}
335
336static void mtk_spi_enable_transfer(struct spi_master *master)
337{
338 u32 cmd;
339 struct mtk_spi *mdata = spi_master_get_devdata(master);
340
341 cmd = readl(mdata->base + SPI_CMD_REG);
342 if (mdata->state == MTK_SPI_IDLE)
343 cmd |= SPI_CMD_ACT;
344 else
345 cmd |= SPI_CMD_RESUME;
346 writel(cmd, mdata->base + SPI_CMD_REG);
347}
348
349static int mtk_spi_get_mult_delta(u32 xfer_len)
350{
351 u32 mult_delta;
352
353 if (xfer_len > MTK_SPI_PACKET_SIZE)
354 mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
355 else
356 mult_delta = 0;
357
358 return mult_delta;
359}
360
361static void mtk_spi_update_mdata_len(struct spi_master *master)
362{
363 int mult_delta;
364 struct mtk_spi *mdata = spi_master_get_devdata(master);
365
366 if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
367 if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
368 mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
369 mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
370 mdata->rx_sgl_len = mult_delta;
371 mdata->tx_sgl_len -= mdata->xfer_len;
372 } else {
373 mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
374 mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
375 mdata->tx_sgl_len = mult_delta;
376 mdata->rx_sgl_len -= mdata->xfer_len;
377 }
378 } else if (mdata->tx_sgl_len) {
379 mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
380 mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
381 mdata->tx_sgl_len = mult_delta;
382 } else if (mdata->rx_sgl_len) {
383 mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
384 mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
385 mdata->rx_sgl_len = mult_delta;
386 }
387}
388
389static void mtk_spi_setup_dma_addr(struct spi_master *master,
390 struct spi_transfer *xfer)
391{
392 struct mtk_spi *mdata = spi_master_get_devdata(master);
393
394 if (mdata->tx_sgl) {
395 writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
396 mdata->base + SPI_TX_SRC_REG);
397#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
398 if (mdata->dev_comp->dma_ext)
399 writel((u32)(xfer->tx_dma >> 32),
400 mdata->base + SPI_TX_SRC_REG_64);
401#endif
402 }
403
404 if (mdata->rx_sgl) {
405 writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
406 mdata->base + SPI_RX_DST_REG);
407#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
408 if (mdata->dev_comp->dma_ext)
409 writel((u32)(xfer->rx_dma >> 32),
410 mdata->base + SPI_RX_DST_REG_64);
411#endif
412 }
413}
414
415static int mtk_spi_fifo_transfer(struct spi_master *master,
416 struct spi_device *spi,
417 struct spi_transfer *xfer)
418{
419 int cnt, remainder;
420 u32 reg_val;
421 struct mtk_spi *mdata = spi_master_get_devdata(master);
422
423 mdata->cur_transfer = xfer;
424 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
425 mdata->num_xfered = 0;
426 mtk_spi_prepare_transfer(master, xfer);
427 mtk_spi_setup_packet(master);
428
429 cnt = xfer->len / 4;
430 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
431
432 remainder = xfer->len % 4;
433 if (remainder > 0) {
434 reg_val = 0;
435 memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
436 writel(reg_val, mdata->base + SPI_TX_DATA_REG);
437 }
438
439 mtk_spi_enable_transfer(master);
440
441 return 1;
442}
443
444static int mtk_spi_dma_transfer(struct spi_master *master,
445 struct spi_device *spi,
446 struct spi_transfer *xfer)
447{
448 int cmd;
449 struct mtk_spi *mdata = spi_master_get_devdata(master);
450
451 mdata->tx_sgl = NULL;
452 mdata->rx_sgl = NULL;
453 mdata->tx_sgl_len = 0;
454 mdata->rx_sgl_len = 0;
455 mdata->cur_transfer = xfer;
456 mdata->num_xfered = 0;
457
458 mtk_spi_prepare_transfer(master, xfer);
459
460 cmd = readl(mdata->base + SPI_CMD_REG);
461 if (xfer->tx_buf)
462 cmd |= SPI_CMD_TX_DMA;
463 if (xfer->rx_buf)
464 cmd |= SPI_CMD_RX_DMA;
465 writel(cmd, mdata->base + SPI_CMD_REG);
466
467 if (xfer->tx_buf)
468 mdata->tx_sgl = xfer->tx_sg.sgl;
469 if (xfer->rx_buf)
470 mdata->rx_sgl = xfer->rx_sg.sgl;
471
472 if (mdata->tx_sgl) {
473 xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
474 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
475 }
476 if (mdata->rx_sgl) {
477 xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
478 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
479 }
480
481 mtk_spi_update_mdata_len(master);
482 mtk_spi_setup_packet(master);
483 mtk_spi_setup_dma_addr(master, xfer);
484 mtk_spi_enable_transfer(master);
485
486 return 1;
487}
488
489static int mtk_spi_transfer_one(struct spi_master *master,
490 struct spi_device *spi,
491 struct spi_transfer *xfer)
492{
493 if (master->can_dma(master, spi, xfer))
494 return mtk_spi_dma_transfer(master, spi, xfer);
495 else
496 return mtk_spi_fifo_transfer(master, spi, xfer);
497}
498
499static bool mtk_spi_can_dma(struct spi_master *master,
500 struct spi_device *spi,
501 struct spi_transfer *xfer)
502{
503 /* Buffers for DMA transactions must be 4-byte aligned */
504 return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
505 (unsigned long)xfer->tx_buf % 4 == 0 &&
506 (unsigned long)xfer->rx_buf % 4 == 0);
507}
508
509static int mtk_spi_setup(struct spi_device *spi)
510{
511 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
512
513 if (!spi->controller_data)
514 spi->controller_data = (void *)&mtk_default_chip_info;
515
516 if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
517 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
518
519 return 0;
520}
521
522static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
523{
524 u32 cmd, reg_val, cnt, remainder, len;
525 struct spi_master *master = dev_id;
526 struct mtk_spi *mdata = spi_master_get_devdata(master);
527 struct spi_transfer *trans = mdata->cur_transfer;
528
529 reg_val = readl(mdata->base + SPI_STATUS0_REG);
530 if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
531 mdata->state = MTK_SPI_PAUSED;
532 else
533 mdata->state = MTK_SPI_IDLE;
534
535 if (!master->can_dma(master, master->cur_msg->spi, trans)) {
536 if (trans->rx_buf) {
537 cnt = mdata->xfer_len / 4;
538 ioread32_rep(mdata->base + SPI_RX_DATA_REG,
539 trans->rx_buf + mdata->num_xfered, cnt);
540 remainder = mdata->xfer_len % 4;
541 if (remainder > 0) {
542 reg_val = readl(mdata->base + SPI_RX_DATA_REG);
543 memcpy(trans->rx_buf +
544 mdata->num_xfered +
545 (cnt * 4),
546 &reg_val,
547 remainder);
548 }
549 }
550
551 mdata->num_xfered += mdata->xfer_len;
552 if (mdata->num_xfered == trans->len) {
553 spi_finalize_current_transfer(master);
554 return IRQ_HANDLED;
555 }
556
557 len = trans->len - mdata->num_xfered;
558 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
559 mtk_spi_setup_packet(master);
560
561 cnt = mdata->xfer_len / 4;
562 iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
563 trans->tx_buf + mdata->num_xfered, cnt);
564
565 remainder = mdata->xfer_len % 4;
566 if (remainder > 0) {
567 reg_val = 0;
568 memcpy(&reg_val,
569 trans->tx_buf + (cnt * 4) + mdata->num_xfered,
570 remainder);
571 writel(reg_val, mdata->base + SPI_TX_DATA_REG);
572 }
573
574 mtk_spi_enable_transfer(master);
575
576 return IRQ_HANDLED;
577 }
578
579 if (mdata->tx_sgl)
580 trans->tx_dma += mdata->xfer_len;
581 if (mdata->rx_sgl)
582 trans->rx_dma += mdata->xfer_len;
583
584 if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
585 mdata->tx_sgl = sg_next(mdata->tx_sgl);
586 if (mdata->tx_sgl) {
587 trans->tx_dma = sg_dma_address(mdata->tx_sgl);
588 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
589 }
590 }
591 if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
592 mdata->rx_sgl = sg_next(mdata->rx_sgl);
593 if (mdata->rx_sgl) {
594 trans->rx_dma = sg_dma_address(mdata->rx_sgl);
595 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
596 }
597 }
598
599 if (!mdata->tx_sgl && !mdata->rx_sgl) {
600 /* spi disable dma */
601 cmd = readl(mdata->base + SPI_CMD_REG);
602 cmd &= ~SPI_CMD_TX_DMA;
603 cmd &= ~SPI_CMD_RX_DMA;
604 writel(cmd, mdata->base + SPI_CMD_REG);
605
606 spi_finalize_current_transfer(master);
607 return IRQ_HANDLED;
608 }
609
610 mtk_spi_update_mdata_len(master);
611 mtk_spi_setup_packet(master);
612 mtk_spi_setup_dma_addr(master, trans);
613 mtk_spi_enable_transfer(master);
614
615 return IRQ_HANDLED;
616}
617
618static int mtk_spi_probe(struct platform_device *pdev)
619{
620 struct spi_master *master;
621 struct mtk_spi *mdata;
622 const struct of_device_id *of_id;
623 struct resource *res;
624 int i, irq, ret, addr_bits;
625
626 master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
627 if (!master) {
628 dev_err(&pdev->dev, "failed to alloc spi master\n");
629 return -ENOMEM;
630 }
631
632 master->auto_runtime_pm = true;
633 master->dev.of_node = pdev->dev.of_node;
634 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
635
636 master->set_cs = mtk_spi_set_cs;
637 master->prepare_message = mtk_spi_prepare_message;
638 master->transfer_one = mtk_spi_transfer_one;
639 master->can_dma = mtk_spi_can_dma;
640 master->setup = mtk_spi_setup;
641
642 of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
643 if (!of_id) {
644 dev_err(&pdev->dev, "failed to probe of_node\n");
645 ret = -EINVAL;
646 goto err_put_master;
647 }
648
649 mdata = spi_master_get_devdata(master);
650 mdata->dev_comp = of_id->data;
651 if (mdata->dev_comp->must_tx)
652 master->flags = SPI_MASTER_MUST_TX;
653
654 if (mdata->dev_comp->need_pad_sel) {
655 mdata->pad_num = of_property_count_u32_elems(
656 pdev->dev.of_node,
657 "mediatek,pad-select");
658 if (mdata->pad_num < 0) {
659 dev_err(&pdev->dev,
660 "No 'mediatek,pad-select' property\n");
661 ret = -EINVAL;
662 goto err_put_master;
663 }
664
665 mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
666 sizeof(u32), GFP_KERNEL);
667 if (!mdata->pad_sel) {
668 ret = -ENOMEM;
669 goto err_put_master;
670 }
671
672 for (i = 0; i < mdata->pad_num; i++) {
673 of_property_read_u32_index(pdev->dev.of_node,
674 "mediatek,pad-select",
675 i, &mdata->pad_sel[i]);
676 if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
677 dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
678 i, mdata->pad_sel[i]);
679 ret = -EINVAL;
680 goto err_put_master;
681 }
682 }
683 }
684
685 platform_set_drvdata(pdev, master);
686
687 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
688 if (!res) {
689 ret = -ENODEV;
690 dev_err(&pdev->dev, "failed to determine base address\n");
691 goto err_put_master;
692 }
693
694 mdata->base = devm_ioremap_resource(&pdev->dev, res);
695 if (IS_ERR(mdata->base)) {
696 ret = PTR_ERR(mdata->base);
697 goto err_put_master;
698 }
699
700 irq = platform_get_irq(pdev, 0);
701 if (irq < 0) {
702 dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
703 ret = irq;
704 goto err_put_master;
705 }
706
707 if (!pdev->dev.dma_mask)
708 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
709
710 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
711 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
712 if (ret) {
713 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
714 goto err_put_master;
715 }
716
717 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
718 if (IS_ERR(mdata->parent_clk)) {
719 ret = PTR_ERR(mdata->parent_clk);
720 dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
721 goto err_put_master;
722 }
723
724 mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
725 if (IS_ERR(mdata->sel_clk)) {
726 ret = PTR_ERR(mdata->sel_clk);
727 dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
728 goto err_put_master;
729 }
730
731 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
732 if (IS_ERR(mdata->spi_clk)) {
733 ret = PTR_ERR(mdata->spi_clk);
734 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
735 goto err_put_master;
736 }
737
738 ret = clk_prepare_enable(mdata->spi_clk);
739 if (ret < 0) {
740 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
741 goto err_put_master;
742 }
743
744 mdata->spare_clk = devm_clk_get(&pdev->dev, "spare-clk");
745 if (IS_ERR(mdata->spare_clk))
746 dev_notice(&pdev->dev, "spi is trying to get spare-clk\n");
747 else {
748 ret = clk_prepare_enable(mdata->spare_clk);
749 if (ret < 0) {
750 dev_err(&pdev->dev,
751 "failed to enable spare_clk (%d)\n", ret);
752 clk_disable_unprepare(mdata->spi_clk);
753 goto err_put_master;
754 }
755 }
756
757 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
758 if (ret < 0) {
759 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
760 clk_disable_unprepare(mdata->spi_clk);
761 goto err_put_master;
762 }
763
764 clk_disable_unprepare(mdata->spi_clk);
765 if (!IS_ERR(mdata->spare_clk))
766 clk_disable_unprepare(mdata->spare_clk);
767
768 pm_runtime_enable(&pdev->dev);
769
770 ret = devm_spi_register_master(&pdev->dev, master);
771 if (ret) {
772 dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
773 goto err_disable_runtime_pm;
774 }
775
776 if (mdata->dev_comp->need_pad_sel) {
777 if (mdata->pad_num != master->num_chipselect) {
778 dev_err(&pdev->dev,
779 "pad_num does not match num_chipselect(%d != %d)\n",
780 mdata->pad_num, master->num_chipselect);
781 ret = -EINVAL;
782 goto err_disable_runtime_pm;
783 }
784
785 if (!master->cs_gpios && master->num_chipselect > 1) {
786 dev_err(&pdev->dev,
787 "cs_gpios not specified and num_chipselect > 1\n");
788 ret = -EINVAL;
789 goto err_disable_runtime_pm;
790 }
791
792 if (master->cs_gpios) {
793 for (i = 0; i < master->num_chipselect; i++) {
794 ret = devm_gpio_request(&pdev->dev,
795 master->cs_gpios[i],
796 dev_name(&pdev->dev));
797 if (ret) {
798 dev_err(&pdev->dev,
799 "can't get CS GPIO %i\n", i);
800 goto err_disable_runtime_pm;
801 }
802 }
803 }
804 }
805
806 if (mdata->dev_comp->dma_ext)
807 addr_bits = DMA_ADDR_EXT_BITS;
808 else
809 addr_bits = DMA_ADDR_DEF_BITS;
810 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
811 if (ret)
812 dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
813 addr_bits, ret);
814
815 return 0;
816
817err_disable_runtime_pm:
818 pm_runtime_disable(&pdev->dev);
819err_put_master:
820 spi_master_put(master);
821
822 return ret;
823}
824
825static int mtk_spi_remove(struct platform_device *pdev)
826{
827 struct spi_master *master = platform_get_drvdata(pdev);
828 struct mtk_spi *mdata = spi_master_get_devdata(master);
829
830 pm_runtime_disable(&pdev->dev);
831
832 mtk_spi_reset(mdata);
833
834 return 0;
835}
836
#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the core, gate clocks, switch to sleep pins. */
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	/* clocks are already off if the device is runtime-suspended */
	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		if (!IS_ERR(mdata->spare_clk))
			clk_disable_unprepare(mdata->spare_clk);
	}

	ret = pinctrl_pm_select_sleep_state(dev);
	if (ret < 0)
		dev_notice(dev, "failed to set pin sleep_state (%d)\n", ret);

	return ret;
}

/* System resume: restore default pins, ungate clocks, resume the core. */
static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0)
		dev_notice(dev, "failed to set pin default_state (%d)\n", ret);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		if (!IS_ERR(mdata->spare_clk)) {
			ret = clk_prepare_enable(mdata->spare_clk);
			if (ret < 0) {
				clk_disable_unprepare(mdata->spi_clk);
				dev_err(dev,
					"failed to enable spare-clk (%d)\n",
					ret);
				return ret;
			}
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		if (!IS_ERR(mdata->spare_clk))
			clk_disable_unprepare(mdata->spare_clk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
/* Runtime suspend: gate the functional clocks. */
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	clk_disable_unprepare(mdata->spi_clk);
	if (!IS_ERR(mdata->spare_clk))
		clk_disable_unprepare(mdata->spare_clk);

	return 0;
}

/* Runtime resume: ungate spi_clk, then the optional spare_clk. */
static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	if (!IS_ERR(mdata->spare_clk)) {
		ret = clk_prepare_enable(mdata->spare_clk);
		if (ret < 0) {
			/* roll back spi_clk so the gate counts stay balanced */
			clk_disable_unprepare(mdata->spi_clk);
			dev_err(dev,
				"failed to enable spare-clk (%d)\n", ret);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

940static const struct dev_pm_ops mtk_spi_pm = {
941 SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
942 SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
943 mtk_spi_runtime_resume, NULL)
944};
945
946static struct platform_driver mtk_spi_driver = {
947 .driver = {
948 .name = "mtk-spi",
949 .pm = &mtk_spi_pm,
950 .of_match_table = mtk_spi_of_match,
951 },
952 .probe = mtk_spi_probe,
953 .remove = mtk_spi_remove,
954};
955
956module_platform_driver(mtk_spi_driver);
957
958MODULE_DESCRIPTION("MTK SPI Controller driver");
959MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
960MODULE_LICENSE("GPL v2");
961MODULE_ALIAS("platform:mtk-spi");