// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
// Copyright 2020 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/dma-mapping.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/of_device.h>
17#include <linux/pinctrl/consumer.h>
18#include <linux/regmap.h>
19#include <linux/spi/spi.h>
20#include <linux/spi/spi-fsl-dspi.h>
21
22#define DRIVER_NAME "fsl-dspi"
23
24#ifdef CONFIG_M5441x
25#define DSPI_FIFO_SIZE 16
26#else
27#define DSPI_FIFO_SIZE 4
28#endif
29#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
30
31#define SPI_MCR 0x00
32#define SPI_MCR_MASTER BIT(31)
33#define SPI_MCR_PCSIS (0x3F << 16)
34#define SPI_MCR_CLR_TXF BIT(11)
35#define SPI_MCR_CLR_RXF BIT(10)
36#define SPI_MCR_XSPI BIT(3)
37#define SPI_MCR_DIS_TXF BIT(13)
38#define SPI_MCR_DIS_RXF BIT(12)
39#define SPI_MCR_HALT BIT(0)
40
41#define SPI_TCR 0x08
42#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
43
44#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
45#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
46#define SPI_CTAR_CPOL BIT(26)
47#define SPI_CTAR_CPHA BIT(25)
48#define SPI_CTAR_LSBFE BIT(24)
49#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
50#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
51#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
52#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
53#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
54#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
55#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
56#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
57#define SPI_CTAR_SCALE_BITS 0xf
58
59#define SPI_CTAR0_SLAVE 0x0c
60
61#define SPI_SR 0x2c
62#define SPI_SR_TCFQF BIT(31)
63#define SPI_SR_EOQF BIT(28)
64#define SPI_SR_TFUF BIT(27)
65#define SPI_SR_TFFF BIT(25)
66#define SPI_SR_CMDTCF BIT(23)
67#define SPI_SR_SPEF BIT(21)
68#define SPI_SR_RFOF BIT(19)
69#define SPI_SR_TFIWF BIT(18)
70#define SPI_SR_RFDF BIT(17)
71#define SPI_SR_CMDFFF BIT(16)
72#define SPI_SR_CLEAR (SPI_SR_TCFQF | SPI_SR_EOQF | \
73 SPI_SR_TFUF | SPI_SR_TFFF | \
74 SPI_SR_CMDTCF | SPI_SR_SPEF | \
75 SPI_SR_RFOF | SPI_SR_TFIWF | \
76 SPI_SR_RFDF | SPI_SR_CMDFFF)
77
78#define SPI_RSER_TFFFE BIT(25)
79#define SPI_RSER_TFFFD BIT(24)
80#define SPI_RSER_RFDFE BIT(17)
81#define SPI_RSER_RFDFD BIT(16)
82
83#define SPI_RSER 0x30
84#define SPI_RSER_TCFQE BIT(31)
85#define SPI_RSER_EOQFE BIT(28)
86
87#define SPI_PUSHR 0x34
88#define SPI_PUSHR_CMD_CONT BIT(15)
89#define SPI_PUSHR_CMD_CTAS(x) (((x) << 12 & GENMASK(14, 12)))
90#define SPI_PUSHR_CMD_EOQ BIT(11)
91#define SPI_PUSHR_CMD_CTCNT BIT(10)
92#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))
93
94#define SPI_PUSHR_SLAVE 0x34
95
96#define SPI_POPR 0x38
97
98#define SPI_TXFR0 0x3c
99#define SPI_TXFR1 0x40
100#define SPI_TXFR2 0x44
101#define SPI_TXFR3 0x48
102#define SPI_RXFR0 0x7c
103#define SPI_RXFR1 0x80
104#define SPI_RXFR2 0x84
105#define SPI_RXFR3 0x88
106
107#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
108#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
109#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
110
111#define SPI_SREX 0x13c
112
113#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
114#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
115
116/* Register offsets for regmap_pushr */
117#define PUSHR_CMD 0x0
118#define PUSHR_TX 0x2
119
120#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
121
/* Per spi_device (chip-select) state, allocated in dspi_setup(), stored
 * via spi_set_ctldata() and freed in dspi_cleanup().
 */
struct chip_data {
	u32 ctar_val;	/* cached SPI_CTAR register image for this device */
};
125
/* How data is moved: EOQ (End-Of-Queue flag, FIFO-sized bursts),
 * TCFQ (Transfer-Complete flag, one frame at a time) or DMA.
 */
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_TCFQ_MODE,
	DSPI_DMA_MODE,
};
131
/* Per-SoC capabilities, selected through the OF match data. */
struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;	/* transfer mode to use */
	u8 max_clock_factor;	/* max SCK = clk rate / max_clock_factor */
	bool xspi_mode;		/* extended SPI: frames larger than 16 bits */
};
137
/* Vybrid VF610: DMA-based transfers. */
static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

/* LS1021A v1.0: TCFQ mode with XSPI (up to 32-bit frames). */
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
	.xspi_mode = true,
};

/* LS2085A: TCFQ mode, no XSPI. */
static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

/* ColdFire (platform-data probe path): EOQ mode. */
static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};
158
/* DMA-mode state: one dmaengine channel, coherent bounce buffer of
 * 32-bit PUSHR/POPR words, and a completion per direction.
 */
struct fsl_dspi_dma {
	/* Length of transfer in words of DSPI_FIFO_SIZE */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;	/* coherent TX bounce buffer */
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;	/* coherent RX bounce buffer */
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};
175
/* Driver-private controller state. */
struct fsl_dspi {
	struct spi_controller *ctlr;
	struct platform_device *pdev;

	struct regmap *regmap;		/* main 32-bit register map */
	struct regmap *regmap_pushr;	/* 16-bit CMD/TX view of PUSHR (XSPI) */
	int irq;			/* 0 means poll mode */
	struct clk *clk;

	/* State of the transfer currently in flight */
	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t len;		/* bytes left in the current transfer */
	const void *tx;		/* NULL for rx-only transfers */
	void *rx;		/* NULL for tx-only transfers */
	void *rx_end;
	u16 tx_cmd;		/* PUSHR command half-word for this transfer */
	u8 bits_per_word;
	u8 bytes_per_word;	/* 1, 2 or 4, derived from bits_per_word */
	const struct fsl_dspi_devtype_data *devtype_data;

	struct completion xfer_done;	/* signalled by the IRQ handler */

	struct fsl_dspi_dma *dma;	/* NULL unless DMA mode is active */
};
201
202static u32 dspi_pop_tx(struct fsl_dspi *dspi)
203{
204 u32 txdata = 0;
205
206 if (dspi->tx) {
207 if (dspi->bytes_per_word == 1)
208 txdata = *(u8 *)dspi->tx;
209 else if (dspi->bytes_per_word == 2)
210 txdata = *(u16 *)dspi->tx;
211 else /* dspi->bytes_per_word == 4 */
212 txdata = *(u32 *)dspi->tx;
213 dspi->tx += dspi->bytes_per_word;
214 }
215 dspi->len -= dspi->bytes_per_word;
216 return txdata;
217}
218
219static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
220{
221 u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
222
223 if (spi_controller_is_slave(dspi->ctlr))
224 return data;
225
226 if (dspi->len > 0)
227 cmd |= SPI_PUSHR_CMD_CONT;
228 return cmd << 16 | data;
229}
230
231static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
232{
233 if (!dspi->rx)
234 return;
235
236 /* Mask off undefined bits */
237 rxdata &= (1 << dspi->bits_per_word) - 1;
238
239 if (dspi->bytes_per_word == 1)
240 *(u8 *)dspi->rx = rxdata;
241 else if (dspi->bytes_per_word == 2)
242 *(u16 *)dspi->rx = rxdata;
243 else /* dspi->bytes_per_word == 4 */
244 *(u32 *)dspi->rx = rxdata;
245 dspi->rx += dspi->bytes_per_word;
246}
247
248static void dspi_tx_dma_callback(void *arg)
249{
250 struct fsl_dspi *dspi = arg;
251 struct fsl_dspi_dma *dma = dspi->dma;
252
253 complete(&dma->cmd_tx_complete);
254}
255
256static void dspi_rx_dma_callback(void *arg)
257{
258 struct fsl_dspi *dspi = arg;
259 struct fsl_dspi_dma *dma = dspi->dma;
260 int i;
261
262 if (dspi->rx) {
263 for (i = 0; i < dma->curr_xfer_len; i++)
264 dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
265 }
266
267 complete(&dma->cmd_rx_complete);
268}
269
/* Submit one DMA round: fill the TX bounce buffer with curr_xfer_len
 * PUSHR words, submit and start both channels, then wait for completion
 * (master mode waits with a timeout; slave mode waits interruptibly on
 * RX only, since a slave cannot bound how long the master takes).
 * Returns 0 on success, -EIO/-EINVAL on submission failure or
 * -ETIMEDOUT if either direction stalls.
 */
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	int i;

	for (i = 0; i < dma->curr_xfer_len; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	/* Re-arm both completions before issuing the descriptors */
	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	if (spi_controller_is_slave(dspi->ctlr)) {
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		return 0;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}
347
/* Run the whole current transfer through DMA in bounce-buffer-sized
 * rounds, updating message->actual_length as rounds complete.
 */
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int curr_remaining_bytes;
	int bytes_per_buffer;
	int ret = 0;

	curr_remaining_bytes = dspi->len;
	/* NOTE(review): despite the name, this value caps curr_xfer_len,
	 * which is counted in 32-bit PUSHR words, not bytes — confirm it
	 * is consistent with the DSPI_DMA_BUFSIZE allocation.
	 */
	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	while (curr_remaining_bytes) {
		/* Check if current transfer fits the DMA buffer */
		dma->curr_xfer_len = curr_remaining_bytes
			/ dspi->bytes_per_word;
		if (dma->curr_xfer_len > bytes_per_buffer)
			dma->curr_xfer_len = bytes_per_buffer;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			goto exit;

		} else {
			const int len =
				dma->curr_xfer_len * dspi->bytes_per_word;
			curr_remaining_bytes -= len;
			message->actual_length += len;
			if (curr_remaining_bytes < 0)
				curr_remaining_bytes = 0;
		}
	}

exit:
	return ret;
}
384
/* Acquire DMA channels, coherent bounce buffers and slave configs for
 * DMA mode. On failure everything acquired so far is unwound and
 * dspi->dma is left NULL.
 */
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	/* NOTE(review): dma_request_slave_channel() returns NULL on any
	 * error and cannot propagate -EPROBE_DEFER — verify this is
	 * acceptable for this platform.
	 */
	dma->chan_rx = dma_request_slave_channel(dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev, "rx dma channel not available\n");
		ret = -ENODEV;
		return ret;
	}

	dma->chan_tx = dma_request_slave_channel(dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev, "tx dma channel not available\n");
		ret = -ENODEV;
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					     &dma->tx_dma_phys, GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					     &dma->rx_dma_phys, GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	/* Both directions use 32-bit accesses to PUSHR/POPR, one word
	 * per request.
	 */
	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

	/* Unwind in reverse acquisition order; labels fall through. */
err_slave_config:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			  dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			  dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}
470
471static void dspi_release_dma(struct fsl_dspi *dspi)
472{
473 struct fsl_dspi_dma *dma = dspi->dma;
474 struct device *dev = &dspi->pdev->dev;
475
476 if (!dma)
477 return;
478
479 if (dma->chan_tx) {
480 dma_unmap_single(dev, dma->tx_dma_phys,
481 DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
482 dma_release_channel(dma->chan_tx);
483 }
484
485 if (dma->chan_rx) {
486 dma_unmap_single(dev, dma->rx_dma_phys,
487 DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
488 dma_release_channel(dma->chan_rx);
489 }
490}
491
492static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
493 unsigned long clkrate)
494{
495 /* Valid baud rate pre-scaler values */
496 int pbr_tbl[4] = {2, 3, 5, 7};
497 int brs[16] = { 2, 4, 6, 8,
498 16, 32, 64, 128,
499 256, 512, 1024, 2048,
500 4096, 8192, 16384, 32768 };
501 int scale_needed, scale, minscale = INT_MAX;
502 int i, j;
503
504 scale_needed = clkrate / speed_hz;
505 if (clkrate % speed_hz)
506 scale_needed++;
507
508 for (i = 0; i < ARRAY_SIZE(brs); i++)
509 for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
510 scale = brs[i] * pbr_tbl[j];
511 if (scale >= scale_needed) {
512 if (scale < minscale) {
513 minscale = scale;
514 *br = i;
515 *pbr = j;
516 }
517 break;
518 }
519 }
520
521 if (minscale == INT_MAX) {
522 pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
523 speed_hz, clkrate);
524 *pbr = ARRAY_SIZE(pbr_tbl) - 1;
525 *br = ARRAY_SIZE(brs) - 1;
526 }
527}
528
/* Translate a delay in ns into CTAR prescaler (*psc) and scaler (*sc)
 * indices such that pscale_tbl[*psc] * 2^(*sc + 1) clock cycles covers
 * at least delay_ns.
 */
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int scale_needed, scale, minscale = INT_MAX;
	int pscale_tbl[4] = {1, 3, 5, 7};
	u32 remainder;
	int i, j;

	/* Clock cycles needed for the delay, rounded up */
	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	/* Smallest product that still meets the requested delay wins */
	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}
562
/* Push one combined CMD+TX word through the 32-bit PUSHR register. */
static void fifo_write(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}

/* XSPI only: write the CMD half of PUSHR through the 16-bit view. */
static void cmd_fifo_write(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd;

	/* Keep PCS asserted while data remains in this transfer */
	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
}

/* XSPI only: write one 16-bit data half-word to the TX FIFO. */
static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
}
581
/* Queue exactly one frame in TCFQ mode. Frames wider than 16 bits need
 * the split XSPI CMD/TX FIFO path.
 */
static void dspi_tcfq_write(struct fsl_dspi *dspi)
{
	/* Clear transfer count */
	dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;

	if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
		/* Write the CMD FIFO entry first, and then the two
		 * corresponding TX FIFO entries.
		 */
		u32 data = dspi_pop_tx(dspi);

		cmd_fifo_write(dspi);
		tx_fifo_write(dspi, data & 0xFFFF);
		tx_fifo_write(dspi, data >> 16);
	} else {
		/* Write one entry to both TX FIFO and CMD FIFO
		 * simultaneously.
		 */
		fifo_write(dspi);
	}
}
603
/* Pop one word from the RX FIFO via POPR. */
static u32 fifo_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

/* TCFQ mode: exactly one frame is pending; read it back. */
static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
	dspi_push_rx(dspi, fifo_read(dspi));
}
616
/* EOQ mode: fill the TX FIFO with up to DSPI_FIFO_SIZE entries. The
 * first entry resets the transfer counter (CTCNT) and the last one
 * raises EOQF when it completes.
 */
static void dspi_eoq_write(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;
	u16 xfer_cmd = dspi->tx_cmd;

	/* Fill TX FIFO with as many transfers as possible */
	while (dspi->len && fifo_size--) {
		dspi->tx_cmd = xfer_cmd;
		/* Request EOQF for last transfer in FIFO */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Clear transfer count for first transfer in FIFO */
		if (fifo_size == (DSPI_FIFO_SIZE - 1))
			dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
		/* Write combined TX FIFO and CMD FIFO entry */
		fifo_write(dspi);
	}
}
635
636static void dspi_eoq_read(struct fsl_dspi *dspi)
637{
638 int fifo_size = DSPI_FIFO_SIZE;
639
640 /* Read one FIFO entry and push to rx buffer */
641 while ((dspi->rx < dspi->rx_end) && fifo_size--)
642 dspi_push_rx(dspi, fifo_read(dspi));
643}
644
/* Advance the current transfer after an EOQF/TCFQF event: account for
 * completed frames, drain RX, and queue the next chunk if bytes remain.
 * Returns 0 when the transfer is done, -EINPROGRESS otherwise.
 */
static int dspi_rxtx(struct fsl_dspi *dspi)
{
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u16 spi_tcnt;
	u32 spi_tcr;

	/* Get transfer counter (in number of SPI transfers). It was
	 * reset to 0 when transfer(s) were started.
	 */
	regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
	spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
	/* Update total number of bytes that were transferred */
	msg->actual_length += spi_tcnt * dspi->bytes_per_word;

	trans_mode = dspi->devtype_data->trans_mode;
	if (trans_mode == DSPI_EOQ_MODE)
		dspi_eoq_read(dspi);
	else if (trans_mode == DSPI_TCFQ_MODE)
		dspi_tcfq_read(dspi);

	if (!dspi->len)
		/* Success! */
		return 0;

	if (trans_mode == DSPI_EOQ_MODE)
		dspi_eoq_write(dspi);
	else if (trans_mode == DSPI_TCFQ_MODE)
		dspi_tcfq_write(dspi);

	return -EINPROGRESS;
}
677
/* Poll-mode: busy-wait for EOQF/TCFQF and then advance the transfer.
 * Returns 0 on completion, -EINPROGRESS when more chunks remain, or
 * -ETIMEDOUT if the flag never rises within the retry budget.
 */
static int dspi_poll(struct fsl_dspi *dspi)
{
	int tries = 1000;
	u32 spi_sr;

	do {
		regmap_read(dspi->regmap, SPI_SR, &spi_sr);
		/* Status bits are write-1-to-clear */
		regmap_write(dspi->regmap, SPI_SR, spi_sr);

		if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF))
			break;
	} while (--tries);

	if (!tries)
		return -ETIMEDOUT;

	return dspi_rxtx(dspi);
}
696
/* IRQ handler for EOQ/TCFQ modes. Completes xfer_done once the whole
 * spi_transfer has drained.
 */
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	u32 spi_sr;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	/* The line is shared (IRQF_SHARED): not ours without EOQF/TCFQF */
	if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)))
		return IRQ_NONE;

	if (dspi_rxtx(dspi) == 0)
		complete(&dspi->xfer_done);

	return IRQ_HANDLED;
}
713
/* spi_controller->transfer_one_message: run every transfer of @message
 * synchronously in the devtype's transfer mode, then finalize it.
 */
static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	enum dspi_trans_mode trans_mode;
	struct spi_transfer *transfer;
	int status = 0;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;
		/* Validated transfer specific frame size (defaults applied) */
		dspi->bits_per_word = transfer->bits_per_word;
		if (transfer->bits_per_word <= 8)
			dspi->bytes_per_word = 1;
		else if (transfer->bits_per_word <= 16)
			dspi->bytes_per_word = 2;
		else
			dspi->bytes_per_word = 4;

		/* Start each transfer with empty FIFOs */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
			     dspi->cur_chip->ctar_val |
			     SPI_FRAME_BITS(transfer->bits_per_word));
		if (dspi->devtype_data->xspi_mode)
			regmap_write(dspi->regmap, SPI_CTARE(0),
				     SPI_FRAME_EBITS(transfer->bits_per_word) |
				     SPI_CTARE_DTCP(1));

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			regmap_write(dspi->regmap, SPI_RSER,
				     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		if (!dspi->irq) {
			/* Poll mode: spin until the transfer completes */
			do {
				status = dspi_poll(dspi);
			} while (status == -EINPROGRESS);
		} else if (trans_mode != DSPI_DMA_MODE) {
			/* IRQ handler completes xfer_done when len hits 0 */
			wait_for_completion(&dspi->xfer_done);
			reinit_completion(&dspi->xfer_done);
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(ctlr);

	return status;
}
815
/* spi_controller->setup: compute baud-rate and CS delay scalers for this
 * device and cache the resulting CTAR image in per-chip data. May be
 * called repeatedly; the chip_data is allocated once and freed in
 * dspi_cleanup().
 */
static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;
	struct chip_data *chip;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		/* Optional DT-provided CS setup/hold delays (default 0) */
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				     &cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
	 * glitches of half a cycle by never allowing tCSC + tASC to go below
	 * half a SCK period.
	 */
	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;

	dev_dbg(&spi->dev,
		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
		cs_sck_delay, sck_cs_delay);

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = 0;
	if (spi->mode & SPI_CPOL)
		chip->ctar_val |= SPI_CTAR_CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctar_val |= SPI_CTAR_CPHA;

	/* Baud and delay fields only apply when we drive the clock */
	if (!spi_controller_is_slave(dspi->ctlr)) {
		chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
				  SPI_CTAR_CSSCK(cssck) |
				  SPI_CTAR_PASC(pasc) |
				  SPI_CTAR_ASC(asc) |
				  SPI_CTAR_PBR(pbr) |
				  SPI_CTAR_BR(br);

		if (spi->mode & SPI_LSB_FIRST)
			chip->ctar_val |= SPI_CTAR_LSBFE;
	}

	spi_set_ctldata(spi, chip);

	return 0;
}
893
894static void dspi_cleanup(struct spi_device *spi)
895{
896 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
897
898 dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
899 spi->controller->bus_num, spi->chip_select);
900
901 kfree(chip);
902}
903
/* OF match table; .data selects the per-SoC devtype_data. */
static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
911
#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the controller, gate the clock, park pins. */
static int dspi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);

	/* Keep the (shared) IRQ from firing while the clock is off */
	if (dspi->irq)
		disable_irq(dspi->irq);
	spi_controller_suspend(ctlr);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

/* System resume: restore pins and clock, then re-enable the controller. */
static int dspi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_controller_resume(ctlr);
	if (dspi->irq)
		enable_irq(dspi->irq);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
948
/* Registers that must bypass the regmap cache (hardware-volatile). */
static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges = dspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};

/* Register map for non-XSPI controllers (register space ends at 0x88). */
static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
	.volatile_table = &dspi_volatile_table,
};
967
/* Volatile ranges for XSPI controllers, which add SREX at 0x13c. */
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges = dspi_xspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

/* [0]: full 32-bit map; [1]: 16-bit view of PUSHR so the CMD and TX
 * halves can be written separately in XSPI mode.
 */
static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x13c,
		.volatile_table = &dspi_xspi_volatile_table,
	},
	{
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};
996
/* One-time controller init: select master/slave and XSPI operation,
 * clear all status flags, and set a default CTARE on XSPI parts.
 */
static void dspi_init(struct fsl_dspi *dspi)
{
	unsigned int mcr = SPI_MCR_PCSIS;

	if (dspi->devtype_data->xspi_mode)
		mcr |= SPI_MCR_XSPI;
	if (!spi_controller_is_slave(dspi->ctlr))
		mcr |= SPI_MCR_MASTER;

	regmap_write(dspi->regmap, SPI_MCR, mcr);
	/* Status bits are write-1-to-clear */
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
	if (dspi->devtype_data->xspi_mode)
		regmap_write(dspi->regmap, SPI_CTARE(0),
			     SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}
1012
/* Bind the platform device: allocate the controller, parse platform
 * data or DT, map registers, enable the clock, set up IRQ or poll mode
 * (and DMA where applicable), then register with the SPI core.
 */
static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct regmap_config *regmap_config;
	struct fsl_dspi_platform_data *pdata;
	struct spi_controller *ctlr;
	int ret, cs_num, bus_num;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
	if (!ctlr)
		return -ENOMEM;

	dspi = spi_controller_get_devdata(ctlr);
	dspi->pdev = pdev;
	dspi->ctlr = ctlr;

	ctlr->setup = dspi_setup;
	ctlr->transfer_one_message = dspi_transfer_one_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	ctlr->cleanup = dspi_cleanup;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	/* Platform data (ColdFire) or DT describe the controller */
	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		ctlr->num_chipselect = pdata->cs_num;
		ctlr->bus_num = pdata->bus_num;

		dspi->devtype_data = &coldfire_data;
	} else {

		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_ctlr_put;
		}
		ctlr->num_chipselect = cs_num;

		ret = of_property_read_u32(np, "bus-num", &bus_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get bus-num\n");
			goto out_ctlr_put;
		}
		ctlr->bus_num = bus_num;

		if (of_property_read_bool(np, "spi-slave"))
			ctlr->slave = true;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_ctlr_put;
		}
	}

	/* XSPI parts handle frames up to 32 bits, plain DSPI up to 16 */
	if (dspi->devtype_data->xspi_mode)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->xspi_mode)
		regmap_config = &dspi_xspi_regmap_config[0];
	else
		regmap_config = &dspi_regmap_config;
	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_ctlr_put;
	}

	/* XSPI needs a second, 16-bit regmap over PUSHR for CMD/TX halves */
	if (dspi->devtype_data->xspi_mode) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_xspi_regmap_config[1]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_ctlr_put;
		}
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_ctlr_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_ctlr_put;

	dspi_init(dspi);

	/* NOTE(review): a negative return (incl. -EPROBE_DEFER) silently
	 * falls back to poll mode here — confirm that is intended.
	 */
	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		dev_info(&pdev->dev,
			 "can't get platform irq, using poll mode\n");
		dspi->irq = 0;
		goto poll_mode;
	}

	ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
				   IRQF_SHARED, pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_clk_put;
	}

	init_completion(&dspi->xfer_done);

poll_mode:
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_free_irq;
		}
	}

	ctlr->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	platform_set_drvdata(pdev, ctlr);

	ret = spi_register_controller(ctlr);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
		goto out_release_dma;
	}

	return ret;

	/* Unwind in reverse acquisition order */
out_release_dma:
	dspi_release_dma(dspi);
out_free_irq:
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_ctlr_put:
	spi_controller_put(ctlr);

	return ret;
}
1172
/* Tear down in reverse probe order; also invoked by dspi_shutdown(). */
static int dspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);

	/* Disconnect from the SPI framework */
	spi_unregister_controller(dspi->ctlr);

	/* Disable RX and TX */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);

	/* Stop Running */
	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);

	dspi_release_dma(dspi);
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
	clk_disable_unprepare(dspi->clk);

	return 0;
}

/* Quiesce the controller on reboot/kexec by running the remove path. */
static void dspi_shutdown(struct platform_device *pdev)
{
	dspi_remove(pdev);
}
1201
/* Platform driver glue and module metadata. */
static struct platform_driver fsl_dspi_driver = {
	.driver.name = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.owner = THIS_MODULE,
	.driver.pm = &dspi_pm,
	.probe = dspi_probe,
	.remove = dspi_remove,
	.shutdown = dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);