blob: 860d2575a92a8229dddefb0a238735fbf8f9c5a4 [file] [log] [blame]
xjb04a4022021-11-25 15:01:52 +08001// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2018 MediaTek Inc.
3
4#include <linux/clk.h>
5#include <linux/device.h>
6#include <linux/dma-mapping.h>
7#include <linux/err.h>
8#include <linux/interrupt.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#include <linux/spi/spi.h>
13#include <linux/of.h>
14
/* Register map of the SPI slave (SPIS) IP block */
#define SPIS_IRQ_EN_REG 0x0
#define SPIS_IRQ_CLR_REG 0x4
#define SPIS_IRQ_ST_REG 0x8
#define SPIS_IRQ_MASK_REG 0xc
#define SPIS_CFG_REG 0x10
#define SPIS_RX_DATA_REG 0x14
#define SPIS_TX_DATA_REG 0x18
#define SPIS_RX_DST_REG 0x1c
#define SPIS_TX_SRC_REG 0x20
#define SPIS_DMA_CFG_REG 0x30
#define SPIS_SOFT_RST_REG 0x40

/* SPIS_IRQ_EN_REG: interrupt enable bits */
#define DMA_DONE_EN BIT(7)
#define DATA_DONE_EN BIT(2)
#define RSTA_DONE_EN BIT(1)
#define CMD_INVALID_EN BIT(0)
/*
 * NOTE(review): vendor addition; nothing in this file references this
 * macro (the code uses dev_comp->max_fifo_size instead) and it does not
 * belong in the IRQ-enable section — candidate for removal.
 */
#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U
/* SPIS_IRQ_ST_REG: interrupt status bits (write 1 to SPIS_IRQ_CLR_REG) */
#define DMA_DONE_ST BIT(7)
#define DATA_DONE_ST BIT(2)
#define RSTA_DONE_ST BIT(1)
#define CMD_INVALID_ST BIT(0)

/* SPIS_IRQ_MASK_REG: interrupt mask bits */
#define DMA_DONE_MASK BIT(7)
#define DATA_DONE_MASK BIT(2)
#define RSTA_DONE_MASK BIT(1)
#define CMD_INVALID_MASK BIT(0)

/* SPIS_CFG_REG: transfer configuration */
#define SPIS_TX_ENDIAN BIT(7)
#define SPIS_RX_ENDIAN BIT(6)
#define SPIS_TXMSBF BIT(5)
#define SPIS_RXMSBF BIT(4)
#define SPIS_CPHA BIT(3)
#define SPIS_CPOL BIT(2)
#define SPIS_TX_EN BIT(1)
#define SPIS_RX_EN BIT(0)

/* SPIS_DMA_CFG_REG: DMA enables and transfer length (len - 1, 20 bits) */
#define TX_DMA_TRIG_EN BIT(31)
#define TX_DMA_EN BIT(30)
#define RX_DMA_EN BIT(29)
#define TX_DMA_LEN 0xfffff

/* SPIS_SOFT_RST_REG: soft reset / DMA address latch control */
#define SPIS_DMA_ADDR_EN BIT(1)
#define SPIS_SOFT_RST BIT(0)

65
/**
 * struct mtk_spi_slave - per-controller driver state
 * @dev:		the platform device's struct device
 * @base:		ioremapped SPIS register base
 * @spi_clk:		functional clock of the SPIS block
 * @axi_clk:		bus clock, only used when dev_comp->need_axi_clk
 * @xfer_done:		completed by the IRQ handler (or by slave abort)
 * @cur_transfer:	transfer currently in flight; NULL when idle
 * @slave_aborted:	set by mtk_slave_abort() before completing xfer_done
 * @dev_comp:		per-SoC capabilities matched from the device tree
 */
struct mtk_spi_slave {
	struct device *dev;
	void __iomem *base;
	struct clk *spi_clk, *axi_clk;
	struct completion xfer_done;
	struct spi_transfer *cur_transfer;
	bool slave_aborted;
	const struct mtk_spi_compatible *dev_comp;
};
75
/**
 * struct mtk_spi_compatible - per-SoC capabilities
 * @max_fifo_size:	transfers longer than this fall back to DMA mode
 * @must_rx:		hardware needs a dummy RX buffer for TX-only transfers
 * @need_axi_clk:	a separate AXI bus clock must be enabled
 */
struct mtk_spi_compatible {
	const u32 max_fifo_size;
	bool must_rx;
	bool need_axi_clk;
};
static const struct mtk_spi_compatible mt2712_compat = {
	.max_fifo_size = 512,
	.must_rx = false,
	.need_axi_clk = false,
};
static const struct mtk_spi_compatible mt8512_compat = {
	.max_fifo_size = 128,
	.must_rx = true,
	.need_axi_clk = true,
};

static const struct of_device_id mtk_spi_slave_of_match[] = {
	{ .compatible = "mediatek,mt2712-spi-slave",
	  .data = (void *)&mt2712_compat,},
	{ .compatible = "mediatek,mt8512-spi-slave",
	  .data = (void *)&mt8512_compat,},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
100
101static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
102{
103 u32 reg_val;
104
105 reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
106 reg_val &= ~RX_DMA_EN;
107 reg_val &= ~TX_DMA_EN;
108 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
109}
110
111static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
112{
113 u32 reg_val;
114
115 reg_val = readl(mdata->base + SPIS_CFG_REG);
116 reg_val &= ~SPIS_TX_EN;
117 reg_val &= ~SPIS_RX_EN;
118 writel(reg_val, mdata->base + SPIS_CFG_REG);
119}
120
121static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
122{
123 if (wait_for_completion_interruptible(&mdata->xfer_done) ||
124 mdata->slave_aborted) {
125 dev_err(mdata->dev, "interrupted\n");
126 return -EINTR;
127 }
128
129 return 0;
130}
131
132static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
133 struct spi_message *msg)
134{
135 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
136 struct spi_device *spi = msg->spi;
137 bool cpha, cpol;
138 u32 reg_val;
139
140 cpha = spi->mode & SPI_CPHA ? 1 : 0;
141 cpol = spi->mode & SPI_CPOL ? 1 : 0;
142
143 reg_val = readl(mdata->base + SPIS_CFG_REG);
144 if (cpha)
145 reg_val |= SPIS_CPHA;
146 else
147 reg_val &= ~SPIS_CPHA;
148 if (cpol)
149 reg_val |= SPIS_CPOL;
150 else
151 reg_val &= ~SPIS_CPOL;
152
153 if (spi->mode & SPI_LSB_FIRST)
154 reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
155 else
156 reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
157
158 reg_val &= ~SPIS_TX_ENDIAN;
159 reg_val &= ~SPIS_RX_ENDIAN;
160 writel(reg_val, mdata->base + SPIS_CFG_REG);
161
162 return 0;
163}
164
165static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
166 struct spi_device *spi,
167 struct spi_transfer *xfer)
168{
169 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
170 int reg_val, cnt, remainder, ret;
171
172 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
173
174 reg_val = readl(mdata->base + SPIS_CFG_REG);
175 if (xfer->rx_buf)
176 reg_val |= SPIS_RX_EN;
177 if (xfer->tx_buf)
178 reg_val |= SPIS_TX_EN;
179 writel(reg_val, mdata->base + SPIS_CFG_REG);
180
181 cnt = xfer->len / 4;
182 if (xfer->tx_buf)
183 iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
184 xfer->tx_buf, cnt);
185
186 remainder = xfer->len % 4;
187 if (xfer->tx_buf && remainder > 0) {
188 reg_val = 0;
189 memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
190 writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
191 }
192
193 ret = mtk_spi_slave_wait_for_completion(mdata);
194 if (ret) {
195 mtk_spi_slave_disable_xfer(mdata);
196 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
197 }
198
199 return ret;
200}
201
202static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
203 struct spi_device *spi,
204 struct spi_transfer *xfer)
205{
206 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
207 struct device *dev = mdata->dev;
208 int reg_val, ret;
209
210 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
211
212 if (xfer->tx_buf) {
213 /* tx_buf is a const void* where we need a void * for
214 * the dma mapping
215 */
216 void *nonconst_tx = (void *)xfer->tx_buf;
217
218 xfer->tx_dma = dma_map_single(dev, nonconst_tx,
219 xfer->len, DMA_TO_DEVICE);
220 if (dma_mapping_error(dev, xfer->tx_dma)) {
221 ret = -ENOMEM;
222 goto disable_transfer;
223 }
224 }
225
226 if (xfer->rx_buf) {
227 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
228 xfer->len, DMA_FROM_DEVICE);
229 if (dma_mapping_error(dev, xfer->rx_dma)) {
230 ret = -ENOMEM;
231 goto unmap_txdma;
232 }
233 }
234
235 writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
236 writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
237
238 writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
239
240 /* enable config reg tx rx_enable */
241 reg_val = readl(mdata->base + SPIS_CFG_REG);
242 if (xfer->tx_buf)
243 reg_val |= SPIS_TX_EN;
244 if (xfer->rx_buf)
245 reg_val |= SPIS_RX_EN;
246 writel(reg_val, mdata->base + SPIS_CFG_REG);
247
248 /* config dma */
249 reg_val = 0;
250 reg_val |= (xfer->len - 1) & TX_DMA_LEN;
251 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
252
253 reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
254 if (xfer->tx_buf)
255 reg_val |= TX_DMA_EN;
256 if (xfer->rx_buf)
257 reg_val |= RX_DMA_EN;
258 reg_val |= TX_DMA_TRIG_EN;
259 writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
260
261 ret = mtk_spi_slave_wait_for_completion(mdata);
262 if (ret)
263 goto unmap_rxdma;
264
265 return 0;
266
267unmap_rxdma:
268 if (xfer->rx_buf)
269 dma_unmap_single(dev, xfer->rx_dma,
270 xfer->len, DMA_FROM_DEVICE);
271
272unmap_txdma:
273 if (xfer->tx_buf)
274 dma_unmap_single(dev, xfer->tx_dma,
275 xfer->len, DMA_TO_DEVICE);
276
277disable_transfer:
278 mtk_spi_slave_disable_dma(mdata);
279 mtk_spi_slave_disable_xfer(mdata);
280 writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
281
282 return ret;
283}
284
285static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
286 struct spi_device *spi,
287 struct spi_transfer *xfer)
288{
289 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
290
291 reinit_completion(&mdata->xfer_done);
292 mdata->slave_aborted = false;
293 mdata->cur_transfer = xfer;
294
295 if (xfer->len > mdata->dev_comp->max_fifo_size)
296 return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
297 else
298 return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
299}
300
301static int mtk_spi_slave_setup(struct spi_device *spi)
302{
lh0d3f4db2022-09-17 00:16:39 -0700303 struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
xjb04a4022021-11-25 15:01:52 +0800304 u32 reg_val;
lh0d3f4db2022-09-17 00:16:39 -0700305
xjb04a4022021-11-25 15:01:52 +0800306 reg_val = DMA_DONE_EN | DATA_DONE_EN |
307 RSTA_DONE_EN | CMD_INVALID_EN;
308 writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
309
310 reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
311 RSTA_DONE_MASK | CMD_INVALID_MASK;
312 writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
313
314 mtk_spi_slave_disable_dma(mdata);
315 mtk_spi_slave_disable_xfer(mdata);
316
317 return 0;
318}
319
320static int mtk_slave_abort(struct spi_controller *ctlr)
321{
322 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
323
324 mdata->slave_aborted = true;
325 complete(&mdata->xfer_done);
326
327 return 0;
328}
329
/*
 * Interrupt handler: decodes SPIS_IRQ_ST_REG and finishes the current
 * transfer.  For DMA transfers it unmaps the buffers mapped by
 * mtk_spi_slave_dma_transfer(); for FIFO transfers it drains the RX
 * FIFO into the transfer's rx_buf.  Completes xfer_done on success.
 */
static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_transfer *trans = mdata->cur_transfer;
	u32 int_status, reg_val, cnt, remainder;

	/* Read-then-clear: acknowledge exactly the bits we saw. */
	int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
	writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);

	/* No transfer in flight — interrupt is not for us. */
	if (!trans)
		return IRQ_NONE;

	/* DMA-mode completion: reset, unmap, and quiesce the hardware. */
	if ((int_status & DMA_DONE_ST) &&
	    ((int_status & DATA_DONE_ST) ||
	    (int_status & RSTA_DONE_ST))) {
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

		if (trans->tx_buf)
			dma_unmap_single(mdata->dev, trans->tx_dma,
					 trans->len, DMA_TO_DEVICE);
		if (trans->rx_buf)
			dma_unmap_single(mdata->dev, trans->rx_dma,
					 trans->len, DMA_FROM_DEVICE);

		mtk_spi_slave_disable_dma(mdata);
		mtk_spi_slave_disable_xfer(mdata);
	}

	/* FIFO-mode completion: drain RX data, whole words first, then
	 * the trailing 1-3 bytes from one last word.
	 */
	if ((!(int_status & DMA_DONE_ST)) &&
	    ((int_status & DATA_DONE_ST) ||
	    (int_status & RSTA_DONE_ST))) {
		cnt = trans->len / 4;
		if (trans->rx_buf)
			ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
				     trans->rx_buf, cnt);
		remainder = trans->len % 4;
		if (trans->rx_buf && remainder > 0) {
			reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
			memcpy(trans->rx_buf + (cnt * 4),
			       &reg_val, remainder);
		}

		mtk_spi_slave_disable_xfer(mdata);
	}

	/* Master sent an invalid command: do not complete the transfer. */
	if (int_status & CMD_INVALID_ST) {
		dev_warn(&ctlr->dev, "cmd invalid\n");
		return IRQ_NONE;
	}

	/* Clear cur_transfer before waking the waiter. */
	mdata->cur_transfer = NULL;
	complete(&mdata->xfer_done);

	return IRQ_HANDLED;
}
386
387static int mtk_spi_slave_probe(struct platform_device *pdev)
388{
389 struct spi_controller *ctlr;
390 struct mtk_spi_slave *mdata;
391 struct resource *res;
392 int irq, ret,reg_val;//zhengzhou 2021.03.03 modify
393 const struct of_device_id *of_id;//zhengzhou 2021.03.03 modify
394 ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
395 if (!ctlr) {
396 dev_err(&pdev->dev, "failed to alloc spi slave\n");
397 return -ENOMEM;
398 }
399
400 ctlr->auto_runtime_pm = true;
401 ctlr->dev.of_node = pdev->dev.of_node;
402 ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
403 ctlr->mode_bits |= SPI_LSB_FIRST;
404 ctlr->flags = SPI_MASTER_MUST_RX;
405
406 ctlr->prepare_message = mtk_spi_slave_prepare_message;
407 ctlr->transfer_one = mtk_spi_slave_transfer_one;
408 ctlr->setup = mtk_spi_slave_setup;
409 ctlr->slave_abort = mtk_slave_abort;
410
411 of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node);
412 if (!of_id) {
413 dev_err(&pdev->dev, "failed to probe of_node\n");
414 ret = -EINVAL;
415 goto err_put_ctlr;
416 }
417 mdata = spi_controller_get_devdata(ctlr);
418 mdata->dev_comp = of_id->data;
419 if (mdata->dev_comp->must_rx)
420 ctlr->flags = SPI_MASTER_MUST_RX;
421
422 platform_set_drvdata(pdev, ctlr);
423
424 init_completion(&mdata->xfer_done);
425
426 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
427 if (!res) {
428 ret = -ENODEV;
429 dev_err(&pdev->dev, "failed to determine base address\n");
430 goto err_put_ctlr;
431 }
432
433 mdata->dev = &pdev->dev;
434
435 mdata->base = devm_ioremap_resource(&pdev->dev, res);
436 if (IS_ERR(mdata->base)) {
437 ret = PTR_ERR(mdata->base);
438 goto err_put_ctlr;
439 }
440
441 irq = platform_get_irq(pdev, 0);
442 if (irq < 0) {
443 dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
444 ret = irq;
445 goto err_put_ctlr;
446 }
447
448 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
449 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
450 if (ret) {
451 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
452 goto err_put_ctlr;
453 }
454
455 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
456 if (IS_ERR(mdata->spi_clk)) {
457 ret = PTR_ERR(mdata->spi_clk);
458 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
459 goto err_put_ctlr;
460 }
461
462 ret = clk_prepare_enable(mdata->spi_clk);
463 if (ret < 0) {
464 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
465 goto err_put_ctlr;
466 }
467
468 if (mdata->dev_comp->need_axi_clk) {
469 mdata->axi_clk = devm_clk_get(&pdev->dev, "axi");
470 if (IS_ERR(mdata->axi_clk)) {
471 ret = PTR_ERR(mdata->axi_clk);
472 dev_err(&pdev->dev, "failed to get axi-clk: %d\n", ret);
473 goto err_disable_spi_clk;
474 }
475
476 ret = clk_prepare_enable(mdata->axi_clk);
477 if (ret < 0) {
478 dev_err(&pdev->dev,
479 "failed to enable axi_clk (%d)\n", ret);
480 goto err_disable_spi_clk;
481 }
482 }
483
484 pm_runtime_enable(&pdev->dev);
485
486 ret = devm_spi_register_controller(&pdev->dev, ctlr);
487 if (ret) {
488 dev_err(&pdev->dev,
489 "failed to register slave controller(%d)\n", ret);
490 if (mdata->dev_comp->need_axi_clk)
491 clk_disable_unprepare(mdata->axi_clk);
492 goto err_disable_runtime_pm;
493 }
494 //zhengzhou 2021.03.03 modify +++++
495 reg_val=0;
496 reg_val|=8&TX_DMA_EN;
497 writel(reg_val,mdata->base+SPIS_DMA_CFG_REG);
498 dev_err(&pdev->dev,"mtk_spi_slave_probe SPIS_DMA_CFG_REG(0x%x)\n ",readl(mdata->base+SPIS_CFG_REG));
499 dev_err(&pdev->dev,"mtk_spi_slave_probe SPIS_DMA_CFG_REG(0x%x)\n ",readl(mdata->base+SPIS_CFG_REG));
500 if (mdata->dev_comp->need_axi_clk)
501 clk_disable_unprepare(mdata->axi_clk);
502 clk_disable_unprepare(mdata->spi_clk);
503
504 return 0;
505
506err_disable_runtime_pm:
507 pm_runtime_disable(&pdev->dev);
508err_disable_spi_clk:
509 clk_disable_unprepare(mdata->spi_clk);
510err_put_ctlr:
511 spi_controller_put(ctlr);
512
513 return ret;
514}
515
/*
 * Remove: controller, MMIO mapping and IRQ are devm-managed; only
 * runtime PM needs explicit teardown here.
 */
static int mtk_spi_slave_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	return 0;
}
522
523#ifdef CONFIG_PM_SLEEP
524static int mtk_spi_slave_suspend(struct device *dev)
525{
526 struct spi_controller *ctlr = dev_get_drvdata(dev);
527 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
528 int ret;
529
530 ret = spi_controller_suspend(ctlr);
531 if (ret)
532 return ret;
533
534 if (!pm_runtime_suspended(dev)) {
535 if (mdata->dev_comp->need_axi_clk)
536 clk_disable_unprepare(mdata->axi_clk);
537 clk_disable_unprepare(mdata->spi_clk);
538 }
539
540 return ret;
541}
542
543static int mtk_spi_slave_resume(struct device *dev)
544{
545 struct spi_controller *ctlr = dev_get_drvdata(dev);
546 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
547 int ret;
548
549 if (!pm_runtime_suspended(dev)) {
550 ret = clk_prepare_enable(mdata->spi_clk);
551 if (ret < 0) {
552 dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
553 return ret;
554 }
555
556 if (mdata->dev_comp->need_axi_clk) {
557 ret = clk_prepare_enable(mdata->axi_clk);
558 if (ret < 0) {
559 clk_disable_unprepare(mdata->spi_clk);
560 dev_err(dev,
561 "failed to enable axi_clk (%d)\n", ret);
562 return ret;
563 }
564 }
565 }
566
567 ret = spi_controller_resume(ctlr);
568 if (ret < 0) {
569 if (mdata->dev_comp->need_axi_clk)
570 clk_disable_unprepare(mdata->axi_clk);
571 clk_disable_unprepare(mdata->spi_clk);
572 }
573
574 return ret;
575}
576#endif /* CONFIG_PM_SLEEP */
577
578#ifdef CONFIG_PM
579static int mtk_spi_slave_runtime_suspend(struct device *dev)
580{
581 struct spi_controller *ctlr = dev_get_drvdata(dev);
582 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
583
584 if (mdata->dev_comp->need_axi_clk)
585 clk_disable_unprepare(mdata->axi_clk);
586 clk_disable_unprepare(mdata->spi_clk);
587
588 return 0;
589}
590
591static int mtk_spi_slave_runtime_resume(struct device *dev)
592{
593 struct spi_controller *ctlr = dev_get_drvdata(dev);
594 struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
595 int ret;
596
597 ret = clk_prepare_enable(mdata->spi_clk);
598 if (ret < 0) {
599 dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
600 return ret;
601 }
602
603 if (mdata->dev_comp->need_axi_clk) {
604 ret = clk_prepare_enable(mdata->axi_clk);
605 if (ret < 0) {
606 clk_disable_unprepare(mdata->spi_clk);
607 dev_err(dev, "failed to enable axi_clk (%d)\n", ret);
608 return ret;
609 }
610 }
611
612 return 0;
613}
614#endif /* CONFIG_PM */
615
/* System sleep and runtime PM hooks. */
static const struct dev_pm_ops mtk_spi_slave_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
			   mtk_spi_slave_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_slave_driver = {
	.driver = {
		.name = "mtk-spi-slave",
		.pm = &mtk_spi_slave_pm,
		.of_match_table = mtk_spi_slave_of_match,
	},
	.probe = mtk_spi_slave_probe,
	.remove = mtk_spi_slave_remove,
};

module_platform_driver(mtk_spi_slave_driver);

MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");