#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/delay.h>
#include "dmaengine.h"

#define DCR 0x0 /* DMA Control Registers */
#define DCFGR 0x4 /* DMA CFG Registers */
#define DIER 0x8 /* DMA Interrupt Enable Registers */
#define DISR 0xc /* DMA Interrupt Status Registers */

#define OFFSET_CH(n) (((n) + 1) << 8)

#define DCCR(n) (0x0000 + OFFSET_CH(n)) /* DMA Channel(n) Control Registers */
#define DCBCR(n) (0x0004 + OFFSET_CH(n)) /* DMA Channel(n) Byte Cnt Registers */

#define DCIER(n) (0x0020 + OFFSET_CH(n)) /* DMA Channel(n) Interrupt Enable Registers */
#define DCISR(n) (0x0024 + OFFSET_CH(n)) /* DMA Channel(n) Interrupt Status Registers */
#define DCICR(n) (0x0028 + OFFSET_CH(n)) /* DMA Channel(n) Interrupt Clear Registers */

#define DCSR(n) (0x0030 + OFFSET_CH(n)) /* DMA Channel(n) Status Registers */
#define DCCBCR(n) (0x0034 + OFFSET_CH(n)) /* DMA Channel(n) Current Byte Cnt Registers */

#define DCDLAR(n) (0x0050 + OFFSET_CH(n)) /* DMA Channel(n) Descriptor Low Address Registers */
#define DCDHAR(n) (0x0054 + OFFSET_CH(n)) /* DMA Channel(n) Descriptor High Address Registers */

#define DCCR_ABT BIT(6) /* channel abort (read / write) */
#define DCCR_INT_MOD BIT(5) /* channel int mode (read / write) */
#define DCCR_CHAIN_MOD BIT(4) /* channel chain mode (read / write) */
#define DCCR_EN BIT(0) /* channel enable (read / write) */

#define HSDMA_MAX_DESC_BYTES 0xffff

struct mmp_hsdma_desc_hw {
        u32 src_laddr; /* Source Low Address */
        u32 src_haddr; /* Source High Address */
        u32 dest_laddr; /* Destination Low Address */
        u32 dest_haddr; /* Destination High Address */
        u32 byte_length; /* Byte Length */
        u32 dummy; /* Dummy */
        u32 next_desc_laddr; /* Next Descriptor Low Address */
        u32 next_desc_haddr; /* Next Descriptor High Address */
} __aligned(32);

struct mmp_hsdma_desc_sw {
        struct mmp_hsdma_desc_hw desc;
        struct list_head node;
        struct list_head tx_list;
        struct dma_async_tx_descriptor async_tx;
};

struct mmp_hsdma_phy;

struct mmp_hsdma_chan {
        struct device *dev;
        struct dma_chan chan;
        struct dma_async_tx_descriptor desc;
        struct mmp_hsdma_phy *phy;
        enum dma_transfer_direction dir;
        struct dma_slave_config slave_config;

        /* channel's basic info */
        struct tasklet_struct tasklet;
        int dedicated_chan;

        /* list for desc */
        spinlock_t desc_lock; /* Descriptor list lock */
        struct list_head chain_pending; /* Link descriptors queue for pending */
        struct list_head chain_running; /* Link descriptors queue for running */
        bool idle; /* channel is idle */
        bool byte_align;

        int user_do_qos;
        int qos_count; /* Per-channel qos count */
        enum dma_status status; /* channel state machine */
        struct dma_pool *desc_pool; /* Descriptors pool */
};

struct mmp_hsdma_phy {
        int idx;
        void __iomem *base;
        struct mmp_hsdma_chan *vchan;
};

struct mmp_hsdma_device {
        int dma_channels;
        int dedicated_chan_bitmap;
        s32 lpm_qos;
        struct pm_qos_request qos_idle;
        void __iomem *base;
        struct device *dev;
        struct dma_device device;
        struct mmp_hsdma_phy *phy;
        spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_hsdma_desc(tx) \
        container_of(tx, struct mmp_hsdma_desc_sw, async_tx)
#define to_mmp_hsdma_desc(lh) \
        container_of(lh, struct mmp_hsdma_desc_sw, node)
#define to_mmp_hsdma_chan(dchan) \
        container_of(dchan, struct mmp_hsdma_chan, chan)
#define to_mmp_hsdma_dev(dmadev) \
        container_of(dmadev, struct mmp_hsdma_device, device)

static void mmp_hsdma_qos_get(struct mmp_hsdma_chan *chan);
static void mmp_hsdma_qos_put(struct mmp_hsdma_chan *chan);

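/*
 * Point the channel's descriptor fetch registers (DCDLAR/DCDHAR) at the
 * first hardware descriptor; the high word is always written as zero,
 * so 32-bit descriptor addresses are assumed.
 */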
static void set_desc(struct mmp_hsdma_phy *phy, dma_addr_t addr)
{
        u32 reg;

        reg = DCDLAR(phy->idx);
        writel(addr, phy->base + reg);

        reg = DCDHAR(phy->idx);
        writel(0x00000000, phy->base + reg);
}

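/*
 * Enable the DMA controller and this channel's interrupts, then start the
 * channel in chained, interrupt-driven mode.
 */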
static void enable_chan(struct mmp_hsdma_phy *phy)
{
        u32 reg;

        if (!phy->vchan)
                return;

        reg = DCR;
        writel(0x1, phy->base + reg);

        reg = DIER;
        writel(readl(phy->base + reg) | BIT(phy->idx),
               phy->base + reg);

        reg = DCIER(phy->idx);
        writel(0xf, phy->base + reg);

        reg = DCCR(phy->idx);
        writel(DCCR_INT_MOD | DCCR_CHAIN_MOD | DCCR_EN,
               phy->base + reg);
}

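/* Abort any transfer in flight on the physical channel and clear its enable bit */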
static void disable_chan(struct mmp_hsdma_phy *phy)
{
        u32 reg;

        if (!phy)
                return;

        reg = DCCR(phy->idx);
        writel((readl(phy->base + reg) | DCCR_ABT) & ~DCCR_EN,
               phy->base + reg);
}

static int clear_chan_irq(struct mmp_hsdma_phy *phy)
{
        u32 reg;

        reg = DCICR(phy->idx);
        writel(0xf, phy->base + reg);

        return 0;
}

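/*
 * Per-channel interrupt handler: clear the channel interrupt status and
 * defer completion handling to the tasklet.
 */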
static irqreturn_t mmp_hsdma_chan_handler(int irq, void *dev_id)
{
        struct mmp_hsdma_phy *phy = dev_id;
        struct mmp_hsdma_chan *hschan = phy->vchan;

        if (clear_chan_irq(phy) != 0)
                return IRQ_NONE;

        if (hschan)
                tasklet_schedule(&hschan->tasklet);
        else
                pr_err("%s: hsdma channel has been freed\n", __func__);

        return IRQ_HANDLED;
}

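/*
 * Shared interrupt handler: walk DISR and dispatch each signalling channel
 * to mmp_hsdma_chan_handler().
 */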
static irqreturn_t mmp_hsdma_int_handler(int irq, void *dev_id)
{
        struct mmp_hsdma_device *hsdev = dev_id;
        struct mmp_hsdma_phy *phy;
        u32 disr = readl(hsdev->base + DISR);
        int i, ret;
        int irq_num = 0;
        unsigned long flags;

        while (disr) {
                i = __ffs(disr);
                /* only handle interrupts belonging to hsdma driver */
                if (i >= hsdev->dma_channels)
                        break;
                disr &= (disr - 1);
                phy = &hsdev->phy[i];
                spin_lock_irqsave(&hsdev->phy_lock, flags);
                ret = mmp_hsdma_chan_handler(irq, phy);
                spin_unlock_irqrestore(&hsdev->phy_lock, flags);
                if (ret == IRQ_HANDLED)
                        irq_num++;
        }

        if (irq_num)
                return IRQ_HANDLED;

        return IRQ_NONE;
}

/* look up a free phy channel, in descending priority order */
static struct mmp_hsdma_phy *lookup_phy(struct mmp_hsdma_chan *hschan)
{
        int prio, i;
        struct mmp_hsdma_device *hsdev = to_mmp_hsdma_dev(hschan->chan.device);
        struct mmp_hsdma_phy *phy, *found = NULL;
        unsigned long flags;

        /*
         * dma channel priorities
         * ch 0 - 3 <--> (0)
         * ch 4 - 7 <--> (1)
         */

        spin_lock_irqsave(&hsdev->phy_lock, flags);
        if (hschan->dedicated_chan > 0) {
                phy = &hsdev->phy[hschan->dedicated_chan];
                if (!phy->vchan) {
                        phy->vchan = hschan;
                        found = phy;
                        goto out_unlock;
                } else {
                        dev_err(hschan->dev, "dedicated channel %d already used!\n",
                                hschan->dedicated_chan);
                }
        }

        for (prio = 0; prio <= ((hsdev->dma_channels - 1) & 0xf) >> 2; prio++) {
                for (i = 0; i < hsdev->dma_channels; i++) {
                        if (prio != (i & 0xf) >> 2)
                                continue;
                        phy = &hsdev->phy[i];
                        if (!phy->vchan) {
                                phy->vchan = hschan;
                                found = phy;
                                goto out_unlock;
                        }
                }
        }

out_unlock:
        spin_unlock_irqrestore(&hsdev->phy_lock, flags);
        return found;
}

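/* Stop the physical channel and detach it from the virtual channel so it can be reallocated */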
static void mmp_hsdma_free_phy(struct mmp_hsdma_chan *hschan)
{
        struct mmp_hsdma_device *hsdev = to_mmp_hsdma_dev(hschan->chan.device);
        unsigned long flags;
        u32 reg;

        if (!hschan->phy)
                return;

        reg = DCCR(hschan->phy->idx);
        writel(readl(hschan->phy->base + reg) & ~DCCR_EN,
               hschan->phy->base + reg);

        spin_lock_irqsave(&hsdev->phy_lock, flags);
        hschan->phy->vchan = NULL;
        hschan->phy = NULL;
        spin_unlock_irqrestore(&hsdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * @chan: hsdma channel to start
 *
 * pending list ==> running list
 */
static int start_pending_queue(struct mmp_hsdma_chan *chan)
{
        struct mmp_hsdma_desc_sw *desc;
        struct mmp_hsdma_desc_sw *_desc;

        /* still running; the irq handler will start the pending list */
        if (chan->status == DMA_IN_PROGRESS) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                return -1;
        }

        if (list_empty(&chan->chain_pending)) {
                /* chance to re-fetch phy channel with higher prio */
                mmp_hsdma_free_phy(chan);
                dev_dbg(chan->dev, "no pending list\n");
                return -1;
        }

        if (!chan->phy) {
                chan->phy = lookup_phy(chan);
                if (!chan->phy) {
                        dev_dbg(chan->dev, "no free dma channel\n");
                        return -1;
                }
        }

        /*
         * pending -> running
         * reinitialize pending list
         */
        list_for_each_entry_safe(desc, _desc, &chan->chain_pending, node) {
                list_del(&desc->node);
                list_add_tail(&desc->node, &chan->chain_running);
        }

        desc = list_first_entry(&chan->chain_running,
                                struct mmp_hsdma_desc_sw, node);

        /*
         * Program the descriptor's address into the DMA controller,
         * then start the DMA transaction
         */
        set_desc(chan->phy, desc->async_tx.phys);
        /* ensure descriptors are written before starting dma */
        wmb();
        enable_chan(chan->phy);
        chan->idle = false;
        chan->status = DMA_IN_PROGRESS;
        return 0;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_hsdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(tx->chan);
        struct mmp_hsdma_desc_sw *desc = tx_to_mmp_hsdma_desc(tx);
        struct mmp_hsdma_desc_sw *child;
        unsigned long flags;
        dma_cookie_t cookie = -EBUSY;

        spin_lock_irqsave(&chan->desc_lock, flags);

        list_for_each_entry(child, &desc->tx_list, node)
                cookie = dma_cookie_assign(&child->async_tx);

        /* softly link to pending list - desc->tx_list ==> pending list */
        list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return cookie;
}

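/* Allocate one software descriptor (with its embedded hardware descriptor) from the channel's DMA pool */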
static struct mmp_hsdma_desc_sw *
mmp_hsdma_alloc_descriptor(struct mmp_hsdma_chan *chan)
{
        struct mmp_hsdma_desc_sw *desc;
        dma_addr_t hsdesc;

        desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &hsdesc);
        if (!desc) {
                dev_err(chan->dev, "out of memory for link descriptor\n");
                return NULL;
        }

        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
        /* each descriptor carries its own submit callback */
        desc->async_tx.tx_submit = mmp_hsdma_tx_submit;
        desc->async_tx.phys = hsdesc;

        return desc;
}

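/* dmaengine callback: create the channel's descriptor pool on first use */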
static int mmp_hsdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);

        if (chan->desc_pool)
                return 1;

        chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
                                          chan->dev,
                                          sizeof(struct mmp_hsdma_desc_sw),
                                          __alignof__(struct mmp_hsdma_desc_sw),
                                          0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }

        chan->status = DMA_COMPLETE;
        chan->dir = 0;
        chan->idle = true;
        return 1;
}

static void mmp_hsdma_free_desc_list(struct mmp_hsdma_chan *chan,
                                     struct list_head *list)
{
        struct mmp_hsdma_desc_sw *desc, *_desc;

        list_for_each_entry_safe(desc, _desc, list, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}

static void mmp_hsdma_free_chan_resources(struct dma_chan *dchan)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
        unsigned long flags;

        /* wait until the tasklet finishes if necessary */
        tasklet_kill(&chan->tasklet);
        spin_lock_irqsave(&chan->desc_lock, flags);
        mmp_hsdma_free_desc_list(chan, &chan->chain_pending);
        mmp_hsdma_free_desc_list(chan, &chan->chain_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
        chan->idle = true;
        chan->status = DMA_COMPLETE;
        chan->dir = 0;
}

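/*
 * dmaengine callback: build a chain of hardware descriptors for a memcpy,
 * splitting the transfer into HSDMA_MAX_DESC_BYTES-sized segments.
 */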
static struct dma_async_tx_descriptor *
mmp_hsdma_prep_memcpy(struct dma_chan *dchan,
                      dma_addr_t dma_dst, dma_addr_t dma_src,
                      size_t len, unsigned long flags)
{
        struct mmp_hsdma_chan *chan;
        struct mmp_hsdma_desc_sw *first = NULL, *prev = NULL, *new;
        size_t copy = 0;

        if (!dchan)
                return NULL;

        if (!len)
                return NULL;

        chan = to_mmp_hsdma_chan(dchan);
        chan->byte_align = false;

        if (!chan->dir)
                chan->dir = DMA_MEM_TO_MEM;

        do {
                /* Allocate the link descriptor from DMA pool */
                new = mmp_hsdma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, "no memory for desc\n");
                        goto fail;
                }

                copy = min_t(size_t, len, HSDMA_MAX_DESC_BYTES);

                if (dma_src & 0x3 || dma_dst & 0x3)
                        chan->byte_align = true;

                new->desc.src_laddr = dma_src;
                new->desc.src_haddr = 0x00000000;
                new->desc.dest_laddr = dma_dst;
                new->desc.dest_haddr = 0x00000000;
                new->desc.byte_length = copy;
                new->desc.dummy = 0x00000000;
                new->desc.next_desc_laddr = 0x00000000;
                new->desc.next_desc_haddr = 0x00000000;

                if (!first)
                        first = new;
                else
                        prev->desc.next_desc_laddr = new->async_tx.phys;

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= copy;

                if (chan->dir == DMA_MEM_TO_DEV) {
                        dma_src += copy;
                } else if (chan->dir == DMA_DEV_TO_MEM) {
                        dma_dst += copy;
                } else if (chan->dir == DMA_MEM_TO_MEM) {
                        dma_src += copy;
                        dma_dst += copy;
                }

                /* Insert the link descriptor to the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        /* client is in control of this ack */
        first->async_tx.flags = flags;
        first->async_tx.cookie = -EBUSY;

        return &first->async_tx;

fail:
        if (first)
                mmp_hsdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

static int mmp_hsdma_config(struct dma_chan *dchan,
                            struct dma_slave_config *cfg)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);

        memcpy(&chan->slave_config, cfg, sizeof(*cfg));
        return 0;
}

static int mmp_hsdma_pause_chan(struct dma_chan *dchan)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);

        if (!chan->phy)
                return -1;

        disable_chan(chan->phy);
        mmp_hsdma_qos_put(chan);
        chan->status = DMA_PAUSED;

        return 0;
}

static int mmp_hsdma_terminate_all(struct dma_chan *dchan)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
        unsigned long flags;

        if (!dchan)
                return -EINVAL;

        spin_lock_irqsave(&chan->desc_lock, flags);
        chan->status = DMA_COMPLETE;
        disable_chan(chan->phy);
        mmp_hsdma_free_phy(chan);
        mmp_hsdma_free_desc_list(chan, &chan->chain_pending);
        mmp_hsdma_free_desc_list(chan, &chan->chain_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
        chan->idle = true;
        mmp_hsdma_qos_put(chan);

        return 0;
}

static int mmp_hsdma_dump_status(struct dma_chan *dchan)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
        struct mmp_hsdma_device *hsdev = to_mmp_hsdma_dev(chan->chan.device);
        struct mmp_hsdma_phy *phy;
        u32 reg;

        if (!dchan)
                return -EINVAL;

        if (chan->dedicated_chan > 0)
                phy = &hsdev->phy[chan->dedicated_chan];
        else
                phy = chan->phy;

        if (!phy) {
                dev_info(chan->dev, "dma dump status: phy already freed\n");
                return -EINVAL;
        }

        dev_info(chan->dev, "==== high speed dma dump status ====\n");
        reg = DCR;
        dev_info(chan->dev, "DCR[0x%x]=0x%x\n", reg, readl(phy->base + reg));
        reg = DCFGR;
        dev_info(chan->dev, "DCFGR[0x%x]=0x%x\n", reg, readl(phy->base + reg));
        reg = DCCR(phy->idx);
        dev_info(chan->dev, "DCCR(%d)[0x%x]=0x%x\n", phy->idx, reg, readl(phy->base + reg));
        reg = DCDLAR(phy->idx);
        dev_info(chan->dev, "DCDLAR(%d)[0x%x]=0x%x\n", phy->idx, reg, readl(phy->base + reg));
        reg = DCDHAR(phy->idx);
        dev_info(chan->dev, "DCDHAR(%d)[0x%x]=0x%x\n", phy->idx, reg, readl(phy->base + reg));

        return 0;
}

static enum dma_status mmp_hsdma_tx_status(struct dma_chan *dchan,
                                           dma_cookie_t cookie,
                                           struct dma_tx_state *txstate)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
        enum dma_status ret;
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        ret = dma_cookie_status(dchan, cookie, txstate);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        if (ret == DMA_COMPLETE)
                return ret;
        else
                return chan->status;
}

/**
 * mmp_hsdma_issue_pending - Issue the DMA start command
 * @dchan: DMA channel to kick
 *
 * pending list ==> running list
 */
static void mmp_hsdma_issue_pending(struct dma_chan *dchan)
{
        struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
        unsigned long flags;
        int ret = 0;

        mmp_hsdma_qos_get(chan);
        spin_lock_irqsave(&chan->desc_lock, flags);
        ret = start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
        if (ret)
                mmp_hsdma_qos_put(chan);
}

/*
 * dma_do_tasklet
 * Run the completion callbacks
 * Start the pending list
 */
static void dma_do_tasklet(unsigned long data)
{
        struct mmp_hsdma_chan *chan = (struct mmp_hsdma_chan *)data;
        struct mmp_hsdma_desc_sw *desc, *_desc;
        LIST_HEAD(chain_cleanup);
        unsigned long flags;
        struct dmaengine_desc_callback cb;
        int ret = 0;

        /* return if this channel has been stopped */
        spin_lock_irqsave(&chan->desc_lock, flags);
        if (chan->status == DMA_COMPLETE) {
                spin_unlock_irqrestore(&chan->desc_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        /* submit pending list; callback for each desc; free desc */
        spin_lock_irqsave(&chan->desc_lock, flags);

        list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
                /*
                 * move the descriptors to a temporary list so we can drop
                 * the lock during the entire cleanup operation
                 */
                list_move(&desc->node, &chain_cleanup);
        }

        /*
         * The hardware is idle and ready for more when the
         * chain_running list is empty.
         */
        chan->status = list_empty(&chan->chain_running) ?
                DMA_COMPLETE : DMA_IN_PROGRESS;

        /* Start any pending transactions automatically */
        ret = start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        /* restarting the pending queue failed, so qos is no longer needed */
        if (ret)
                mmp_hsdma_qos_put(chan);
        /* Run the callback for each descriptor, in order */
        list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
                struct dma_async_tx_descriptor *txd = &desc->async_tx;

                /* Remove from the list of transactions */
                list_del(&desc->node);
                /* Run the link descriptor callback function */
                dmaengine_desc_get_callback(txd, &cb);
                dmaengine_desc_callback_invoke(&cb, NULL);

                dma_pool_free(chan->desc_pool, desc, txd->phys);
        }
}

static int mmp_hsdma_remove(struct platform_device *op)
{
        struct mmp_hsdma_device *hsdev = platform_get_drvdata(op);
        struct mmp_hsdma_phy *phy;
        int i, irq = 0, irq_num = 0;

        if (op->dev.of_node)
                of_dma_controller_free(op->dev.of_node);

#ifdef CONFIG_PM_RUNTIME
        pm_qos_remove_request(&hsdev->qos_idle);
#endif
        for (i = 0; i < hsdev->dma_channels; i++) {
                if (platform_get_irq(op, i) > 0)
                        irq_num++;
        }

        if (irq_num != hsdev->dma_channels) {
                irq = platform_get_irq(op, 0);
                devm_free_irq(&op->dev, irq, hsdev);
        } else {
                for (i = 0; i < hsdev->dma_channels; i++) {
                        phy = &hsdev->phy[i];
                        irq = platform_get_irq(op, i);
                        devm_free_irq(&op->dev, irq, phy);
                }
        }
        dma_async_device_unregister(&hsdev->device);
        platform_set_drvdata(op, NULL);
        return 0;
}

static const struct of_device_id mmp_hsdma_dt_ids[] = {
        { .compatible = "asr,hsdma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, mmp_hsdma_dt_ids);

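/*
 * Initialize one physical channel and its virtual channel, request the
 * per-channel IRQ when one is provided, and add the channel to the
 * dmaengine device.
 */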
static int mmp_hsdma_chan_init(struct mmp_hsdma_device *hsdev, int idx, int irq)
{
        struct mmp_hsdma_phy *phy = &hsdev->phy[idx];
        struct mmp_hsdma_chan *chan;
        int ret;

        chan = devm_kzalloc(hsdev->dev, sizeof(*chan), GFP_KERNEL);
        if (chan == NULL)
                return -ENOMEM;

        phy->idx = idx;
        phy->base = hsdev->base;

        disable_chan(phy);
        clear_chan_irq(phy);

        if (irq) {
                ret = devm_request_irq(hsdev->dev, irq, mmp_hsdma_chan_handler,
                                       IRQF_SHARED, "hsdma", phy);
                if (ret) {
                        dev_err(hsdev->dev, "channel request irq fail!\n");
                        return ret;
                }
        }

        spin_lock_init(&chan->desc_lock);
        chan->dev = hsdev->dev;
        chan->chan.device = &hsdev->device;
        tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
        INIT_LIST_HEAD(&chan->chain_pending);
        INIT_LIST_HEAD(&chan->chain_running);

        chan->status = DMA_COMPLETE;
        chan->qos_count = 0;
        chan->user_do_qos = 1;

        /* register virt channel to dma engine */
        list_add_tail(&chan->chan.device_node, &hsdev->device.channels);

        return 0;
}

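/*
 * Enable the hsdma clock and release its reset by programming the APMU
 * registers directly (hardcoded physical base, no clk framework involved).
 */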
static int mmp_hsdma_clk_init(void)
{
        void __iomem *apmu;
        u32 val;

        apmu = ioremap(0xd4282800, SZ_4K);
        if (apmu == NULL) {
                pr_err("hsdma: failed to ioremap APMU base\n");
                return -ENXIO;
        }

        val = readl(apmu + 0x3ec);
        val &= ~(0x1 << 1);
        writel(val, apmu + 0x3ec); /* reset */
        ndelay(300);

        val &= ~(0x7 << 8);
        val |= 0x3 | (0x1 << 15) | (0x2 << 8);
        writel(val, apmu + 0x3ec);
        do {
                val = readl(apmu + 0xe0);
                if (!(val & BIT(15)))
                        break;
        } while (1);
        iounmap(apmu);

        return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
        { .compatible = "asr,pdma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

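/*
 * of_dma translate callback: hand out any free virtual channel and derive
 * dedicated_chan and user_do_qos from the single dma-cells argument.
 */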
static struct dma_chan *mmp_hsdma_dma_xlate(struct of_phandle_args *dma_spec,
                                            struct of_dma *ofdma)
{
        struct mmp_hsdma_device *d = ofdma->of_dma_data;
        struct dma_chan *chan;
        struct mmp_hsdma_chan *vchan;
        int dedicated_chan;

        chan = dma_get_any_slave_channel(&d->device);
        if (!chan)
                return NULL;

        vchan = to_mmp_hsdma_chan(chan);

        dedicated_chan = (dma_spec->args[0] & 0xff) >> 8;
        if (dedicated_chan == 0 || dedicated_chan >= d->dma_channels)
                dedicated_chan = -1;
        vchan->dedicated_chan = dedicated_chan;

#ifdef CONFIG_PM_RUNTIME
        if (unlikely(dma_spec->args_count != 1))
                dev_err(d->dev, "#dma-cells should be 1!\n");

        vchan->user_do_qos = (dma_spec->args[0] & 0xff00) ? 1 : 0;

        if (vchan->user_do_qos)
                dev_dbg(d->dev, "channel %d: user does qos itself\n",
                        vchan->chan.chan_id);
        else
                dev_dbg(d->dev, "channel %d: hsdma does qos\n",
                        vchan->chan.chan_id);
#endif
        return chan;
}

static int mmp_hsdma_probe(struct platform_device *op)
{
        struct mmp_hsdma_device *hsdev;
        const struct of_device_id *of_id;
        struct mmp_dma_platdata *hsdata = dev_get_platdata(&op->dev);
        struct resource *iores;
        int i, ret, irq = 0;
        int dma_channels = 0, irq_num = 0;
        const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_4_BYTES;

        hsdev = devm_kzalloc(&op->dev, sizeof(*hsdev), GFP_KERNEL);
        if (!hsdev)
                return -ENOMEM;
        hsdev->dev = &op->dev;

        spin_lock_init(&hsdev->phy_lock);

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        hsdev->base = devm_ioremap_resource(hsdev->dev, iores);
        if (IS_ERR(hsdev->base))
                return PTR_ERR(hsdev->base);

        of_id = of_match_device(mmp_hsdma_dt_ids, hsdev->dev);
        if (of_id)
                of_property_read_u32(hsdev->dev->of_node, "#dma-channels",
                                     &dma_channels);
        else if (hsdata && hsdata->dma_channels)
                dma_channels = hsdata->dma_channels;
        else
                dma_channels = 8; /* default: 8 channels */
        hsdev->dma_channels = dma_channels;

#ifdef CONFIG_PM_RUNTIME
        if (!of_id || of_property_read_u32(hsdev->dev->of_node,
                                           "lpm-qos", &hsdev->lpm_qos)) {
                dev_err(hsdev->dev, "cannot find lpm-qos in device tree\n");
                return -EINVAL;
        }
        hsdev->qos_idle.name = op->name;

        pm_qos_add_request(&hsdev->qos_idle, PM_QOS_CPUIDLE_BLOCK,
                           PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

        pm_runtime_enable(&op->dev);
        /*
         * We can't ensure the pm operations are always in non-atomic context.
         * Actually it depends on the drivers' behavior. So mark it as irq safe.
         */
        pm_runtime_irq_safe(&op->dev);
#endif

        for (i = 0; i < dma_channels; i++) {
                if (platform_get_irq_optional(op, i) > 0)
                        irq_num++;
        }

        hsdev->phy = devm_kcalloc(hsdev->dev, dma_channels, sizeof(*hsdev->phy),
                                  GFP_KERNEL);
        if (hsdev->phy == NULL)
                return -ENOMEM;

        INIT_LIST_HEAD(&hsdev->device.channels);

        for (i = 0; i < dma_channels; i++) {
                irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
                ret = mmp_hsdma_chan_init(hsdev, i, irq);
                if (ret)
                        return ret;
        }

        if (irq_num != dma_channels) {
                /* all channels share one irq, demux inside */
                irq = platform_get_irq(op, 0);
                ret = devm_request_irq(hsdev->dev, irq, mmp_hsdma_int_handler,
                                       IRQF_SHARED, "hsdma", hsdev);
                if (ret)
                        return ret;
        }

        dma_cap_set(DMA_SLAVE, hsdev->device.cap_mask);
        dma_cap_set(DMA_MEMCPY, hsdev->device.cap_mask);
        hsdev->device.dev = &op->dev;
        hsdev->device.device_alloc_chan_resources = mmp_hsdma_alloc_chan_resources;
        hsdev->device.device_free_chan_resources = mmp_hsdma_free_chan_resources;
        hsdev->device.device_tx_status = mmp_hsdma_tx_status;
        hsdev->device.device_prep_dma_memcpy = mmp_hsdma_prep_memcpy;

        hsdev->device.device_issue_pending = mmp_hsdma_issue_pending;
        hsdev->device.device_config = mmp_hsdma_config;
        hsdev->device.device_pause = mmp_hsdma_pause_chan;
        hsdev->device.device_terminate_all = mmp_hsdma_terminate_all;
        hsdev->device.device_dump_status = mmp_hsdma_dump_status;
        hsdev->device.copy_align = DMAENGINE_ALIGN_4_BYTES;
        hsdev->device.src_addr_widths = widths;
        hsdev->device.dst_addr_widths = widths;
        hsdev->device.directions = BIT(DMA_MEM_TO_MEM);

        if (hsdev->dev->coherent_dma_mask)
                dma_set_mask(hsdev->dev, hsdev->dev->coherent_dma_mask);
        else
                dma_set_mask(hsdev->dev, DMA_BIT_MASK(64));

        ret = dma_async_device_register(&hsdev->device);
        if (ret) {
                dev_err(hsdev->device.dev, "unable to register\n");
                return ret;
        }

        if (op->dev.of_node) {
                /* Device-tree DMA controller registration */
                ret = of_dma_controller_register(op->dev.of_node,
                                                 mmp_hsdma_dma_xlate, hsdev);
                if (ret < 0) {
                        dev_err(&op->dev, "of_dma_controller_register failed\n");
                        return ret;
                }
        }

        platform_set_drvdata(op, hsdev);

        /* Init hsdma clk */
        mmp_hsdma_clk_init();

        dev_info(hsdev->device.dev, "initialized %d channels\n", dma_channels);
        return 0;
}

#ifdef CONFIG_PM_RUNTIME
/*
 * Per-channel qos get/put functions. They ensure that pm_runtime_get/put
 * are not called multiple times for one channel, which guarantees
 * pm_runtime_get/put always match for the entire device.
 */
static void mmp_hsdma_qos_get(struct mmp_hsdma_chan *chan)
{
        unsigned long flags;

        if (chan->user_do_qos)
                return;

        spin_lock_irqsave(&chan->desc_lock, flags);
        if (chan->qos_count == 0) {
                chan->qos_count = 1;
                /*
                 * Safe under the spinlock because the device is marked as
                 * irq safe. Similar case for mmp_hsdma_qos_put().
                 */
                pm_runtime_get_sync(chan->dev);
        }
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

static void mmp_hsdma_qos_put(struct mmp_hsdma_chan *chan)
{
        unsigned long flags;

        if (chan->user_do_qos)
                return;

        spin_lock_irqsave(&chan->desc_lock, flags);
        if (chan->qos_count == 1) {
                chan->qos_count = 0;
                pm_runtime_put_autosuspend(chan->dev);
        }
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

static int mmp_hsdma_runtime_suspend(struct device *dev)
{
        struct mmp_hsdma_device *hsdev = dev_get_drvdata(dev);

        pm_qos_update_request(&hsdev->qos_idle,
                              PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

        return 0;
}

static int mmp_hsdma_runtime_resume(struct device *dev)
{
        struct mmp_hsdma_device *hsdev = dev_get_drvdata(dev);

        pm_qos_update_request(&hsdev->qos_idle, hsdev->lpm_qos);

        return 0;
}

static const struct dev_pm_ops mmp_hsdma_pmops = {
        SET_RUNTIME_PM_OPS(mmp_hsdma_runtime_suspend,
                           mmp_hsdma_runtime_resume, NULL)
};
#define MMP_HSDMA_PMOPS (&mmp_hsdma_pmops)
#else
static inline void mmp_hsdma_qos_get(struct mmp_hsdma_chan *chan)
{
}

static inline void mmp_hsdma_qos_put(struct mmp_hsdma_chan *chan)
{
}

#define mmp_hsdma_runtime_suspend NULL
#define mmp_hsdma_runtime_resume NULL
#define MMP_HSDMA_PMOPS NULL
#endif

static const struct platform_device_id mmp_hsdma_id_table[] = {
        { "mmp-hsdma", },
        { },
};

static struct platform_driver mmp_hsdma_driver = {
        .driver = {
                .name = "mmp-hsdma",
                .of_match_table = mmp_hsdma_dt_ids,
                .pm = MMP_HSDMA_PMOPS,
        },
        .id_table = mmp_hsdma_id_table,
        .probe = mmp_hsdma_probe,
        .remove = mmp_hsdma_remove,
};

module_platform_driver(mmp_hsdma_driver);

MODULE_AUTHOR("ASR Microelectronics");
MODULE_DESCRIPTION("ASR High Speed DMA Driver");
MODULE_LICENSE("GPL v2");