/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If a burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and the DMA
 * status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Supports channel-wise pause of DMA.
 * @support_separate_wcount_reg: Supports a separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long csr;
	unsigned long ahb_ptr;
	unsigned long apb_ptr;
	unsigned long ahb_seq;
	unsigned long apb_seq;
	unsigned long wcount;
};

/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware.
 * This contains the details for one transfer used to program the DMA hw.
 * The client's request for a data transfer can be broken into multiple
 * sub-transfers as per the requester's details and hw support. Each
 * sub-transfer is added to the list of transfers and points to the Tegra
 * DMA descriptor that manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	int req_len;
	bool configured;
	bool last_sg;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages one client request.
 * This descriptor keeps track of the transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	int bytes_requested;
	int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	int cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[30];
	bool config_init;
	int id;
	int irq;
	void __iomem *chan_addr;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for bottom half of ISR handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32 global_pause_count;

	/* Some registers need to be cached before suspend */
	u32 reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Reuse a desc only if it has been acked and has no pending callbacks */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate a new DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;
	return 0;
}
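
/*
 * A minimal sketch of how a client is expected to reach the hook above
 * through the generic dmaengine API (client code, not part of this driver;
 * the FIFO address and slave_id value below are made-up placeholders):
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.dst_addr = 0x70002008;		// device FIFO (example only)
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.dst_maxburst = 8;			// in units of dst_addr_width
 *	cfg.slave_id = 2;			// APB requester, unless set via DT
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() lands in tegra_dma_slave_config(), which
 * rejects reconfiguration while requests are still pending.
 */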

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no ISE_EOC status, the last burst has not completed
	 * yet. The last burst may also be in flight: it can complete while
	 * the DMA is paused, in which case it neither raises an interrupt
	 * nor reloads the new configuration.
	 * If ISE_EOC status is already set, the interrupt handler needs to
	 * load the new configuration instead.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
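
/*
 * Worked example for the arithmetic above, assuming the STATUS count field
 * uses the same "byte count minus 4" encoding as the word count programmed
 * by tegra_dma_prep_wcount() (a sketch, not taken from the TRM): for a
 * 64-byte request with 32 bytes still outstanding, the hardware reports
 * 32 - 4 = 28 (0x1C) in the count field, so the amount transferred is
 * 64 - 28 - 4 = 32 bytes.
 */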

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
						&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer, as
	 * the transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* If we DMA for long enough, the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
	return 0;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			(dma_desc->bytes_transferred %
			dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

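/*
 * Residue arithmetic sketch for tegra_dma_tx_status() (illustrative
 * numbers): with bytes_requested = 4096 and bytes_transferred = 1024, the
 * residue is 4096 - (1024 % 4096) = 3072 bytes. The modulo keeps the
 * result correct for cyclic transfers, where bytes_transferred wraps
 * around bytes_requested in handle_cont_sngl_cycle_dma_done().
 */
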
static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in units of the slave bus width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
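
/*
 * Worked examples for get_burst_size() (values chosen for illustration):
 * a client burst of 8 words on a 4-byte bus is 32 bytes, i.e. 8 AHB words,
 * which selects BURST_8; a burst of 2 words on a 2-byte bus is 4 bytes,
 * i.e. 1 AHB word, which selects BURST_1. With burst_size == 0, a 24-byte
 * transfer (24 & 0xF != 0) falls back to BURST_1, a 16-byte transfer picks
 * BURST_4, and a 32-byte transfer picks BURST_8.
 */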

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
		return -EINVAL;
	}
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}
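
/*
 * Arithmetic sketch for the encoding above: the word count field holds the
 * transfer length in bytes minus 4, masked down to a word boundary. A
 * 64-byte transfer is thus programmed as (64 - 4) & 0xFFFC = 60 (0x3C),
 * which is why get_current_xferred_count() adds the 4 back when converting
 * the remaining count to bytes.
 */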

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure the transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
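
/*
 * A minimal client-side sketch of driving a one-shot transfer through the
 * prep hook above via the generic dmaengine API (client code; "nents" and
 * the completion callback are placeholders):
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -EIO;
 *	txd->callback = my_done_cb;	// invoked from the tasklet above
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);	// lands in tegra_dma_issue_pending()
 */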

static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA
	 * is started, new requests can be queued only after terminating
	 * the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure the transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
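
/*
 * A minimal client-side sketch for the cyclic path, e.g. an audio-style
 * ring buffer (client code; buffer size and period are placeholders):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * PAGE_SIZE,
 *					PAGE_SIZE, DMA_DEV_TO_MEM,
 *					DMA_PREP_INTERRUPT);
 *	txd->callback = my_period_cb;	// called once per elapsed period
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * Each period becomes one sg_req above, and the channel keeps looping over
 * the request list until dmaengine_terminate_all() is called.
 */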

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;

	ret = pm_runtime_get_sync(tdma->dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	pm_runtime_put(tdma->dev);

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}
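
/*
 * Device tree sketch of how a client reaches the xlate hook above; the
 * single DMA cell is interpreted as the APB requester (slave) id. Node
 * names and the id value are illustrative only:
 *
 *	serial@70006000 {
 *		...
 *		dmas = <&apbdma 8>, <&apbdma 8>;
 *		dma-names = "rx", "tx";
 *	};
 */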

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};

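/*
 * Per-channel register blocks start at a fixed offset from the controller
 * base and are strided by channel_reg_size; e.g. with the Tegra148 data
 * above, channel 3 sits at base + 0x1000 + 3 * 0x40 (see the chan_addr
 * computation in tegra_dma_probe() below).
 */
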
static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "Error: No device match data found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		ret = tegra_dma_runtime_resume(&pdev->dev);
	else
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Reset DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	pm_runtime_put(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	/*
	 * XXX The hardware appears to support
	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
	 * only used by this driver during tegra_dma_terminate_all()
	 */
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only save the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
		if (tdma->chip_data->support_separate_wcount_reg)
			ch_reg->wcount = tdc_read(tdc,
						  TEGRA_APBDMA_CHAN_WCOUNT);
	}

	clk_disable_unprepare(tdma->dma_clk);

	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i, ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only restore the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		if (tdma->chip_data->support_separate_wcount_reg)
			tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
				  ch_reg->wcount);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");