 1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * DMA driver for Xilinx Video DMA Engine
4 *
5 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 *
7 * Based on the Freescale DMA driver.
8 *
9 * Description:
10 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
11 * core that provides high-bandwidth direct memory access between memory
12 * and AXI4-Stream type video target peripherals. The core provides efficient
 13 * two-dimensional DMA operations with independent asynchronous read (S2MM)
14 * and write (MM2S) channel operation. It can be configured to have either
 15 * one channel or two channels. If configured as two channels, one is used
 16 * to transmit to the video device (MM2S) and the other to receive from the
17 * video device (S2MM). Initialization, status, interrupt and management
18 * registers are accessed through an AXI4-Lite slave interface.
19 *
20 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 21 * provides high-bandwidth one-dimensional direct memory access between memory
22 * and AXI4-Stream target peripherals. It supports one receive and one
23 * transmit channel, both of them optional at synthesis time.
24 *
 25 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
26 * Access (DMA) between a memory-mapped source address and a memory-mapped
27 * destination address.
28 */
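
/*
 * Illustrative sketch (not part of this driver): consumers of these engines
 * normally go through the generic dmaengine client API rather than calling
 * into this file directly. The "rx" channel name, dev, buf_dma, len and
 * my_done_callback below are placeholders, and error handling is trimmed.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_callback;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */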
29
30#include <linux/bitops.h>
31#include <linux/dmapool.h>
32#include <linux/dma/xilinx_dma.h>
33#include <linux/init.h>
34#include <linux/interrupt.h>
35#include <linux/io.h>
36#include <linux/iopoll.h>
37#include <linux/module.h>
38#include <linux/of_address.h>
39#include <linux/of_dma.h>
40#include <linux/of_platform.h>
41#include <linux/of_irq.h>
42#include <linux/slab.h>
43#include <linux/clk.h>
44#include <linux/io-64-nonatomic-lo-hi.h>
45
46#include "../dmaengine.h"
47
48/* Register/Descriptor Offsets */
49#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
50#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
51#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
52#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
53
54/* Control Registers */
55#define XILINX_DMA_REG_DMACR 0x0000
56#define XILINX_DMA_DMACR_DELAY_MAX 0xff
57#define XILINX_DMA_DMACR_DELAY_SHIFT 24
58#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
59#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
60#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
61#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
62#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
63#define XILINX_DMA_DMACR_MASTER_SHIFT 8
64#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
65#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
66#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
67#define XILINX_DMA_DMACR_RESET BIT(2)
68#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
69#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
70#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
71#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
72#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
73#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
74
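/*
 * For orientation (illustrative, not a verbatim copy of the config path):
 * the coalescing fields above are packed into DMACR as a frame count and an
 * IRQ delay, roughly like so:
 *
 *	dmacr &= ~(XILINX_DMA_DMACR_FRAME_COUNT_MASK |
 *		   XILINX_DMA_DMACR_DELAY_MASK);
 *	dmacr |= frames << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 *	dmacr |= delay << XILINX_DMA_DMACR_DELAY_SHIFT;
 */
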
75#define XILINX_DMA_REG_DMASR 0x0004
76#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
77#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
78#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
79#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
80#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
81#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
82#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
83#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
84#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
85#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
86#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
87#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
88#define XILINX_DMA_DMASR_SG_MASK BIT(3)
89#define XILINX_DMA_DMASR_IDLE BIT(1)
90#define XILINX_DMA_DMASR_HALTED BIT(0)
91#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
92#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
93
94#define XILINX_DMA_REG_CURDESC 0x0008
95#define XILINX_DMA_REG_TAILDESC 0x0010
96#define XILINX_DMA_REG_REG_INDEX 0x0014
97#define XILINX_DMA_REG_FRMSTORE 0x0018
98#define XILINX_DMA_REG_THRESHOLD 0x001c
99#define XILINX_DMA_REG_FRMPTR_STS 0x0024
100#define XILINX_DMA_REG_PARK_PTR 0x0028
101#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
102#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
103#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
104#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
105#define XILINX_DMA_REG_VDMA_VERSION 0x002c
106
107/* Register Direct Mode Registers */
108#define XILINX_DMA_REG_VSIZE 0x0000
109#define XILINX_DMA_REG_HSIZE 0x0004
110
111#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
112#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
113#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
114
115#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
116#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
117
118#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
119#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
120
121/* HW specific definitions */
122#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
123
124#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
125 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
126 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
127 XILINX_DMA_DMASR_ERR_IRQ)
128
129#define XILINX_DMA_DMASR_ALL_ERR_MASK \
130 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
131 XILINX_DMA_DMASR_SOF_LATE_ERR | \
132 XILINX_DMA_DMASR_SG_DEC_ERR | \
133 XILINX_DMA_DMASR_SG_SLV_ERR | \
134 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
135 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
136 XILINX_DMA_DMASR_DMA_DEC_ERR | \
137 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
138 XILINX_DMA_DMASR_DMA_INT_ERR)
139
140/*
141 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
142 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
143 * is enabled in the h/w system.
144 */
145#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
146 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
147 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
148 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
149 XILINX_DMA_DMASR_DMA_INT_ERR)
150
151/* Axi VDMA Flush on Fsync bits */
152#define XILINX_DMA_FLUSH_S2MM 3
153#define XILINX_DMA_FLUSH_MM2S 2
154#define XILINX_DMA_FLUSH_BOTH 1
155
156/* Delay loop counter to prevent hardware failure */
157#define XILINX_DMA_LOOP_COUNT 1000000
158
159/* AXI DMA Specific Registers/Offsets */
160#define XILINX_DMA_REG_SRCDSTADDR 0x18
161#define XILINX_DMA_REG_BTT 0x28
162
163/* AXI DMA Specific Masks/Bit fields */
164#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
165#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
166#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
167#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
168#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
169#define XILINX_DMA_CR_COALESCE_SHIFT 16
170#define XILINX_DMA_BD_SOP BIT(27)
171#define XILINX_DMA_BD_EOP BIT(26)
172#define XILINX_DMA_COALESCE_MAX 255
173#define XILINX_DMA_NUM_DESCS 255
174#define XILINX_DMA_NUM_APP_WORDS 5
175
 176/* Multi-Channel DMA Descriptor offsets */
177#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
178#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
179
180/* Multi-Channel DMA Masks/Shifts */
181#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
182#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
183#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
184#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
185#define XILINX_DMA_BD_STRIDE_SHIFT 0
186#define XILINX_DMA_BD_VSIZE_SHIFT 19
187
188/* AXI CDMA Specific Registers/Offsets */
189#define XILINX_CDMA_REG_SRCADDR 0x18
190#define XILINX_CDMA_REG_DSTADDR 0x20
191
192/* AXI CDMA Specific Masks */
193#define XILINX_CDMA_CR_SGMODE BIT(3)
194
195#define xilinx_prep_dma_addr_t(addr) \
196 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
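/*
 * Note (illustration): the token pasting above turns, for example,
 * xilinx_prep_dma_addr_t(hw->src_addr) into
 *
 *	(dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))
 *
 * i.e. it recombines a split LSB/MSB pair in a hardware descriptor into a
 * single dma_addr_t.
 */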
197/**
198 * struct xilinx_vdma_desc_hw - Hardware Descriptor
199 * @next_desc: Next Descriptor Pointer @0x00
200 * @pad1: Reserved @0x04
201 * @buf_addr: Buffer address @0x08
202 * @buf_addr_msb: MSB of Buffer address @0x0C
203 * @vsize: Vertical Size @0x10
204 * @hsize: Horizontal Size @0x14
205 * @stride: Number of bytes between the first
206 * pixels of each horizontal line @0x18
207 */
208struct xilinx_vdma_desc_hw {
209 u32 next_desc;
210 u32 pad1;
211 u32 buf_addr;
212 u32 buf_addr_msb;
213 u32 vsize;
214 u32 hsize;
215 u32 stride;
216} __aligned(64);
217
218/**
219 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
220 * @next_desc: Next Descriptor Pointer @0x00
221 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
222 * @buf_addr: Buffer address @0x08
223 * @buf_addr_msb: MSB of Buffer address @0x0C
224 * @mcdma_control: Control field for mcdma @0x10
225 * @vsize_stride: Vsize and Stride field for mcdma @0x14
226 * @control: Control field @0x18
227 * @status: Status field @0x1C
228 * @app: APP Fields @0x20 - 0x30
229 */
230struct xilinx_axidma_desc_hw {
231 u32 next_desc;
232 u32 next_desc_msb;
233 u32 buf_addr;
234 u32 buf_addr_msb;
235 u32 mcdma_control;
236 u32 vsize_stride;
237 u32 control;
238 u32 status;
239 u32 app[XILINX_DMA_NUM_APP_WORDS];
240} __aligned(64);
241
242/**
243 * struct xilinx_cdma_desc_hw - Hardware Descriptor
244 * @next_desc: Next Descriptor Pointer @0x00
245 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
246 * @src_addr: Source address @0x08
247 * @src_addr_msb: Source address MSB @0x0C
248 * @dest_addr: Destination address @0x10
249 * @dest_addr_msb: Destination address MSB @0x14
250 * @control: Control field @0x18
251 * @status: Status field @0x1C
252 */
253struct xilinx_cdma_desc_hw {
254 u32 next_desc;
255 u32 next_desc_msb;
256 u32 src_addr;
257 u32 src_addr_msb;
258 u32 dest_addr;
259 u32 dest_addr_msb;
260 u32 control;
261 u32 status;
262} __aligned(64);
263
264/**
265 * struct xilinx_vdma_tx_segment - Descriptor segment
266 * @hw: Hardware descriptor
267 * @node: Node in the descriptor segments list
268 * @phys: Physical address of segment
269 */
270struct xilinx_vdma_tx_segment {
271 struct xilinx_vdma_desc_hw hw;
272 struct list_head node;
273 dma_addr_t phys;
274} __aligned(64);
275
276/**
277 * struct xilinx_axidma_tx_segment - Descriptor segment
278 * @hw: Hardware descriptor
279 * @node: Node in the descriptor segments list
280 * @phys: Physical address of segment
281 */
282struct xilinx_axidma_tx_segment {
283 struct xilinx_axidma_desc_hw hw;
284 struct list_head node;
285 dma_addr_t phys;
286} __aligned(64);
287
288/**
289 * struct xilinx_cdma_tx_segment - Descriptor segment
290 * @hw: Hardware descriptor
291 * @node: Node in the descriptor segments list
292 * @phys: Physical address of segment
293 */
294struct xilinx_cdma_tx_segment {
295 struct xilinx_cdma_desc_hw hw;
296 struct list_head node;
297 dma_addr_t phys;
298} __aligned(64);
299
300/**
301 * struct xilinx_dma_tx_descriptor - Per Transaction structure
302 * @async_tx: Async transaction descriptor
303 * @segments: TX segments list
304 * @node: Node in the channel descriptors list
305 * @cyclic: Check for cyclic transfers.
306 */
307struct xilinx_dma_tx_descriptor {
308 struct dma_async_tx_descriptor async_tx;
309 struct list_head segments;
310 struct list_head node;
311 bool cyclic;
312};
313
314/**
315 * struct xilinx_dma_chan - Driver specific DMA channel structure
316 * @xdev: Driver specific device structure
317 * @ctrl_offset: Control registers offset
318 * @desc_offset: TX descriptor registers offset
319 * @lock: Descriptor operation lock
320 * @pending_list: Descriptors waiting
321 * @active_list: Descriptors ready to submit
322 * @done_list: Complete descriptors
323 * @free_seg_list: Free descriptors
324 * @common: DMA common channel
325 * @desc_pool: Descriptors pool
326 * @dev: The dma device
327 * @irq: Channel IRQ
328 * @id: Channel ID
329 * @direction: Transfer direction
330 * @num_frms: Number of frames
331 * @has_sg: Support scatter transfers
332 * @cyclic: Check for cyclic transfers.
333 * @genlock: Support genlock mode
334 * @err: Channel has errors
335 * @idle: Check for channel idle
336 * @terminating: Check for channel being synchronized by user
337 * @tasklet: Cleanup work after irq
338 * @config: Device configuration info
339 * @flush_on_fsync: Flush on Frame sync
340 * @desc_pendingcount: Descriptor pending count
341 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
342 * @desc_submitcount: Descriptor h/w submitted count
343 * @residue: Residue for AXI DMA
344 * @seg_v: Statically allocated segments base
345 * @seg_p: Physical allocated segments base
346 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
347 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 348 * @start_transfer: IP-specific callback to start a transfer
 349 * @stop_transfer: IP-specific callback to stop/quiesce a transfer
350 * @tdest: TDEST value for mcdma
351 * @has_vflip: S2MM vertical flip
352 */
353struct xilinx_dma_chan {
354 struct xilinx_dma_device *xdev;
355 u32 ctrl_offset;
356 u32 desc_offset;
357 spinlock_t lock;
358 struct list_head pending_list;
359 struct list_head active_list;
360 struct list_head done_list;
361 struct list_head free_seg_list;
362 struct dma_chan common;
363 struct dma_pool *desc_pool;
364 struct device *dev;
365 int irq;
366 int id;
367 enum dma_transfer_direction direction;
368 int num_frms;
369 bool has_sg;
370 bool cyclic;
371 bool genlock;
372 bool err;
373 bool idle;
374 bool terminating;
375 struct tasklet_struct tasklet;
376 struct xilinx_vdma_config config;
377 bool flush_on_fsync;
378 u32 desc_pendingcount;
379 bool ext_addr;
380 u32 desc_submitcount;
381 u32 residue;
382 struct xilinx_axidma_tx_segment *seg_v;
383 dma_addr_t seg_p;
384 struct xilinx_axidma_tx_segment *cyclic_seg_v;
385 dma_addr_t cyclic_seg_p;
386 void (*start_transfer)(struct xilinx_dma_chan *chan);
387 int (*stop_transfer)(struct xilinx_dma_chan *chan);
388 u16 tdest;
389 bool has_vflip;
390};
391
392/**
393 * enum xdma_ip_type - DMA IP type.
394 *
395 * @XDMA_TYPE_AXIDMA: Axi dma ip.
396 * @XDMA_TYPE_CDMA: Axi cdma ip.
397 * @XDMA_TYPE_VDMA: Axi vdma ip.
398 *
399 */
400enum xdma_ip_type {
401 XDMA_TYPE_AXIDMA = 0,
402 XDMA_TYPE_CDMA,
403 XDMA_TYPE_VDMA,
404};
405
406struct xilinx_dma_config {
407 enum xdma_ip_type dmatype;
408 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
409 struct clk **tx_clk, struct clk **txs_clk,
410 struct clk **rx_clk, struct clk **rxs_clk);
411};
412
413/**
414 * struct xilinx_dma_device - DMA device structure
415 * @regs: I/O mapped base address
416 * @dev: Device Structure
417 * @common: DMA device structure
418 * @chan: Driver specific DMA channel
419 * @mcdma: Specifies whether Multi-Channel is present or not
420 * @flush_on_fsync: Flush on frame sync
421 * @ext_addr: Indicates 64 bit addressing is supported by dma device
422 * @pdev: Platform device structure pointer
423 * @dma_config: DMA config structure
 424 * @axi_clk: DMA AXI4-Lite interface clock
425 * @tx_clk: DMA mm2s clock
426 * @txs_clk: DMA mm2s stream clock
427 * @rx_clk: DMA s2mm clock
428 * @rxs_clk: DMA s2mm stream clock
429 * @nr_channels: Number of channels DMA device supports
430 * @chan_id: DMA channel identifier
431 * @max_buffer_len: Max buffer length
432 */
433struct xilinx_dma_device {
434 void __iomem *regs;
435 struct device *dev;
436 struct dma_device common;
437 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
438 bool mcdma;
439 u32 flush_on_fsync;
440 bool ext_addr;
441 struct platform_device *pdev;
442 const struct xilinx_dma_config *dma_config;
443 struct clk *axi_clk;
444 struct clk *tx_clk;
445 struct clk *txs_clk;
446 struct clk *rx_clk;
447 struct clk *rxs_clk;
448 u32 nr_channels;
449 u32 chan_id;
450 u32 max_buffer_len;
451};
452
453/* Macros */
454#define to_xilinx_chan(chan) \
455 container_of(chan, struct xilinx_dma_chan, common)
456#define to_dma_tx_descriptor(tx) \
457 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
458#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
459 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
460 val, cond, delay_us, timeout_us)
461
462/* IO accessors */
463static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
464{
465 return ioread32(chan->xdev->regs + reg);
466}
467
468static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
469{
470 iowrite32(value, chan->xdev->regs + reg);
471}
472
473static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
474 u32 value)
475{
476 dma_write(chan, chan->desc_offset + reg, value);
477}
478
479static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
480{
481 return dma_read(chan, chan->ctrl_offset + reg);
482}
483
484static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
485 u32 value)
486{
487 dma_write(chan, chan->ctrl_offset + reg, value);
488}
489
490static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
491 u32 clr)
492{
493 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
494}
495
496static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
497 u32 set)
498{
499 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
500}
501
502/**
503 * vdma_desc_write_64 - 64-bit descriptor write
504 * @chan: Driver specific VDMA channel
505 * @reg: Register to write
506 * @value_lsb: lower address of the descriptor.
507 * @value_msb: upper address of the descriptor.
508 *
 509 * Since the VDMA driver may write to a register offset that is not
 510 * 64-bit aligned (e.g. 0x5c), the value is written as two separate
 511 * 32-bit writes instead of a single 64-bit register write.
512 */
513static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
514 u32 value_lsb, u32 value_msb)
515{
 516 /* Write the lsb 32 bits */
517 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
518
519 /* Write the msb 32 bits */
520 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
521}
522
523static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
524{
525 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
526}
527
528static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
529 dma_addr_t addr)
530{
531 if (chan->ext_addr)
532 dma_writeq(chan, reg, addr);
533 else
534 dma_ctrl_write(chan, reg, addr);
535}
536
537static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
538 struct xilinx_axidma_desc_hw *hw,
539 dma_addr_t buf_addr, size_t sg_used,
540 size_t period_len)
541{
542 if (chan->ext_addr) {
543 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
544 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
545 period_len);
546 } else {
547 hw->buf_addr = buf_addr + sg_used + period_len;
548 }
549}
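
/*
 * Example (illustration only): the cyclic prep routine below calls this as
 *
 *	xilinx_axidma_buf(chan, hw, buf_addr, sg_used, period_len * i);
 *
 * so with period_len = 4096 the per-period buffer addresses advance through
 * buf_addr, buf_addr + 4096, ..., with the MSB half programmed only when the
 * channel supports 64-bit addressing.
 */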
550
551/* -----------------------------------------------------------------------------
552 * Descriptors and segments alloc and free
553 */
554
555/**
556 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
557 * @chan: Driver specific DMA channel
558 *
559 * Return: The allocated segment on success and NULL on failure.
560 */
561static struct xilinx_vdma_tx_segment *
562xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
563{
564 struct xilinx_vdma_tx_segment *segment;
565 dma_addr_t phys;
566
567 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
568 if (!segment)
569 return NULL;
570
571 segment->phys = phys;
572
573 return segment;
574}
575
576/**
577 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
578 * @chan: Driver specific DMA channel
579 *
580 * Return: The allocated segment on success and NULL on failure.
581 */
582static struct xilinx_cdma_tx_segment *
583xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
584{
585 struct xilinx_cdma_tx_segment *segment;
586 dma_addr_t phys;
587
588 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
589 if (!segment)
590 return NULL;
591
592 segment->phys = phys;
593
594 return segment;
595}
596
597/**
598 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
599 * @chan: Driver specific DMA channel
600 *
601 * Return: The allocated segment on success and NULL on failure.
602 */
603static struct xilinx_axidma_tx_segment *
604xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
605{
606 struct xilinx_axidma_tx_segment *segment = NULL;
607 unsigned long flags;
608
609 spin_lock_irqsave(&chan->lock, flags);
610 if (!list_empty(&chan->free_seg_list)) {
611 segment = list_first_entry(&chan->free_seg_list,
612 struct xilinx_axidma_tx_segment,
613 node);
614 list_del(&segment->node);
615 }
616 spin_unlock_irqrestore(&chan->lock, flags);
617
618 return segment;
619}
620
621static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
622{
623 u32 next_desc = hw->next_desc;
624 u32 next_desc_msb = hw->next_desc_msb;
625
626 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
627
628 hw->next_desc = next_desc;
629 hw->next_desc_msb = next_desc_msb;
630}
631
632/**
633 * xilinx_dma_free_tx_segment - Free transaction segment
634 * @chan: Driver specific DMA channel
635 * @segment: DMA transaction segment
636 */
637static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
638 struct xilinx_axidma_tx_segment *segment)
639{
640 xilinx_dma_clean_hw_desc(&segment->hw);
641
642 list_add_tail(&segment->node, &chan->free_seg_list);
643}
644
645/**
646 * xilinx_cdma_free_tx_segment - Free transaction segment
647 * @chan: Driver specific DMA channel
648 * @segment: DMA transaction segment
649 */
650static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
651 struct xilinx_cdma_tx_segment *segment)
652{
653 dma_pool_free(chan->desc_pool, segment, segment->phys);
654}
655
656/**
657 * xilinx_vdma_free_tx_segment - Free transaction segment
658 * @chan: Driver specific DMA channel
659 * @segment: DMA transaction segment
660 */
661static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
662 struct xilinx_vdma_tx_segment *segment)
663{
664 dma_pool_free(chan->desc_pool, segment, segment->phys);
665}
666
667/**
 668 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
669 * @chan: Driver specific DMA channel
670 *
671 * Return: The allocated descriptor on success and NULL on failure.
672 */
673static struct xilinx_dma_tx_descriptor *
674xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
675{
676 struct xilinx_dma_tx_descriptor *desc;
677
678 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
679 if (!desc)
680 return NULL;
681
682 INIT_LIST_HEAD(&desc->segments);
683
684 return desc;
685}
686
687/**
688 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
689 * @chan: Driver specific DMA channel
690 * @desc: DMA transaction descriptor
691 */
692static void
693xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
694 struct xilinx_dma_tx_descriptor *desc)
695{
696 struct xilinx_vdma_tx_segment *segment, *next;
697 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
698 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
699
700 if (!desc)
701 return;
702
703 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
704 list_for_each_entry_safe(segment, next, &desc->segments, node) {
705 list_del(&segment->node);
706 xilinx_vdma_free_tx_segment(chan, segment);
707 }
708 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
709 list_for_each_entry_safe(cdma_segment, cdma_next,
710 &desc->segments, node) {
711 list_del(&cdma_segment->node);
712 xilinx_cdma_free_tx_segment(chan, cdma_segment);
713 }
714 } else {
715 list_for_each_entry_safe(axidma_segment, axidma_next,
716 &desc->segments, node) {
717 list_del(&axidma_segment->node);
718 xilinx_dma_free_tx_segment(chan, axidma_segment);
719 }
720 }
721
722 kfree(desc);
723}
724
725/* Required functions */
726
727/**
728 * xilinx_dma_free_desc_list - Free descriptors list
729 * @chan: Driver specific DMA channel
730 * @list: List to parse and delete the descriptor
731 */
732static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
733 struct list_head *list)
734{
735 struct xilinx_dma_tx_descriptor *desc, *next;
736
737 list_for_each_entry_safe(desc, next, list, node) {
738 list_del(&desc->node);
739 xilinx_dma_free_tx_descriptor(chan, desc);
740 }
741}
742
743/**
744 * xilinx_dma_free_descriptors - Free channel descriptors
745 * @chan: Driver specific DMA channel
746 */
747static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
748{
749 unsigned long flags;
750
751 spin_lock_irqsave(&chan->lock, flags);
752
753 xilinx_dma_free_desc_list(chan, &chan->pending_list);
754 xilinx_dma_free_desc_list(chan, &chan->done_list);
755 xilinx_dma_free_desc_list(chan, &chan->active_list);
756
757 spin_unlock_irqrestore(&chan->lock, flags);
758}
759
760/**
761 * xilinx_dma_free_chan_resources - Free channel resources
762 * @dchan: DMA channel
763 */
764static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
765{
766 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
767 unsigned long flags;
768
769 dev_dbg(chan->dev, "Free all channel resources.\n");
770
771 xilinx_dma_free_descriptors(chan);
772
773 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
774 spin_lock_irqsave(&chan->lock, flags);
775 INIT_LIST_HEAD(&chan->free_seg_list);
776 spin_unlock_irqrestore(&chan->lock, flags);
777
778 /* Free memory that is allocated for BD */
779 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
780 XILINX_DMA_NUM_DESCS, chan->seg_v,
781 chan->seg_p);
782
783 /* Free Memory that is allocated for cyclic DMA Mode */
784 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
785 chan->cyclic_seg_v, chan->cyclic_seg_p);
786 }
787
788 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
789 dma_pool_destroy(chan->desc_pool);
790 chan->desc_pool = NULL;
791 }
792}
793
794/**
795 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
796 * @chan: Driver specific dma channel
797 * @desc: dma transaction descriptor
798 * @flags: flags for spin lock
799 */
800static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
801 struct xilinx_dma_tx_descriptor *desc,
802 unsigned long *flags)
803{
804 dma_async_tx_callback callback;
805 void *callback_param;
806
807 callback = desc->async_tx.callback;
808 callback_param = desc->async_tx.callback_param;
809 if (callback) {
810 spin_unlock_irqrestore(&chan->lock, *flags);
811 callback(callback_param);
812 spin_lock_irqsave(&chan->lock, *flags);
813 }
814}
815
816/**
817 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
818 * @chan: Driver specific DMA channel
819 */
820static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
821{
822 struct xilinx_dma_tx_descriptor *desc, *next;
823 unsigned long flags;
824
825 spin_lock_irqsave(&chan->lock, flags);
826
827 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
828 struct dmaengine_desc_callback cb;
829
830 if (desc->cyclic) {
831 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
832 break;
833 }
834
835 /* Remove from the list of running transactions */
836 list_del(&desc->node);
837
838 /* Run the link descriptor callback function */
839 dmaengine_desc_get_callback(&desc->async_tx, &cb);
840 if (dmaengine_desc_callback_valid(&cb)) {
841 spin_unlock_irqrestore(&chan->lock, flags);
842 dmaengine_desc_callback_invoke(&cb, NULL);
843 spin_lock_irqsave(&chan->lock, flags);
844 }
845
846 /* Run any dependencies, then free the descriptor */
847 dma_run_dependencies(&desc->async_tx);
848 xilinx_dma_free_tx_descriptor(chan, desc);
849
850 /*
 851 * While we ran the callback, the user may have called a terminate
 852 * function, which takes care of cleaning up any remaining descriptors
853 */
854 if (chan->terminating)
855 break;
856 }
857
858 spin_unlock_irqrestore(&chan->lock, flags);
859}
860
861/**
 862 * xilinx_dma_do_tasklet - Completion tasklet callback
863 * @data: Pointer to the Xilinx DMA channel structure
864 */
865static void xilinx_dma_do_tasklet(unsigned long data)
866{
867 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
868
869 xilinx_dma_chan_desc_cleanup(chan);
870}
871
872/**
873 * xilinx_dma_alloc_chan_resources - Allocate channel resources
874 * @dchan: DMA channel
875 *
876 * Return: '0' on success and failure value on error
877 */
878static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
879{
880 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
881 int i;
882
883 /* Has this channel already been allocated? */
884 if (chan->desc_pool)
885 return 0;
886
887 /*
 888 * We need the descriptor to be aligned to 64 bytes
 889 * to meet the Xilinx VDMA specification requirement.
890 */
891 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
892 /* Allocate the buffer descriptors. */
893 chan->seg_v = dma_alloc_coherent(chan->dev,
894 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
895 &chan->seg_p, GFP_KERNEL);
896 if (!chan->seg_v) {
897 dev_err(chan->dev,
898 "unable to allocate channel %d descriptors\n",
899 chan->id);
900 return -ENOMEM;
901 }
902 /*
 903 * For cyclic DMA mode we need to program the tail descriptor
 904 * register with a value that is not part of the BD chain, so
 905 * allocate a descriptor segment during channel allocation for
 906 * programming the tail descriptor.
907 */
908 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
909 sizeof(*chan->cyclic_seg_v),
910 &chan->cyclic_seg_p,
911 GFP_KERNEL);
912 if (!chan->cyclic_seg_v) {
913 dev_err(chan->dev,
914 "unable to allocate desc segment for cyclic DMA\n");
915 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
916 XILINX_DMA_NUM_DESCS, chan->seg_v,
917 chan->seg_p);
918 return -ENOMEM;
919 }
920 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
921
922 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
923 chan->seg_v[i].hw.next_desc =
924 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
925 ((i + 1) % XILINX_DMA_NUM_DESCS));
926 chan->seg_v[i].hw.next_desc_msb =
927 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
928 ((i + 1) % XILINX_DMA_NUM_DESCS));
929 chan->seg_v[i].phys = chan->seg_p +
930 sizeof(*chan->seg_v) * i;
931 list_add_tail(&chan->seg_v[i].node,
932 &chan->free_seg_list);
933 }
934 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
935 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
936 chan->dev,
937 sizeof(struct xilinx_cdma_tx_segment),
938 __alignof__(struct xilinx_cdma_tx_segment),
939 0);
940 } else {
941 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
942 chan->dev,
943 sizeof(struct xilinx_vdma_tx_segment),
944 __alignof__(struct xilinx_vdma_tx_segment),
945 0);
946 }
947
948 if (!chan->desc_pool &&
949 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
950 dev_err(chan->dev,
951 "unable to allocate channel %d descriptor pool\n",
952 chan->id);
953 return -ENOMEM;
954 }
955
956 dma_cookie_init(dchan);
957
958 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 959 /* For AXI DMA, resetting one channel will reset the
 960 * other channel as well, so enable the interrupts here.
961 */
962 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
963 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
964 }
965
966 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
967 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
968 XILINX_CDMA_CR_SGMODE);
969
970 return 0;
971}
972
973/**
974 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
975 * @chan: Driver specific DMA channel
976 * @size: Total data that needs to be copied
977 * @done: Amount of data that has been already copied
978 *
979 * Return: Amount of data that has to be copied
980 */
981static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
982 int size, int done)
983{
984 size_t copy;
985
986 copy = min_t(size_t, size - done,
987 chan->xdev->max_buffer_len);
988
989 if ((copy + done < size) &&
990 chan->xdev->common.copy_align) {
991 /*
992 * If this is not the last descriptor, make sure
993 * the next one will be properly aligned
994 */
995 copy = rounddown(copy,
996 (1 << chan->xdev->common.copy_align));
997 }
998 return copy;
999}
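
/*
 * Worked example (assumed values): with max_buffer_len = 16383 and
 * copy_align = 3 (8-byte alignment), a 20000-byte transfer is split so that
 * the first call clamps copy to 16383 and, because more data remains, rounds
 * it down to 16376 to keep the next descriptor 8-byte aligned; the second
 * call then copies the remaining 3624 bytes without rounding, since it is
 * the last piece.
 */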
1000
1001/**
1002 * xilinx_dma_tx_status - Get DMA transaction status
1003 * @dchan: DMA channel
1004 * @cookie: Transaction identifier
1005 * @txstate: Transaction state
1006 *
1007 * Return: DMA transaction status
1008 */
1009static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1010 dma_cookie_t cookie,
1011 struct dma_tx_state *txstate)
1012{
1013 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1014 struct xilinx_dma_tx_descriptor *desc;
1015 struct xilinx_axidma_tx_segment *segment;
1016 struct xilinx_axidma_desc_hw *hw;
1017 enum dma_status ret;
1018 unsigned long flags;
1019 u32 residue = 0;
1020
1021 ret = dma_cookie_status(dchan, cookie, txstate);
1022 if (ret == DMA_COMPLETE || !txstate)
1023 return ret;
1024
1025 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1026 spin_lock_irqsave(&chan->lock, flags);
1027
1028 desc = list_last_entry(&chan->active_list,
1029 struct xilinx_dma_tx_descriptor, node);
1030 if (chan->has_sg) {
1031 list_for_each_entry(segment, &desc->segments, node) {
1032 hw = &segment->hw;
1033 residue += (hw->control - hw->status) &
1034 chan->xdev->max_buffer_len;
1035 }
1036 }
1037 spin_unlock_irqrestore(&chan->lock, flags);
1038
1039 chan->residue = residue;
1040 dma_set_residue(txstate, chan->residue);
1041 }
1042
1043 return ret;
1044}
1045
1046/**
1047 * xilinx_dma_stop_transfer - Halt DMA channel
1048 * @chan: Driver specific DMA channel
1049 *
1050 * Return: '0' on success and failure value on error
1051 */
1052static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1053{
1054 u32 val;
1055
1056 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1057
1058 /* Wait for the hardware to halt */
1059 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1060 val & XILINX_DMA_DMASR_HALTED, 0,
1061 XILINX_DMA_LOOP_COUNT);
1062}
1063
1064/**
1065 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1066 * @chan: Driver specific DMA channel
1067 *
1068 * Return: '0' on success and failure value on error
1069 */
1070static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1071{
1072 u32 val;
1073
1074 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1075 val & XILINX_DMA_DMASR_IDLE, 0,
1076 XILINX_DMA_LOOP_COUNT);
1077}
1078
1079/**
1080 * xilinx_dma_start - Start DMA channel
1081 * @chan: Driver specific DMA channel
1082 */
1083static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1084{
1085 int err;
1086 u32 val;
1087
1088 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1089
1090 /* Wait for the hardware to start */
1091 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1092 !(val & XILINX_DMA_DMASR_HALTED), 0,
1093 XILINX_DMA_LOOP_COUNT);
1094
1095 if (err) {
1096 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1097 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1098
1099 chan->err = true;
1100 }
1101}
1102
1103/**
1104 * xilinx_vdma_start_transfer - Starts VDMA transfer
1105 * @chan: Driver specific channel struct pointer
1106 */
1107static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1108{
1109 struct xilinx_vdma_config *config = &chan->config;
1110 struct xilinx_dma_tx_descriptor *desc;
1111 u32 reg, j;
1112 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1113 int i = 0;
1114
1115 /* This function was invoked with lock held */
1116 if (chan->err)
1117 return;
1118
1119 if (!chan->idle)
1120 return;
1121
1122 if (list_empty(&chan->pending_list))
1123 return;
1124
1125 desc = list_first_entry(&chan->pending_list,
1126 struct xilinx_dma_tx_descriptor, node);
1127
1128 /* Configure the hardware using info in the config structure */
1129 if (chan->has_vflip) {
1130 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1131 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1132 reg |= config->vflip_en;
1133 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1134 reg);
1135 }
1136
1137 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1138
1139 if (config->frm_cnt_en)
1140 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1141 else
1142 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1143
1144 /* If not parking, enable circular mode */
1145 if (config->park)
1146 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1147 else
1148 reg |= XILINX_DMA_DMACR_CIRC_EN;
1149
1150 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1151
1152 j = chan->desc_submitcount;
1153 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1154 if (chan->direction == DMA_MEM_TO_DEV) {
1155 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1156 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1157 } else {
1158 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1159 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1160 }
1161 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1162
1163 /* Start the hardware */
1164 xilinx_dma_start(chan);
1165
1166 if (chan->err)
1167 return;
1168
1169 /* Start the transfer */
1170 if (chan->desc_submitcount < chan->num_frms)
1171 i = chan->desc_submitcount;
1172
1173 list_for_each_entry(segment, &desc->segments, node) {
1174 if (chan->ext_addr)
1175 vdma_desc_write_64(chan,
1176 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1177 segment->hw.buf_addr,
1178 segment->hw.buf_addr_msb);
1179 else
1180 vdma_desc_write(chan,
1181 XILINX_VDMA_REG_START_ADDRESS(i++),
1182 segment->hw.buf_addr);
1183
1184 last = segment;
1185 }
1186
1187 if (!last)
1188 return;
1189
 1190 /* HW expects these parameters to be the same for one transaction */
1191 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1192 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1193 last->hw.stride);
1194 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1195
1196 chan->desc_submitcount++;
1197 chan->desc_pendingcount--;
1198 list_del(&desc->node);
1199 list_add_tail(&desc->node, &chan->active_list);
1200 if (chan->desc_submitcount == chan->num_frms)
1201 chan->desc_submitcount = 0;
1202
1203 chan->idle = false;
1204}
1205
1206/**
1207 * xilinx_cdma_start_transfer - Starts cdma transfer
1208 * @chan: Driver specific channel struct pointer
1209 */
1210static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1211{
1212 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1213 struct xilinx_cdma_tx_segment *tail_segment;
1214 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1215
1216 if (chan->err)
1217 return;
1218
1219 if (!chan->idle)
1220 return;
1221
1222 if (list_empty(&chan->pending_list))
1223 return;
1224
1225 head_desc = list_first_entry(&chan->pending_list,
1226 struct xilinx_dma_tx_descriptor, node);
1227 tail_desc = list_last_entry(&chan->pending_list,
1228 struct xilinx_dma_tx_descriptor, node);
1229 tail_segment = list_last_entry(&tail_desc->segments,
1230 struct xilinx_cdma_tx_segment, node);
1231
1232 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1233 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1234 ctrl_reg |= chan->desc_pendingcount <<
1235 XILINX_DMA_CR_COALESCE_SHIFT;
1236 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1237 }
1238
1239 if (chan->has_sg) {
1240 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1241 XILINX_CDMA_CR_SGMODE);
1242
1243 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1244 XILINX_CDMA_CR_SGMODE);
1245
1246 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1247 head_desc->async_tx.phys);
1248
1249 /* Update tail ptr register which will start the transfer */
1250 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1251 tail_segment->phys);
1252 } else {
1253 /* In simple mode */
1254 struct xilinx_cdma_tx_segment *segment;
1255 struct xilinx_cdma_desc_hw *hw;
1256
1257 segment = list_first_entry(&head_desc->segments,
1258 struct xilinx_cdma_tx_segment,
1259 node);
1260
1261 hw = &segment->hw;
1262
1263 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1264 xilinx_prep_dma_addr_t(hw->src_addr));
1265 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1266 xilinx_prep_dma_addr_t(hw->dest_addr));
1267
1268 /* Start the transfer */
1269 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1270 hw->control & chan->xdev->max_buffer_len);
1271 }
1272
1273 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1274 chan->desc_pendingcount = 0;
1275 chan->idle = false;
1276}
1277
1278/**
1279 * xilinx_dma_start_transfer - Starts DMA transfer
1280 * @chan: Driver specific channel struct pointer
1281 */
1282static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1283{
1284 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1285 struct xilinx_axidma_tx_segment *tail_segment;
1286 u32 reg;
1287
1288 if (chan->err)
1289 return;
1290
1291 if (list_empty(&chan->pending_list))
1292 return;
1293
1294 if (!chan->idle)
1295 return;
1296
1297 head_desc = list_first_entry(&chan->pending_list,
1298 struct xilinx_dma_tx_descriptor, node);
1299 tail_desc = list_last_entry(&chan->pending_list,
1300 struct xilinx_dma_tx_descriptor, node);
1301 tail_segment = list_last_entry(&tail_desc->segments,
1302 struct xilinx_axidma_tx_segment, node);
1303
1304 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1305
1306 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1307 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1308 reg |= chan->desc_pendingcount <<
1309 XILINX_DMA_CR_COALESCE_SHIFT;
1310 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1311 }
1312
1313 if (chan->has_sg && !chan->xdev->mcdma)
1314 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1315 head_desc->async_tx.phys);
1316
1317 if (chan->has_sg && chan->xdev->mcdma) {
1318 if (chan->direction == DMA_MEM_TO_DEV) {
1319 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1320 head_desc->async_tx.phys);
1321 } else {
1322 if (!chan->tdest) {
1323 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1324 head_desc->async_tx.phys);
1325 } else {
1326 dma_ctrl_write(chan,
1327 XILINX_DMA_MCRX_CDESC(chan->tdest),
1328 head_desc->async_tx.phys);
1329 }
1330 }
1331 }
1332
1333 xilinx_dma_start(chan);
1334
1335 if (chan->err)
1336 return;
1337
1338 /* Start the transfer */
1339 if (chan->has_sg && !chan->xdev->mcdma) {
1340 if (chan->cyclic)
1341 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1342 chan->cyclic_seg_v->phys);
1343 else
1344 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1345 tail_segment->phys);
1346 } else if (chan->has_sg && chan->xdev->mcdma) {
1347 if (chan->direction == DMA_MEM_TO_DEV) {
1348 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1349 tail_segment->phys);
1350 } else {
1351 if (!chan->tdest) {
1352 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1353 tail_segment->phys);
1354 } else {
1355 dma_ctrl_write(chan,
1356 XILINX_DMA_MCRX_TDESC(chan->tdest),
1357 tail_segment->phys);
1358 }
1359 }
1360 } else {
1361 struct xilinx_axidma_tx_segment *segment;
1362 struct xilinx_axidma_desc_hw *hw;
1363
1364 segment = list_first_entry(&head_desc->segments,
1365 struct xilinx_axidma_tx_segment,
1366 node);
1367 hw = &segment->hw;
1368
1369 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1370 xilinx_prep_dma_addr_t(hw->buf_addr));
1371
1372 /* Start the transfer */
1373 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1374 hw->control & chan->xdev->max_buffer_len);
1375 }
1376
1377 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1378 chan->desc_pendingcount = 0;
1379 chan->idle = false;
1380}
1381
1382/**
1383 * xilinx_dma_issue_pending - Issue pending transactions
1384 * @dchan: DMA channel
1385 */
1386static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1387{
1388 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1389 unsigned long flags;
1390
1391 spin_lock_irqsave(&chan->lock, flags);
1392 chan->start_transfer(chan);
1393 spin_unlock_irqrestore(&chan->lock, flags);
1394}
1395
1396/**
1397 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1398 * @chan : xilinx DMA channel
1399 *
1400 * CONTEXT: hardirq
1401 */
1402static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1403{
1404 struct xilinx_dma_tx_descriptor *desc, *next;
1405
1406 /* This function was invoked with lock held */
1407 if (list_empty(&chan->active_list))
1408 return;
1409
1410 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1411 list_del(&desc->node);
1412 if (!desc->cyclic)
1413 dma_cookie_complete(&desc->async_tx);
1414 list_add_tail(&desc->node, &chan->done_list);
1415 }
1416}
1417
1418/**
1419 * xilinx_dma_reset - Reset DMA channel
1420 * @chan: Driver specific DMA channel
1421 *
1422 * Return: '0' on success and failure value on error
1423 */
1424static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1425{
1426 int err;
1427 u32 tmp;
1428
1429 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1430
1431 /* Wait for the hardware to finish reset */
1432 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1433 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1434 XILINX_DMA_LOOP_COUNT);
1435
1436 if (err) {
1437 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1438 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1439 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1440 return -ETIMEDOUT;
1441 }
1442
1443 chan->err = false;
1444 chan->idle = true;
1445 chan->desc_pendingcount = 0;
1446 chan->desc_submitcount = 0;
1447
1448 return err;
1449}
1450
1451/**
1452 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1453 * @chan: Driver specific DMA channel
1454 *
1455 * Return: '0' on success and failure value on error
1456 */
1457static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1458{
1459 int err;
1460
1461 /* Reset VDMA */
1462 err = xilinx_dma_reset(chan);
1463 if (err)
1464 return err;
1465
1466 /* Enable interrupts */
1467 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1468 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1469
1470 return 0;
1471}
1472
1473/**
1474 * xilinx_dma_irq_handler - DMA Interrupt handler
1475 * @irq: IRQ number
1476 * @data: Pointer to the Xilinx DMA channel structure
1477 *
1478 * Return: IRQ_HANDLED/IRQ_NONE
1479 */
1480static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1481{
1482 struct xilinx_dma_chan *chan = data;
1483 u32 status;
1484
1485 /* Read the status and ack the interrupts. */
1486 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1487 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1488 return IRQ_NONE;
1489
1490 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1491 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1492
1493 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1494 /*
1495 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1496 * error is recoverable, ignore it. Otherwise flag the error.
1497 *
 1498 * Only recoverable errors can be cleared in the DMASR register;
 1499 * make sure not to write 1 to other error bits.
1500 */
1501 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1502
1503 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1504 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1505
1506 if (!chan->flush_on_fsync ||
1507 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1508 dev_err(chan->dev,
1509 "Channel %p has errors %x, cdr %x tdr %x\n",
1510 chan, errors,
1511 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1512 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1513 chan->err = true;
1514 }
1515 }
1516
1517 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1518 /*
 1519 * The device is taking too long to complete the transfer when the
 1520 * user requires responsiveness.
1521 */
1522 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1523 }
1524
1525 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1526 spin_lock(&chan->lock);
1527 xilinx_dma_complete_descriptor(chan);
1528 chan->idle = true;
1529 chan->start_transfer(chan);
1530 spin_unlock(&chan->lock);
1531 }
1532
1533 tasklet_schedule(&chan->tasklet);
1534 return IRQ_HANDLED;
1535}
1536
1537/**
1538 * append_desc_queue - Queuing descriptor
1539 * @chan: Driver specific dma channel
1540 * @desc: dma transaction descriptor
1541 */
1542static void append_desc_queue(struct xilinx_dma_chan *chan,
1543 struct xilinx_dma_tx_descriptor *desc)
1544{
1545 struct xilinx_vdma_tx_segment *tail_segment;
1546 struct xilinx_dma_tx_descriptor *tail_desc;
1547 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1548 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1549
1550 if (list_empty(&chan->pending_list))
1551 goto append;
1552
1553 /*
1554 * Add the hardware descriptor to the chain of hardware descriptors
1555 * that already exists in memory.
1556 */
1557 tail_desc = list_last_entry(&chan->pending_list,
1558 struct xilinx_dma_tx_descriptor, node);
1559 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1560 tail_segment = list_last_entry(&tail_desc->segments,
1561 struct xilinx_vdma_tx_segment,
1562 node);
1563 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1564 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1565 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1566 struct xilinx_cdma_tx_segment,
1567 node);
1568 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1569 } else {
1570 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1571 struct xilinx_axidma_tx_segment,
1572 node);
1573 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1574 }
1575
1576 /*
1577 * Add the software descriptor and all children to the list
1578 * of pending transactions
1579 */
1580append:
1581 list_add_tail(&desc->node, &chan->pending_list);
1582 chan->desc_pendingcount++;
1583
1584 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1585 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1586 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1587 chan->desc_pendingcount = chan->num_frms;
1588 }
1589}
1590
1591/**
1592 * xilinx_dma_tx_submit - Submit DMA transaction
1593 * @tx: Async transaction descriptor
1594 *
1595 * Return: cookie value on success and failure value on error
1596 */
1597static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1598{
1599 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1600 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1601 dma_cookie_t cookie;
1602 unsigned long flags;
1603 int err;
1604
1605 if (chan->cyclic) {
1606 xilinx_dma_free_tx_descriptor(chan, desc);
1607 return -EBUSY;
1608 }
1609
1610 if (chan->err) {
1611 /*
 1612 * If the reset fails, the system needs a hard reset; the
 1613 * channel is no longer functional.
1614 */
1615 err = xilinx_dma_chan_reset(chan);
1616 if (err < 0)
1617 return err;
1618 }
1619
1620 spin_lock_irqsave(&chan->lock, flags);
1621
1622 cookie = dma_cookie_assign(tx);
1623
1624 /* Put this transaction onto the tail of the pending queue */
1625 append_desc_queue(chan, desc);
1626
1627 if (desc->cyclic)
1628 chan->cyclic = true;
1629
1630 chan->terminating = false;
1631
1632 spin_unlock_irqrestore(&chan->lock, flags);
1633
1634 return cookie;
1635}
1636
1637/**
1638 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1639 * DMA_SLAVE transaction
1640 * @dchan: DMA channel
1641 * @xt: Interleaved template pointer
1642 * @flags: transfer ack flags
1643 *
1644 * Return: Async transaction descriptor on success and NULL on failure
1645 */
1646static struct dma_async_tx_descriptor *
1647xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1648 struct dma_interleaved_template *xt,
1649 unsigned long flags)
1650{
1651 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1652 struct xilinx_dma_tx_descriptor *desc;
1653 struct xilinx_vdma_tx_segment *segment;
1654 struct xilinx_vdma_desc_hw *hw;
1655
1656 if (!is_slave_direction(xt->dir))
1657 return NULL;
1658
1659 if (!xt->numf || !xt->sgl[0].size)
1660 return NULL;
1661
1662 if (xt->frame_size != 1)
1663 return NULL;
1664
1665 /* Allocate a transaction descriptor. */
1666 desc = xilinx_dma_alloc_tx_descriptor(chan);
1667 if (!desc)
1668 return NULL;
1669
1670 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1671 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1672 async_tx_ack(&desc->async_tx);
1673
1674 /* Allocate the link descriptor from DMA pool */
1675 segment = xilinx_vdma_alloc_tx_segment(chan);
1676 if (!segment)
1677 goto error;
1678
1679 /* Fill in the hardware descriptor */
1680 hw = &segment->hw;
1681 hw->vsize = xt->numf;
1682 hw->hsize = xt->sgl[0].size;
1683 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1684 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1685 hw->stride |= chan->config.frm_dly <<
1686 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1687
1688 if (xt->dir != DMA_MEM_TO_DEV) {
1689 if (chan->ext_addr) {
1690 hw->buf_addr = lower_32_bits(xt->dst_start);
1691 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1692 } else {
1693 hw->buf_addr = xt->dst_start;
1694 }
1695 } else {
1696 if (chan->ext_addr) {
1697 hw->buf_addr = lower_32_bits(xt->src_start);
1698 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1699 } else {
1700 hw->buf_addr = xt->src_start;
1701 }
1702 }
1703
1704 /* Insert the segment into the descriptor segments list. */
1705 list_add_tail(&segment->node, &desc->segments);
1706
1707 /* Link the last hardware descriptor with the first. */
1708 segment = list_first_entry(&desc->segments,
1709 struct xilinx_vdma_tx_segment, node);
1710 desc->async_tx.phys = segment->phys;
1711
1712 return &desc->async_tx;
1713
1714error:
1715 xilinx_dma_free_tx_descriptor(chan, desc);
1716 return NULL;
1717}
1718
1719/**
1720 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1721 * @dchan: DMA channel
1722 * @dma_dst: destination address
1723 * @dma_src: source address
1724 * @len: transfer length
1725 * @flags: transfer ack flags
1726 *
1727 * Return: Async transaction descriptor on success and NULL on failure
1728 */
1729static struct dma_async_tx_descriptor *
1730xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1731 dma_addr_t dma_src, size_t len, unsigned long flags)
1732{
1733 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1734 struct xilinx_dma_tx_descriptor *desc;
1735 struct xilinx_cdma_tx_segment *segment;
1736 struct xilinx_cdma_desc_hw *hw;
1737
1738 if (!len || len > chan->xdev->max_buffer_len)
1739 return NULL;
1740
1741 desc = xilinx_dma_alloc_tx_descriptor(chan);
1742 if (!desc)
1743 return NULL;
1744
1745 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1746 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1747
1748 /* Allocate the link descriptor from DMA pool */
1749 segment = xilinx_cdma_alloc_tx_segment(chan);
1750 if (!segment)
1751 goto error;
1752
1753 hw = &segment->hw;
1754 hw->control = len;
1755 hw->src_addr = dma_src;
1756 hw->dest_addr = dma_dst;
1757 if (chan->ext_addr) {
1758 hw->src_addr_msb = upper_32_bits(dma_src);
1759 hw->dest_addr_msb = upper_32_bits(dma_dst);
1760 }
1761
1762 /* Insert the segment into the descriptor segments list. */
1763 list_add_tail(&segment->node, &desc->segments);
1764
1765 desc->async_tx.phys = segment->phys;
1766 hw->next_desc = segment->phys;
1767
1768 return &desc->async_tx;
1769
1770error:
1771 xilinx_dma_free_tx_descriptor(chan, desc);
1772 return NULL;
1773}
1774
1775/**
1776 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1777 * @dchan: DMA channel
1778 * @sgl: scatterlist to transfer to/from
1779 * @sg_len: number of entries in @scatterlist
1780 * @direction: DMA direction
1781 * @flags: transfer ack flags
1782 * @context: APP words of the descriptor
1783 *
1784 * Return: Async transaction descriptor on success and NULL on failure
1785 */
1786static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1787 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1788 enum dma_transfer_direction direction, unsigned long flags,
1789 void *context)
1790{
1791 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1792 struct xilinx_dma_tx_descriptor *desc;
1793 struct xilinx_axidma_tx_segment *segment = NULL;
1794 u32 *app_w = (u32 *)context;
1795 struct scatterlist *sg;
1796 size_t copy;
1797 size_t sg_used;
1798 unsigned int i;
1799
1800 if (!is_slave_direction(direction))
1801 return NULL;
1802
1803 /* Allocate a transaction descriptor. */
1804 desc = xilinx_dma_alloc_tx_descriptor(chan);
1805 if (!desc)
1806 return NULL;
1807
1808 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1809 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1810
1811 /* Build transactions using information in the scatter gather list */
1812 for_each_sg(sgl, sg, sg_len, i) {
1813 sg_used = 0;
1814
1815 /* Loop until the entire scatterlist entry is used */
1816 while (sg_used < sg_dma_len(sg)) {
1817 struct xilinx_axidma_desc_hw *hw;
1818
1819 /* Get a free segment */
1820 segment = xilinx_axidma_alloc_tx_segment(chan);
1821 if (!segment)
1822 goto error;
1823
1824 /*
1825 * Calculate the maximum number of bytes to transfer,
1826 * making sure it is less than the hw limit
1827 */
1828 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1829 sg_used);
1830 hw = &segment->hw;
1831
1832 /* Fill in the descriptor */
1833 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1834 sg_used, 0);
1835
1836 hw->control = copy;
1837
1838 if (chan->direction == DMA_MEM_TO_DEV) {
1839 if (app_w)
1840 memcpy(hw->app, app_w, sizeof(u32) *
1841 XILINX_DMA_NUM_APP_WORDS);
1842 }
1843
1844 sg_used += copy;
1845
1846 /*
1847 * Insert the segment into the descriptor segments
1848 * list.
1849 */
1850 list_add_tail(&segment->node, &desc->segments);
1851 }
1852 }
1853
1854 segment = list_first_entry(&desc->segments,
1855 struct xilinx_axidma_tx_segment, node);
1856 desc->async_tx.phys = segment->phys;
1857
1858	/* For DMA_MEM_TO_DEV, mark the first segment SOP and the last EOP */
1859 if (chan->direction == DMA_MEM_TO_DEV) {
1860 segment->hw.control |= XILINX_DMA_BD_SOP;
1861 segment = list_last_entry(&desc->segments,
1862 struct xilinx_axidma_tx_segment,
1863 node);
1864 segment->hw.control |= XILINX_DMA_BD_EOP;
1865 }
1866
1867 return &desc->async_tx;
1868
1869error:
1870 xilinx_dma_free_tx_descriptor(chan, desc);
1871 return NULL;
1872}
1873
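/*
 * A minimal, hypothetical client sketch for the slave scatter-gather
 * path above; the already DMA-mapped scatterlist, its length and the
 * direction are assumptions:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */
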
1874/**
1875 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1876 * @dchan: DMA channel
1877 * @buf_addr: Physical address of the buffer
1878 * @buf_len: Total length of the cyclic buffers
1879 * @period_len: length of individual cyclic buffer
1880 * @direction: DMA direction
1881 * @flags: transfer ack flags
1882 *
1883 * Return: Async transaction descriptor on success and NULL on failure
1884 */
1885static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1886 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1887 size_t period_len, enum dma_transfer_direction direction,
1888 unsigned long flags)
1889{
1890 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1891 struct xilinx_dma_tx_descriptor *desc;
1892 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1893 size_t copy, sg_used;
1894 unsigned int num_periods;
1895 int i;
1896 u32 reg;
1897
1898 if (!period_len)
1899 return NULL;
1900
1901 num_periods = buf_len / period_len;
1902
1903 if (!num_periods)
1904 return NULL;
1905
1906 if (!is_slave_direction(direction))
1907 return NULL;
1908
1909 /* Allocate a transaction descriptor. */
1910 desc = xilinx_dma_alloc_tx_descriptor(chan);
1911 if (!desc)
1912 return NULL;
1913
1914 chan->direction = direction;
1915 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1916 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1917
1918 for (i = 0; i < num_periods; ++i) {
1919 sg_used = 0;
1920
1921 while (sg_used < period_len) {
1922 struct xilinx_axidma_desc_hw *hw;
1923
1924 /* Get a free segment */
1925 segment = xilinx_axidma_alloc_tx_segment(chan);
1926 if (!segment)
1927 goto error;
1928
1929 /*
1930 * Calculate the maximum number of bytes to transfer,
1931 * making sure it is less than the hw limit
1932 */
1933 copy = xilinx_dma_calc_copysize(chan, period_len,
1934 sg_used);
1935 hw = &segment->hw;
1936 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1937 period_len * i);
1938 hw->control = copy;
1939
1940 if (prev)
1941 prev->hw.next_desc = segment->phys;
1942
1943 prev = segment;
1944 sg_used += copy;
1945
1946 /*
1947 * Insert the segment into the descriptor segments
1948 * list.
1949 */
1950 list_add_tail(&segment->node, &desc->segments);
1951 }
1952 }
1953
1954 head_segment = list_first_entry(&desc->segments,
1955 struct xilinx_axidma_tx_segment, node);
1956 desc->async_tx.phys = head_segment->phys;
1957
1958 desc->cyclic = true;
1959 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1960 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1961 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1962
1963 segment = list_last_entry(&desc->segments,
1964 struct xilinx_axidma_tx_segment,
1965 node);
1966 segment->hw.next_desc = (u32) head_segment->phys;
1967
1968	/* For DMA_MEM_TO_DEV, mark the first segment SOP and the last EOP */
1969 if (direction == DMA_MEM_TO_DEV) {
1970 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1971 segment->hw.control |= XILINX_DMA_BD_EOP;
1972 }
1973
1974 return &desc->async_tx;
1975
1976error:
1977 xilinx_dma_free_tx_descriptor(chan, desc);
1978 return NULL;
1979}
1980
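/*
 * A minimal, hypothetical client sketch for the cyclic path above
 * (typical for audio-style ring buffers); the completion callback runs
 * once per period, and the buffer layout and callback names are
 * assumptions:
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_period_done;
 *		tx->callback_param = my_ctx;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */
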
1981/**
1982 * xilinx_dma_prep_interleaved - prepare a descriptor for a
1983 * DMA_SLAVE transaction
1984 * @dchan: DMA channel
1985 * @xt: Interleaved template pointer
1986 * @flags: transfer ack flags
1987 *
1988 * Return: Async transaction descriptor on success and NULL on failure
1989 */
1990static struct dma_async_tx_descriptor *
1991xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1992 struct dma_interleaved_template *xt,
1993 unsigned long flags)
1994{
1995 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1996 struct xilinx_dma_tx_descriptor *desc;
1997 struct xilinx_axidma_tx_segment *segment;
1998 struct xilinx_axidma_desc_hw *hw;
1999
2000 if (!is_slave_direction(xt->dir))
2001 return NULL;
2002
2003 if (!xt->numf || !xt->sgl[0].size)
2004 return NULL;
2005
2006 if (xt->frame_size != 1)
2007 return NULL;
2008
2009 /* Allocate a transaction descriptor. */
2010 desc = xilinx_dma_alloc_tx_descriptor(chan);
2011 if (!desc)
2012 return NULL;
2013
2014 chan->direction = xt->dir;
2015 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2016 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2017
2018 /* Get a free segment */
2019 segment = xilinx_axidma_alloc_tx_segment(chan);
2020 if (!segment)
2021 goto error;
2022
2023 hw = &segment->hw;
2024
2025 /* Fill in the descriptor */
2026 if (xt->dir != DMA_MEM_TO_DEV)
2027 hw->buf_addr = xt->dst_start;
2028 else
2029 hw->buf_addr = xt->src_start;
2030
2031 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2032 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2033 XILINX_DMA_BD_VSIZE_MASK;
2034 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2035 XILINX_DMA_BD_STRIDE_MASK;
2036 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2037
2038 /*
2039 * Insert the segment into the descriptor segments
2040 * list.
2041 */
2042 list_add_tail(&segment->node, &desc->segments);
2043
2045 segment = list_first_entry(&desc->segments,
2046 struct xilinx_axidma_tx_segment, node);
2047 desc->async_tx.phys = segment->phys;
2048
2049	/* For DMA_MEM_TO_DEV, mark the first segment SOP and the last EOP */
2050 if (xt->dir == DMA_MEM_TO_DEV) {
2051 segment->hw.control |= XILINX_DMA_BD_SOP;
2052 segment = list_last_entry(&desc->segments,
2053 struct xilinx_axidma_tx_segment,
2054 node);
2055 segment->hw.control |= XILINX_DMA_BD_EOP;
2056 }
2057
2058 return &desc->async_tx;
2059
2060error:
2061 xilinx_dma_free_tx_descriptor(chan, desc);
2062 return NULL;
2063}
2064
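/*
 * A hypothetical sketch of the single-frame interleaved template the
 * function above accepts (xt->frame_size must be 1); the geometry
 * values are assumptions:
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = fb_phys;
 *	xt->numf = height;				lines (vsize)
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width_bytes;			bytes per line (hsize)
 *	xt->sgl[0].icg = stride_bytes - width_bytes;	inter-line gap
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */
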
2065/**
2066 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2067 * @dchan: Driver specific DMA Channel pointer
2068 *
2069 * Return: '0' always.
2070 */
2071static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2072{
2073 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2074 u32 reg;
2075 int err;
2076
2077 if (chan->cyclic)
2078 xilinx_dma_chan_reset(chan);
2079
2080 err = chan->stop_transfer(chan);
2081 if (err) {
2082 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2083 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2084 chan->err = true;
2085 }
2086
2087 /* Remove and free all of the descriptors in the lists */
2088 chan->terminating = true;
2089 xilinx_dma_free_descriptors(chan);
2090 chan->idle = true;
2091
2092 if (chan->cyclic) {
2093 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2094 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2095 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2096 chan->cyclic = false;
2097 }
2098
2099 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2100 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2101 XILINX_CDMA_CR_SGMODE);
2102
2103 return 0;
2104}
2105
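/*
 * Clients normally reach the terminate path above through
 * dmaengine_terminate_sync() (or dmaengine_terminate_async() plus
 * dmaengine_synchronize()); a hypothetical teardown sketch:
 *
 *	dmaengine_terminate_sync(chan);
 *	dma_release_channel(chan);
 */
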
2106/**
2107 * xilinx_vdma_channel_set_config - Configure VDMA channel
2108 * Run-time configuration for AXI VDMA, supports:
2109 * . halt the channel
2110 * . configure interrupt coalescing and inter-packet delay threshold
2111 * . start/stop parking
2112 * . enable genlock
2113 *
2114 * @dchan: DMA channel
2115 * @cfg: VDMA device configuration pointer
2116 *
2117 * Return: '0' on success and failure value on error
2118 */
2119int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2120 struct xilinx_vdma_config *cfg)
2121{
2122 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2123 u32 dmacr;
2124
2125 if (cfg->reset)
2126 return xilinx_dma_chan_reset(chan);
2127
2128 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2129
2130 chan->config.frm_dly = cfg->frm_dly;
2131 chan->config.park = cfg->park;
2132
2133 /* genlock settings */
2134 chan->config.gen_lock = cfg->gen_lock;
2135 chan->config.master = cfg->master;
2136
2137 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2138 if (cfg->gen_lock && chan->genlock) {
2139 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2140 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2141 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2142 }
2143
2144 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2145 chan->config.vflip_en = cfg->vflip_en;
2146
2147 if (cfg->park)
2148 chan->config.park_frm = cfg->park_frm;
2149 else
2150 chan->config.park_frm = -1;
2151
2152 chan->config.coalesc = cfg->coalesc;
2153 chan->config.delay = cfg->delay;
2154
2155 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2156 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2157 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2158 chan->config.coalesc = cfg->coalesc;
2159 }
2160
2161 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2162 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2163 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2164 chan->config.delay = cfg->delay;
2165 }
2166
2167 /* FSync Source selection */
2168 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2169 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2170
2171 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2172
2173 return 0;
2174}
2175EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
2176
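/*
 * A hypothetical sketch of a VDMA client using the exported helper
 * above; struct xilinx_vdma_config comes from <linux/dma/xilinx_dma.h>
 * and the chosen values are assumptions:
 *
 *	struct xilinx_vdma_config cfg = { };
 *
 *	cfg.park = 1;
 *	cfg.park_frm = 0;
 *	cfg.coalesc = 1;
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */
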
2177/* -----------------------------------------------------------------------------
2178 * Probe and remove
2179 */
2180
2181/**
2182 * xilinx_dma_chan_remove - Per Channel remove function
2183 * @chan: Driver specific DMA channel
2184 */
2185static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2186{
2187 /* Disable all interrupts */
2188 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2189 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2190
2191 if (chan->irq > 0)
2192 free_irq(chan->irq, chan);
2193
2194 tasklet_kill(&chan->tasklet);
2195
2196 list_del(&chan->common.device_node);
2197}
2198
2199static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2200 struct clk **tx_clk, struct clk **rx_clk,
2201 struct clk **sg_clk, struct clk **tmp_clk)
2202{
2203 int err;
2204
2205 *tmp_clk = NULL;
2206
2207 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2208 if (IS_ERR(*axi_clk)) {
2209 err = PTR_ERR(*axi_clk);
2210 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2211 return err;
2212 }
2213
2214 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2215 if (IS_ERR(*tx_clk))
2216 *tx_clk = NULL;
2217
2218 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2219 if (IS_ERR(*rx_clk))
2220 *rx_clk = NULL;
2221
2222 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2223 if (IS_ERR(*sg_clk))
2224 *sg_clk = NULL;
2225
2226 err = clk_prepare_enable(*axi_clk);
2227 if (err) {
2228 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2229 return err;
2230 }
2231
2232 err = clk_prepare_enable(*tx_clk);
2233 if (err) {
2234 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2235 goto err_disable_axiclk;
2236 }
2237
2238 err = clk_prepare_enable(*rx_clk);
2239 if (err) {
2240 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2241 goto err_disable_txclk;
2242 }
2243
2244 err = clk_prepare_enable(*sg_clk);
2245 if (err) {
2246 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2247 goto err_disable_rxclk;
2248 }
2249
2250 return 0;
2251
2252err_disable_rxclk:
2253 clk_disable_unprepare(*rx_clk);
2254err_disable_txclk:
2255 clk_disable_unprepare(*tx_clk);
2256err_disable_axiclk:
2257 clk_disable_unprepare(*axi_clk);
2258
2259 return err;
2260}
2261
2262static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2263 struct clk **dev_clk, struct clk **tmp_clk,
2264 struct clk **tmp1_clk, struct clk **tmp2_clk)
2265{
2266 int err;
2267
2268 *tmp_clk = NULL;
2269 *tmp1_clk = NULL;
2270 *tmp2_clk = NULL;
2271
2272 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2273 if (IS_ERR(*axi_clk)) {
2274 err = PTR_ERR(*axi_clk);
2275 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2276 return err;
2277 }
2278
2279 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2280 if (IS_ERR(*dev_clk)) {
2281 err = PTR_ERR(*dev_clk);
2282 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2283 return err;
2284 }
2285
2286 err = clk_prepare_enable(*axi_clk);
2287 if (err) {
2288 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2289 return err;
2290 }
2291
2292 err = clk_prepare_enable(*dev_clk);
2293 if (err) {
2294 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2295 goto err_disable_axiclk;
2296 }
2297
2298 return 0;
2299
2300err_disable_axiclk:
2301 clk_disable_unprepare(*axi_clk);
2302
2303 return err;
2304}
2305
2306static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2307 struct clk **tx_clk, struct clk **txs_clk,
2308 struct clk **rx_clk, struct clk **rxs_clk)
2309{
2310 int err;
2311
2312 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2313 if (IS_ERR(*axi_clk)) {
2314 err = PTR_ERR(*axi_clk);
2315 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2316 return err;
2317 }
2318
2319 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2320 if (IS_ERR(*tx_clk))
2321 *tx_clk = NULL;
2322
2323 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2324 if (IS_ERR(*txs_clk))
2325 *txs_clk = NULL;
2326
2327 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2328 if (IS_ERR(*rx_clk))
2329 *rx_clk = NULL;
2330
2331 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2332 if (IS_ERR(*rxs_clk))
2333 *rxs_clk = NULL;
2334
2335 err = clk_prepare_enable(*axi_clk);
2336 if (err) {
2337 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2338 return err;
2339 }
2340
2341 err = clk_prepare_enable(*tx_clk);
2342 if (err) {
2343 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2344 goto err_disable_axiclk;
2345 }
2346
2347 err = clk_prepare_enable(*txs_clk);
2348 if (err) {
2349 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2350 goto err_disable_txclk;
2351 }
2352
2353 err = clk_prepare_enable(*rx_clk);
2354 if (err) {
2355 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2356 goto err_disable_txsclk;
2357 }
2358
2359 err = clk_prepare_enable(*rxs_clk);
2360 if (err) {
2361 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2362 goto err_disable_rxclk;
2363 }
2364
2365 return 0;
2366
2367err_disable_rxclk:
2368 clk_disable_unprepare(*rx_clk);
2369err_disable_txsclk:
2370 clk_disable_unprepare(*txs_clk);
2371err_disable_txclk:
2372 clk_disable_unprepare(*tx_clk);
2373err_disable_axiclk:
2374 clk_disable_unprepare(*axi_clk);
2375
2376 return err;
2377}
2378
2379static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2380{
2381 clk_disable_unprepare(xdev->rxs_clk);
2382 clk_disable_unprepare(xdev->rx_clk);
2383 clk_disable_unprepare(xdev->txs_clk);
2384 clk_disable_unprepare(xdev->tx_clk);
2385 clk_disable_unprepare(xdev->axi_clk);
2386}
2387
2388/**
2389 * xilinx_dma_chan_probe - Per Channel Probing
2390 * It gets the channel features from the device tree entry and
2391 * initializes the special channel handling routines.
2392 *
2393 * @xdev: Driver specific device structure
2394 * @node: Device node
2395 * @chan_id: DMA Channel id
2396 *
2397 * Return: '0' on success and failure value on error
2398 */
2399static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2400 struct device_node *node, int chan_id)
2401{
2402 struct xilinx_dma_chan *chan;
2403 bool has_dre = false;
2404 u32 value, width;
2405 int err;
2406
2407 /* Allocate and initialize the channel structure */
2408 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2409 if (!chan)
2410 return -ENOMEM;
2411
2412 chan->dev = xdev->dev;
2413 chan->xdev = xdev;
2414 chan->desc_pendingcount = 0x0;
2415 chan->ext_addr = xdev->ext_addr;
2416	/*
2417	 * This variable ensures that descriptors are not submitted while
2418	 * the DMA engine is in progress; it avoids polling a bit in the
2419	 * status register to know the DMA state in the driver hot path.
2420	 */
2421 chan->idle = true;
2422
2423 spin_lock_init(&chan->lock);
2424 INIT_LIST_HEAD(&chan->pending_list);
2425 INIT_LIST_HEAD(&chan->done_list);
2426 INIT_LIST_HEAD(&chan->active_list);
2427 INIT_LIST_HEAD(&chan->free_seg_list);
2428
2429 /* Retrieve the channel properties from the device tree */
2430 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2431
2432 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2433
2434 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2435 if (err) {
2436 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2437 return err;
2438 }
2439 width = value >> 3; /* Convert bits to bytes */
2440
2441 /* If data width is greater than 8 bytes, DRE is not in hw */
2442 if (width > 8)
2443 has_dre = false;
2444
2445 if (!has_dre)
2446 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2447
2448 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2449 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2450 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2451 chan->direction = DMA_MEM_TO_DEV;
2452 chan->id = chan_id;
2453 chan->tdest = chan_id;
2454
2455 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2456 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2457 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2458 chan->config.park = 1;
2459
2460 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2461 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2462 chan->flush_on_fsync = true;
2463 }
2464 } else if (of_device_is_compatible(node,
2465 "xlnx,axi-vdma-s2mm-channel") ||
2466 of_device_is_compatible(node,
2467 "xlnx,axi-dma-s2mm-channel")) {
2468 chan->direction = DMA_DEV_TO_MEM;
2469 chan->id = chan_id;
2470 chan->tdest = chan_id - xdev->nr_channels;
2471 chan->has_vflip = of_property_read_bool(node,
2472 "xlnx,enable-vert-flip");
2473 if (chan->has_vflip) {
2474 chan->config.vflip_en = dma_read(chan,
2475 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2476 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2477 }
2478
2479 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2480 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2481 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2482 chan->config.park = 1;
2483
2484 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2485 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2486 chan->flush_on_fsync = true;
2487 }
2488 } else {
2489 dev_err(xdev->dev, "Invalid channel compatible node\n");
2490 return -EINVAL;
2491 }
2492
2493 /* Request the interrupt */
2494 chan->irq = irq_of_parse_and_map(node, 0);
2495 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2496 "xilinx-dma-controller", chan);
2497 if (err) {
2498 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2499 return err;
2500 }
2501
2502 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2503 chan->start_transfer = xilinx_dma_start_transfer;
2504 chan->stop_transfer = xilinx_dma_stop_transfer;
2505 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2506 chan->start_transfer = xilinx_cdma_start_transfer;
2507 chan->stop_transfer = xilinx_cdma_stop_transfer;
2508 } else {
2509 chan->start_transfer = xilinx_vdma_start_transfer;
2510 chan->stop_transfer = xilinx_dma_stop_transfer;
2511 }
2512
2513 /* check if SG is enabled (only for AXIDMA and CDMA) */
2514 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2515 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2516 XILINX_DMA_DMASR_SG_MASK)
2517 chan->has_sg = true;
2518 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2519 chan->has_sg ? "enabled" : "disabled");
2520 }
2521
2522 /* Initialize the tasklet */
2523 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2524 (unsigned long)chan);
2525
2526 /*
2527 * Initialize the DMA channel and add it to the DMA engine channels
2528 * list.
2529 */
2530 chan->common.device = &xdev->common;
2531
2532 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2533 xdev->chan[chan->id] = chan;
2534
2535 /* Reset the channel */
2536 err = xilinx_dma_chan_reset(chan);
2537 if (err < 0) {
2538 dev_err(xdev->dev, "Reset channel failed\n");
2539 return err;
2540 }
2541
2542 return 0;
2543}
2544
2545/**
2546 * xilinx_dma_child_probe - Per child node probe
2547 * It gets the number of dma-channels per child node from the
2548 * device tree and initializes all the channels.
2549 *
2550 * @xdev: Driver specific device structure
2551 * @node: Device node
2552 *
2553 * Return: 0 always.
2554 */
2555static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2556 struct device_node *node)
2557{
2558 int ret, i;
2559 u32 nr_channels = 1;
2560
2561 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2562 if ((ret < 0) && xdev->mcdma)
2563 dev_warn(xdev->dev, "missing dma-channels property\n");
2564
2565 for (i = 0; i < nr_channels; i++)
2566 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2567
2568 xdev->nr_channels += nr_channels;
2569
2570 return 0;
2571}
2572
2573/**
2574 * of_dma_xilinx_xlate - Translation function
2575 * @dma_spec: Pointer to DMA specifier as found in the device tree
2576 * @ofdma: Pointer to DMA controller data
2577 *
2578 * Return: DMA channel pointer on success and NULL on error
2579 */
2580static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2581 struct of_dma *ofdma)
2582{
2583 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2584 int chan_id = dma_spec->args[0];
2585
2586 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2587 return NULL;
2588
2589 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2590}
2591
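/*
 * The translation above consumes a single specifier cell holding the
 * channel index, so a consumer device-tree node would reference the
 * controller roughly as follows (illustrative sketch, labels are
 * assumptions):
 *
 *	dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *	dma-names = "tx", "rx";
 */
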
2592static const struct xilinx_dma_config axidma_config = {
2593 .dmatype = XDMA_TYPE_AXIDMA,
2594 .clk_init = axidma_clk_init,
2595};
2596
2597static const struct xilinx_dma_config axicdma_config = {
2598 .dmatype = XDMA_TYPE_CDMA,
2599 .clk_init = axicdma_clk_init,
2600};
2601
2602static const struct xilinx_dma_config axivdma_config = {
2603 .dmatype = XDMA_TYPE_VDMA,
2604 .clk_init = axivdma_clk_init,
2605};
2606
2607static const struct of_device_id xilinx_dma_of_ids[] = {
2608 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2609 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2610 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2611 {}
2612};
2613MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2614
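/*
 * An illustrative AXI DMA controller node matching the table above;
 * addresses, clocks and interrupt values are assumptions, see the
 * xilinx_dma device-tree binding for the full property list:
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		#dma-cells = <1>;
 *		clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *			      "m_axi_s2mm_aclk", "m_axi_sg_aclk";
 *		clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *		xlnx,addrwidth = <32>;
 *		dma-channel@40400000 {
 *			compatible = "xlnx,axi-dma-mm2s-channel";
 *			interrupts = <0 59 4>;
 *			xlnx,datawidth = <32>;
 *		};
 *	};
 */
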
2615/**
2616 * xilinx_dma_probe - Driver probe function
2617 * @pdev: Pointer to the platform_device structure
2618 *
2619 * Return: '0' on success and failure value on error
2620 */
2621static int xilinx_dma_probe(struct platform_device *pdev)
2622{
2623 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2624 struct clk **, struct clk **, struct clk **)
2625 = axivdma_clk_init;
2626 struct device_node *node = pdev->dev.of_node;
2627 struct xilinx_dma_device *xdev;
2628 struct device_node *child, *np = pdev->dev.of_node;
2629 u32 num_frames, addr_width, len_width;
2630 int i, err;
2631
2632 /* Allocate and initialize the DMA engine structure */
2633 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2634 if (!xdev)
2635 return -ENOMEM;
2636
2637 xdev->dev = &pdev->dev;
2638 if (np) {
2639 const struct of_device_id *match;
2640
2641 match = of_match_node(xilinx_dma_of_ids, np);
2642 if (match && match->data) {
2643 xdev->dma_config = match->data;
2644 clk_init = xdev->dma_config->clk_init;
2645 }
2646 }
2647
2648 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2649 &xdev->rx_clk, &xdev->rxs_clk);
2650 if (err)
2651 return err;
2652
2653 /* Request and map I/O memory */
2654 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
2655 if (IS_ERR(xdev->regs)) {
2656 err = PTR_ERR(xdev->regs);
2657 goto disable_clks;
2658 }
2659 /* Retrieve the DMA engine properties from the device tree */
2660 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
2661
2662 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2663 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2664 if (!of_property_read_u32(node, "xlnx,sg-length-width",
2665 &len_width)) {
2666 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2667 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2668 dev_warn(xdev->dev,
2669 "invalid xlnx,sg-length-width property value. Using default width\n");
2670 } else {
2671 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2672 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2673 xdev->max_buffer_len =
2674 GENMASK(len_width - 1, 0);
2675 }
2676 }
2677 }
2678
2679 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2680 err = of_property_read_u32(node, "xlnx,num-fstores",
2681 &num_frames);
2682 if (err < 0) {
2683 dev_err(xdev->dev,
2684 "missing xlnx,num-fstores property\n");
2685 goto disable_clks;
2686 }
2687
2688 err = of_property_read_u32(node, "xlnx,flush-fsync",
2689 &xdev->flush_on_fsync);
2690 if (err < 0)
2691 dev_warn(xdev->dev,
2692 "missing xlnx,flush-fsync property\n");
2693 }
2694
2695 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2696 if (err < 0)
2697 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2698
2699 if (addr_width > 32)
2700 xdev->ext_addr = true;
2701 else
2702 xdev->ext_addr = false;
2703
2704 /* Set the dma mask bits */
2705 err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
2706 if (err < 0) {
2707 dev_err(xdev->dev, "DMA mask error %d\n", err);
2708 goto disable_clks;
2709 }
2710
2711 /* Initialize the DMA engine */
2712 xdev->common.dev = &pdev->dev;
2713
2714 INIT_LIST_HEAD(&xdev->common.channels);
2715 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2716 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2717 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2718 }
2719
2720 xdev->common.device_alloc_chan_resources =
2721 xilinx_dma_alloc_chan_resources;
2722 xdev->common.device_free_chan_resources =
2723 xilinx_dma_free_chan_resources;
2724 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2725 xdev->common.device_tx_status = xilinx_dma_tx_status;
2726 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2727 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2728 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2729 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2730 xdev->common.device_prep_dma_cyclic =
2731 xilinx_dma_prep_dma_cyclic;
2732 xdev->common.device_prep_interleaved_dma =
2733 xilinx_dma_prep_interleaved;
2734		/* Residue calculation is supported only by AXI DMA */
2735 xdev->common.residue_granularity =
2736 DMA_RESIDUE_GRANULARITY_SEGMENT;
2737 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2738 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2739 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2740 } else {
2741 xdev->common.device_prep_interleaved_dma =
2742 xilinx_vdma_dma_prep_interleaved;
2743 }
2744
2745 platform_set_drvdata(pdev, xdev);
2746
2747 /* Initialize the channels */
2748 for_each_child_of_node(node, child) {
2749 err = xilinx_dma_child_probe(xdev, child);
2750 if (err < 0) {
2751 of_node_put(child);
2752 goto error;
2753 }
2754 }
2755
2756 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2757 for (i = 0; i < xdev->nr_channels; i++)
2758 if (xdev->chan[i])
2759 xdev->chan[i]->num_frms = num_frames;
2760 }
2761
2762 /* Register the DMA engine with the core */
2763 err = dma_async_device_register(&xdev->common);
2764 if (err) {
2765 dev_err(xdev->dev, "failed to register the dma device\n");
2766 goto error;
2767 }
2768
2769 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2770 xdev);
2771 if (err < 0) {
2772 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2773 dma_async_device_unregister(&xdev->common);
2774 goto error;
2775 }
2776
2777 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2778 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2779 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2780 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2781 else
2782 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2783
2784 return 0;
2785
2786error:
2787 for (i = 0; i < xdev->nr_channels; i++)
2788 if (xdev->chan[i])
2789 xilinx_dma_chan_remove(xdev->chan[i]);
2790disable_clks:
2791 xdma_disable_allclks(xdev);
2792
2793 return err;
2794}
2795
2796/**
2797 * xilinx_dma_remove - Driver remove function
2798 * @pdev: Pointer to the platform_device structure
2799 *
2800 * Return: Always '0'
2801 */
2802static int xilinx_dma_remove(struct platform_device *pdev)
2803{
2804 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2805 int i;
2806
2807 of_dma_controller_free(pdev->dev.of_node);
2808
2809 dma_async_device_unregister(&xdev->common);
2810
2811 for (i = 0; i < xdev->nr_channels; i++)
2812 if (xdev->chan[i])
2813 xilinx_dma_chan_remove(xdev->chan[i]);
2814
2815 xdma_disable_allclks(xdev);
2816
2817 return 0;
2818}
2819
2820static struct platform_driver xilinx_vdma_driver = {
2821 .driver = {
2822 .name = "xilinx-vdma",
2823 .of_match_table = xilinx_dma_of_ids,
2824 },
2825 .probe = xilinx_dma_probe,
2826 .remove = xilinx_dma_remove,
2827};
2828
2829module_platform_driver(xilinx_vdma_driver);
2830
2831MODULE_AUTHOR("Xilinx, Inc.");
2832MODULE_DESCRIPTION("Xilinx VDMA driver");
2833MODULE_LICENSE("GPL v2");