| xj | b04a402 | 2021-11-25 15:01:52 +0800 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd | 
|  | 3 | * Author: Sugar <shuge@allwinnertech.com> | 
|  | 4 | * | 
|  | 5 | * Copyright (C) 2014 Maxime Ripard | 
|  | 6 | * Maxime Ripard <maxime.ripard@free-electrons.com> | 
|  | 7 | * | 
|  | 8 | * This program is free software; you can redistribute it and/or modify | 
|  | 9 | * it under the terms of the GNU General Public License as published by | 
|  | 10 | * the Free Software Foundation; either version 2 of the License, or | 
|  | 11 | * (at your option) any later version. | 
|  | 12 | */ | 
|  | 13 |  | 
|  | 14 | #include <linux/clk.h> | 
|  | 15 | #include <linux/delay.h> | 
|  | 16 | #include <linux/dmaengine.h> | 
|  | 17 | #include <linux/dmapool.h> | 
|  | 18 | #include <linux/interrupt.h> | 
|  | 19 | #include <linux/module.h> | 
|  | 20 | #include <linux/of_dma.h> | 
|  | 21 | #include <linux/of_device.h> | 
|  | 22 | #include <linux/platform_device.h> | 
|  | 23 | #include <linux/reset.h> | 
|  | 24 | #include <linux/slab.h> | 
|  | 25 | #include <linux/types.h> | 
|  | 26 |  | 
|  | 27 | #include "virt-dma.h" | 
|  | 28 |  | 
/*
 * Common registers
 */

/* Interrupt enable register for bank x; each bank covers 8 channels */
#define DMA_IRQ_EN(x)		((x) * 0x04)
/* Per-channel IRQ cause bits, shifted by (channel % 8) * DMA_IRQ_CHAN_WIDTH */
#define DMA_IRQ_HALF			BIT(0)
#define DMA_IRQ_PKG			BIT(1)
#define DMA_IRQ_QUEUE			BIT(2)

/* Channels reported per IRQ enable/status register */
#define DMA_IRQ_CHAN_NR			8
/* Bits used per channel inside an IRQ register */
#define DMA_IRQ_CHAN_WIDTH		4


/* Interrupt pending register for bank x; written back to acknowledge */
#define DMA_IRQ_STAT(x)		((x) * 0x04 + 0x10)

#define DMA_STAT		0x30

/* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */
#define DMA_MAX_CHANNELS	(DMA_IRQ_CHAN_NR * 0x10 / 4)

/*
 * sun8i specific registers
 */
#define SUN8I_DMA_GATE		0x20
#define SUN8I_DMA_GATE_ENABLE	0x4

#define SUNXI_H3_SECURE_REG		0x20
#define SUNXI_H3_DMA_GATE		0x28
#define SUNXI_H3_DMA_GATE_ENABLE	0x4

/*
 * Channels specific registers
 * (offsets relative to the per-channel register block, pchan->base)
 */
#define DMA_CHAN_ENABLE		0x00
#define DMA_CHAN_ENABLE_START		BIT(0)
#define DMA_CHAN_ENABLE_STOP		0

#define DMA_CHAN_PAUSE		0x04
#define DMA_CHAN_PAUSE_PAUSE		BIT(1)
#define DMA_CHAN_PAUSE_RESUME		0

/* Physical address of the first/next LLI to process */
#define DMA_CHAN_LLI_ADDR	0x08

/* Current channel configuration word, built from the bits below */
#define DMA_CHAN_CUR_CFG	0x0c
#define DMA_CHAN_MAX_DRQ		0x1f
#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & DMA_CHAN_MAX_DRQ)
#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
/* Burst field location differs between A31-style and H3-style engines */
#define DMA_CHAN_CFG_SRC_BURST_A31(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_BURST_H3(x)	(((x) & 0x3) << 6)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

/* Destination fields mirror the source fields in the upper half-word */
#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
#define DMA_CHAN_CFG_DST_BURST_A31(x)	(DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
#define DMA_CHAN_CFG_DST_BURST_H3(x)	(DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)

#define DMA_CHAN_CUR_SRC	0x10

#define DMA_CHAN_CUR_DST	0x14

/* Remaining byte count of the LLI currently in flight */
#define DMA_CHAN_CUR_CNT	0x18

#define DMA_CHAN_CUR_PARA	0x1c


/*
 * Various hardware related defines
 */
/* Link value marking the end of an LLI chain */
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1

/* forward declaration */
struct sun6i_dma_dev;
|  | 104 |  | 
/*
 * Hardware channels / ports representation
 *
 * The hardware is used in several SoCs, with differing numbers
 * of channels and endpoints. This structure ties those numbers
 * to a certain compatible string.
 */
struct sun6i_dma_config {
	u32 nr_max_channels;	/* number of physical channels */
	u32 nr_max_requests;	/* number of DRQ request ports */
	u32 nr_max_vchans;	/* number of virtual channels to expose */
	/*
	 * In the datasheets/user manuals of newer Allwinner SoCs, a special
	 * bit (bit 2 at register 0x20) is present.
	 * It's named "DMA MCLK interface circuit auto gating bit" in the
	 * documents, and the footnote of this register says that this bit
	 * should be set up when initializing the DMA controller.
	 * Allwinner A23/A33 user manuals do not have this bit documented,
	 * however these SoCs really have and need this bit, as seen in the
	 * BSP kernel source code.
	 */
	void (*clock_autogate_enable)(struct sun6i_dma_dev *);
	/* Install the SoC-specific burst-length bits into a channel cfg word */
	void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
	/* Bitmaps of supported values, indexed as BIT(burst)/BIT(width) */
	u32 src_burst_lengths;
	u32 dst_burst_lengths;
	u32 src_addr_widths;
	u32 dst_addr_widths;
};
|  | 133 |  | 
/*
 * Hardware representation of the LLI
 *
 * The hardware will be fed the physical address of this structure,
 * and read its content in order to start the transfer.
 */
struct sun6i_dma_lli {
	u32			cfg;		/* DMA_CHAN_CFG_* bits */
	u32			src;		/* source bus address */
	u32			dst;		/* destination bus address */
	u32			len;		/* transfer length in bytes */
	u32			para;		/* extra parameter (NORMAL_WAIT) */
	u32			p_lli_next;	/* phys addr of next LLI, or LLI_LAST_ITEM */

	/*
	 * This field is not used by the DMA controller, but will be
	 * used by the CPU to go through the list (mostly for dumping
	 * or freeing it).
	 */
	struct sun6i_dma_lli	*v_lli_next;
};
|  | 155 |  | 
|  | 156 |  | 
/* A dmaengine descriptor: virt-dma bookkeeping plus its LLI chain head. */
struct sun6i_desc {
	struct virt_dma_desc	vd;
	dma_addr_t		p_lli;	/* physical address of the first LLI */
	struct sun6i_dma_lli	*v_lli;	/* virtual address of the first LLI */
};
|  | 162 |  | 
/* One physical (hardware) DMA channel. */
struct sun6i_pchan {
	u32			idx;	/* channel index */
	void __iomem		*base;	/* per-channel register block */
	struct sun6i_vchan	*vchan;	/* vchan currently served, or NULL */
	struct sun6i_desc	*desc;	/* descriptor in flight */
	struct sun6i_desc	*done;	/* completed descriptor (IRQ -> tasklet) */
};
|  | 170 |  | 
/* One virtual DMA channel, as handed out to dmaengine clients. */
struct sun6i_vchan {
	struct virt_dma_chan	vc;
	struct list_head	node;		/* link in sdev->pending */
	struct dma_slave_config	cfg;		/* latest slave config from client */
	struct sun6i_pchan	*phy;		/* backing physical channel, or NULL */
	u8			port;		/* DRQ port id for slave transfers */
	u8			irq_type;	/* DMA_IRQ_PKG or DMA_IRQ_QUEUE */
	bool			cyclic;		/* current descriptor is cyclic */
};
|  | 180 |  | 
/* Driver state for one DMA controller instance. */
struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;		/* controller register block */
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;		/* protects the pending list */
	struct reset_control	*rstc;
	struct tasklet_struct	task;		/* vchan <-> pchan scheduler */
	atomic_t		tasklet_shutdown;	/* nonzero: stop rescheduling */
	struct list_head	pending;	/* vchans waiting for a pchan */
	struct dma_pool		*pool;		/* pool of sun6i_dma_lli nodes */
	struct sun6i_pchan	*pchans;
	struct sun6i_vchan	*vchans;
	const struct sun6i_dma_config *cfg;	/* per-compatible limits/quirks */
	u32			num_pchans;
	u32			num_vchans;
	u32			max_request;
};
|  | 199 |  | 
|  | 200 | static struct device *chan2dev(struct dma_chan *chan) | 
|  | 201 | { | 
|  | 202 | return &chan->dev->device; | 
|  | 203 | } | 
|  | 204 |  | 
|  | 205 | static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d) | 
|  | 206 | { | 
|  | 207 | return container_of(d, struct sun6i_dma_dev, slave); | 
|  | 208 | } | 
|  | 209 |  | 
|  | 210 | static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan) | 
|  | 211 | { | 
|  | 212 | return container_of(chan, struct sun6i_vchan, vc.chan); | 
|  | 213 | } | 
|  | 214 |  | 
|  | 215 | static inline struct sun6i_desc * | 
|  | 216 | to_sun6i_desc(struct dma_async_tx_descriptor *tx) | 
|  | 217 | { | 
|  | 218 | return container_of(tx, struct sun6i_desc, vd.tx); | 
|  | 219 | } | 
|  | 220 |  | 
/* Dump the controller-wide IRQ enable/pending and status registers (debug). */
static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}
|  | 235 |  | 
/* Dump every per-channel register of one physical channel (debug only). */
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	/* Physical address of the channel register block, for the log only */
	phys_addr_t reg = virt_to_phys(pchan->base);

	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx, &reg,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}
|  | 268 |  | 
|  | 269 | static inline s8 convert_burst(u32 maxburst) | 
|  | 270 | { | 
|  | 271 | switch (maxburst) { | 
|  | 272 | case 1: | 
|  | 273 | return 0; | 
|  | 274 | case 4: | 
|  | 275 | return 1; | 
|  | 276 | case 8: | 
|  | 277 | return 2; | 
|  | 278 | case 16: | 
|  | 279 | return 3; | 
|  | 280 | default: | 
|  | 281 | return -EINVAL; | 
|  | 282 | } | 
|  | 283 | } | 
|  | 284 |  | 
/*
 * Encode a bus width into the 2-bit register field: the dmaengine
 * buswidth values are byte counts, and the field is log2 of that
 * (1 byte -> 0, 2 -> 1, 4 -> 2).
 */
static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	return ilog2(addr_width);
}
|  | 289 |  | 
/* Enable the MCLK auto-gating bit on A23/A33-style controllers (reg 0x20). */
static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev)
{
	writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE);
}
|  | 294 |  | 
/* Enable the MCLK auto-gating bit on H3-style controllers (reg 0x28). */
static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev)
{
	writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE);
}
|  | 299 |  | 
|  | 300 | static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst) | 
|  | 301 | { | 
|  | 302 | *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) | | 
|  | 303 | DMA_CHAN_CFG_DST_BURST_A31(dst_burst); | 
|  | 304 | } | 
|  | 305 |  | 
|  | 306 | static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) | 
|  | 307 | { | 
|  | 308 | *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) | | 
|  | 309 | DMA_CHAN_CFG_DST_BURST_H3(dst_burst); | 
|  | 310 | } | 
|  | 311 |  | 
/*
 * Best-effort count of bytes left to transfer on a physical channel:
 * the remaining byte count of the LLI currently in flight plus the full
 * length of every LLI that has not started yet.
 */
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
{
	struct sun6i_desc *txd = pchan->desc;
	struct sun6i_dma_lli *lli;
	size_t bytes;
	dma_addr_t pos;

	/*
	 * pos appears to hold the link to the next LLI (the search below
	 * matches it against p_lli_next); bytes counts down within the
	 * current LLI.
	 */
	pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
	bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);

	/* On the last LLI the link register holds the end-of-chain marker */
	if (pos == LLI_LAST_ITEM)
		return bytes;

	/* Find the in-flight LLI, then add the lengths of all pending ones */
	for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
		if (lli->p_lli_next == pos) {
			for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
				bytes += lli->len;
			break;
		}
	}

	return bytes;
}
|  | 335 |  | 
|  | 336 | static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, | 
|  | 337 | struct sun6i_dma_lli *next, | 
|  | 338 | dma_addr_t next_phy, | 
|  | 339 | struct sun6i_desc *txd) | 
|  | 340 | { | 
|  | 341 | if ((!prev && !txd) || !next) | 
|  | 342 | return NULL; | 
|  | 343 |  | 
|  | 344 | if (!prev) { | 
|  | 345 | txd->p_lli = next_phy; | 
|  | 346 | txd->v_lli = next; | 
|  | 347 | } else { | 
|  | 348 | prev->p_lli_next = next_phy; | 
|  | 349 | prev->v_lli_next = next; | 
|  | 350 | } | 
|  | 351 |  | 
|  | 352 | next->p_lli_next = LLI_LAST_ITEM; | 
|  | 353 | next->v_lli_next = NULL; | 
|  | 354 |  | 
|  | 355 | return next; | 
|  | 356 | } | 
|  | 357 |  | 
/* Dump one LLI node: physical address, config, addresses, length, links. */
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *lli)
{
	phys_addr_t p_lli = virt_to_phys(lli);

	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc:   p - %pa v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, lli,
		lli->cfg, lli->src, lli->dst,
		lli->len, lli->para, lli->p_lli_next);
}
|  | 371 |  | 
|  | 372 | static void sun6i_dma_free_desc(struct virt_dma_desc *vd) | 
|  | 373 | { | 
|  | 374 | struct sun6i_desc *txd = to_sun6i_desc(&vd->tx); | 
|  | 375 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device); | 
|  | 376 | struct sun6i_dma_lli *v_lli, *v_next; | 
|  | 377 | dma_addr_t p_lli, p_next; | 
|  | 378 |  | 
|  | 379 | if (unlikely(!txd)) | 
|  | 380 | return; | 
|  | 381 |  | 
|  | 382 | p_lli = txd->p_lli; | 
|  | 383 | v_lli = txd->v_lli; | 
|  | 384 |  | 
|  | 385 | while (v_lli) { | 
|  | 386 | v_next = v_lli->v_lli_next; | 
|  | 387 | p_next = v_lli->p_lli_next; | 
|  | 388 |  | 
|  | 389 | dma_pool_free(sdev->pool, v_lli, p_lli); | 
|  | 390 |  | 
|  | 391 | v_lli = v_next; | 
|  | 392 | p_lli = p_next; | 
|  | 393 | } | 
|  | 394 |  | 
|  | 395 | kfree(txd); | 
|  | 396 | } | 
|  | 397 |  | 
/*
 * Hand the next issued descriptor of @vchan to its physical channel and
 * start the hardware.
 *
 * Called with vchan->vc.lock held (see sun6i_dma_tasklet()).  Returns 0
 * on success, -EAGAIN when there is no physical channel or nothing to
 * start.
 */
static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	/* Take the descriptor off the issued list */
	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);

	/* Each IRQ register bank covers 8 channels, 4 bits per channel */
	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	/* Cyclic transfers interrupt per package, others once per queue */
	vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;

	/* Enable only the cause we care about for this channel */
	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
		     (irq_offset * DMA_IRQ_CHAN_WIDTH));
	irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	/* Point the channel at the first LLI and kick it off */
	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}
|  | 440 |  | 
/*
 * Deferred scheduler pairing virtual channels with physical channels.
 *
 * Runs after the IRQ handler: first it restarts or releases physical
 * channels whose descriptor completed, then it assigns free physical
 * channels to pending virtual channels and starts their next descriptor.
 */
static void sun6i_dma_tasklet(unsigned long data)
{
	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

	/* Phase 1: restart or release channels that finished a descriptor */
	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

	/* Phase 2: hand out free physical channels to pending vchans */
	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

	/* Phase 3: start the newly paired channels, outside sdev->lock */
	for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}
|  | 505 |  | 
|  | 506 | static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) | 
|  | 507 | { | 
|  | 508 | struct sun6i_dma_dev *sdev = dev_id; | 
|  | 509 | struct sun6i_vchan *vchan; | 
|  | 510 | struct sun6i_pchan *pchan; | 
|  | 511 | int i, j, ret = IRQ_NONE; | 
|  | 512 | u32 status; | 
|  | 513 |  | 
|  | 514 | for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) { | 
|  | 515 | status = readl(sdev->base + DMA_IRQ_STAT(i)); | 
|  | 516 | if (!status) | 
|  | 517 | continue; | 
|  | 518 |  | 
|  | 519 | dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n", | 
|  | 520 | i ? "high" : "low", status); | 
|  | 521 |  | 
|  | 522 | writel(status, sdev->base + DMA_IRQ_STAT(i)); | 
|  | 523 |  | 
|  | 524 | for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) { | 
|  | 525 | pchan = sdev->pchans + j; | 
|  | 526 | vchan = pchan->vchan; | 
|  | 527 | if (vchan && (status & vchan->irq_type)) { | 
|  | 528 | if (vchan->cyclic) { | 
|  | 529 | vchan_cyclic_callback(&pchan->desc->vd); | 
|  | 530 | } else { | 
|  | 531 | spin_lock(&vchan->vc.lock); | 
|  | 532 | vchan_cookie_complete(&pchan->desc->vd); | 
|  | 533 | pchan->done = pchan->desc; | 
|  | 534 | spin_unlock(&vchan->vc.lock); | 
|  | 535 | } | 
|  | 536 | } | 
|  | 537 |  | 
|  | 538 | status = status >> DMA_IRQ_CHAN_WIDTH; | 
|  | 539 | } | 
|  | 540 |  | 
|  | 541 | if (!atomic_read(&sdev->tasklet_shutdown)) | 
|  | 542 | tasklet_schedule(&sdev->task); | 
|  | 543 | ret = IRQ_HANDLED; | 
|  | 544 | } | 
|  | 545 |  | 
|  | 546 | return ret; | 
|  | 547 | } | 
|  | 548 |  | 
/*
 * Validate a slave configuration for @direction and translate it into
 * the width bits of a channel configuration word; the burst bits are
 * added through the SoC-specific set_burst_length callback.
 *
 * The memory-side width/burst may be left unset by the client and
 * default to 4 bytes / 8 transfers.  Returns 0 or -EINVAL.
 */
static int set_config(struct sun6i_dma_dev *sdev,
		      struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction,
		      u32 *p_cfg)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	u32 src_maxburst, dst_maxburst;
	s8 src_width, dst_width, src_burst, dst_burst;

	src_addr_width = sconfig->src_addr_width;
	dst_addr_width = sconfig->dst_addr_width;
	src_maxburst = sconfig->src_maxburst;
	dst_maxburst = sconfig->dst_maxburst;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Source is memory: apply defaults if the client left it out */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		src_maxburst = src_maxburst ? src_maxburst : 8;
		break;
	case DMA_DEV_TO_MEM:
		/* Destination is memory: same defaulting on the other side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dst_maxburst = dst_maxburst ? dst_maxburst : 8;
		break;
	default:
		return -EINVAL;
	}

	/* Reject widths/bursts outside the per-SoC bitmaps (BIT(value)) */
	if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths))
		return -EINVAL;
	if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths))
		return -EINVAL;
	if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths))
		return -EINVAL;
	if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths))
		return -EINVAL;

	/*
	 * NOTE(review): convert_burst() returns -EINVAL for bursts other
	 * than 1/4/8/16; the bitmap checks above are relied on to only
	 * admit those values -- confirm against the per-SoC configs.
	 */
	src_width = convert_buswidth(src_addr_width);
	dst_width = convert_buswidth(dst_addr_width);
	dst_burst = convert_burst(dst_maxburst);
	src_burst = convert_burst(src_maxburst);

	*p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		 DMA_CHAN_CFG_DST_WIDTH(dst_width);

	sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst);

	return 0;
}
|  | 599 |  | 
|  | 600 | static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( | 
|  | 601 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 
|  | 602 | size_t len, unsigned long flags) | 
|  | 603 | { | 
|  | 604 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | 
|  | 605 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | 
|  | 606 | struct sun6i_dma_lli *v_lli; | 
|  | 607 | struct sun6i_desc *txd; | 
|  | 608 | dma_addr_t p_lli; | 
|  | 609 | s8 burst, width; | 
|  | 610 |  | 
|  | 611 | dev_dbg(chan2dev(chan), | 
|  | 612 | "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", | 
|  | 613 | __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags); | 
|  | 614 |  | 
|  | 615 | if (!len) | 
|  | 616 | return NULL; | 
|  | 617 |  | 
|  | 618 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | 
|  | 619 | if (!txd) | 
|  | 620 | return NULL; | 
|  | 621 |  | 
|  | 622 | v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); | 
|  | 623 | if (!v_lli) { | 
|  | 624 | dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); | 
|  | 625 | goto err_txd_free; | 
|  | 626 | } | 
|  | 627 |  | 
|  | 628 | v_lli->src = src; | 
|  | 629 | v_lli->dst = dest; | 
|  | 630 | v_lli->len = len; | 
|  | 631 | v_lli->para = NORMAL_WAIT; | 
|  | 632 |  | 
|  | 633 | burst = convert_burst(8); | 
|  | 634 | width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); | 
|  | 635 | v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | 
|  | 636 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | 
|  | 637 | DMA_CHAN_CFG_DST_LINEAR_MODE | | 
|  | 638 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 
|  | 639 | DMA_CHAN_CFG_SRC_WIDTH(width) | | 
|  | 640 | DMA_CHAN_CFG_DST_WIDTH(width); | 
|  | 641 |  | 
|  | 642 | sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); | 
|  | 643 |  | 
|  | 644 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); | 
|  | 645 |  | 
|  | 646 | sun6i_dma_dump_lli(vchan, v_lli); | 
|  | 647 |  | 
|  | 648 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | 
|  | 649 |  | 
|  | 650 | err_txd_free: | 
|  | 651 | kfree(txd); | 
|  | 652 | return NULL; | 
|  | 653 | } | 
|  | 654 |  | 
/*
 * Prepare a slave scatter-gather transfer: one LLI per sg entry.  The
 * device end of each LLI comes from the channel's dma_slave_config, the
 * memory end from the scatterlist.
 */
static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	u32 lli_cfg;
	int i, ret;

	if (!sgl)
		return NULL;

	/* Validate widths/bursts and compute the shared cfg bits */
	ret = set_config(sdev, sconfig, dir, &lli_cfg);
	if (ret) {
		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli)
			goto err_lli_free;

		v_lli->len = sg_dma_len(sg);
		v_lli->para = NORMAL_WAIT;

		if (dir == DMA_MEM_TO_DEV) {
			/* Memory (linear) -> device FIFO (IO mode) */
			v_lli->src = sg_dma_address(sg);
			v_lli->dst = sconfig->dst_addr;
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);

		} else {
			/* Device FIFO (IO mode) -> memory (linear) */
			v_lli->src = sconfig->src_addr;
			v_lli->dst = sg_dma_address(sg);
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		sun6i_dma_dump_lli(vchan, prev);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_lli_free:
	/*
	 * NOTE(review): freeing dma_pool memory via virt_to_phys() assumes
	 * the pool handle equals the physical address of the node -- confirm.
	 */
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
	kfree(txd);
	return NULL;
}
|  | 737 |  | 
|  | 738 | static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( | 
|  | 739 | struct dma_chan *chan, | 
|  | 740 | dma_addr_t buf_addr, | 
|  | 741 | size_t buf_len, | 
|  | 742 | size_t period_len, | 
|  | 743 | enum dma_transfer_direction dir, | 
|  | 744 | unsigned long flags) | 
|  | 745 | { | 
|  | 746 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | 
|  | 747 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | 
|  | 748 | struct dma_slave_config *sconfig = &vchan->cfg; | 
|  | 749 | struct sun6i_dma_lli *v_lli, *prev = NULL; | 
|  | 750 | struct sun6i_desc *txd; | 
|  | 751 | dma_addr_t p_lli; | 
|  | 752 | u32 lli_cfg; | 
|  | 753 | unsigned int i, periods = buf_len / period_len; | 
|  | 754 | int ret; | 
|  | 755 |  | 
|  | 756 | ret = set_config(sdev, sconfig, dir, &lli_cfg); | 
|  | 757 | if (ret) { | 
|  | 758 | dev_err(chan2dev(chan), "Invalid DMA configuration\n"); | 
|  | 759 | return NULL; | 
|  | 760 | } | 
|  | 761 |  | 
|  | 762 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | 
|  | 763 | if (!txd) | 
|  | 764 | return NULL; | 
|  | 765 |  | 
|  | 766 | for (i = 0; i < periods; i++) { | 
|  | 767 | v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); | 
|  | 768 | if (!v_lli) { | 
|  | 769 | dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); | 
|  | 770 | goto err_lli_free; | 
|  | 771 | } | 
|  | 772 |  | 
|  | 773 | v_lli->len = period_len; | 
|  | 774 | v_lli->para = NORMAL_WAIT; | 
|  | 775 |  | 
|  | 776 | if (dir == DMA_MEM_TO_DEV) { | 
|  | 777 | v_lli->src = buf_addr + period_len * i; | 
|  | 778 | v_lli->dst = sconfig->dst_addr; | 
|  | 779 | v_lli->cfg = lli_cfg | | 
|  | 780 | DMA_CHAN_CFG_DST_IO_MODE | | 
|  | 781 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 
|  | 782 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | 
|  | 783 | DMA_CHAN_CFG_DST_DRQ(vchan->port); | 
|  | 784 | } else { | 
|  | 785 | v_lli->src = sconfig->src_addr; | 
|  | 786 | v_lli->dst = buf_addr + period_len * i; | 
|  | 787 | v_lli->cfg = lli_cfg | | 
|  | 788 | DMA_CHAN_CFG_DST_LINEAR_MODE | | 
|  | 789 | DMA_CHAN_CFG_SRC_IO_MODE | | 
|  | 790 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | 
|  | 791 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); | 
|  | 792 | } | 
|  | 793 |  | 
|  | 794 | prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); | 
|  | 795 | } | 
|  | 796 |  | 
|  | 797 | prev->p_lli_next = txd->p_lli;		/* cyclic list */ | 
|  | 798 |  | 
|  | 799 | vchan->cyclic = true; | 
|  | 800 |  | 
|  | 801 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | 
|  | 802 |  | 
|  | 803 | err_lli_free: | 
|  | 804 | for (prev = txd->v_lli; prev; prev = prev->v_lli_next) | 
|  | 805 | dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); | 
|  | 806 | kfree(txd); | 
|  | 807 | return NULL; | 
|  | 808 | } | 
|  | 809 |  | 
|  | 810 | static int sun6i_dma_config(struct dma_chan *chan, | 
|  | 811 | struct dma_slave_config *config) | 
|  | 812 | { | 
|  | 813 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | 
|  | 814 |  | 
|  | 815 | memcpy(&vchan->cfg, config, sizeof(*config)); | 
|  | 816 |  | 
|  | 817 | return 0; | 
|  | 818 | } | 
|  | 819 |  | 
/*
 * Pause a channel: pause the hardware when a physical channel backs the
 * vchan; otherwise just drop the vchan from the scheduler's pending
 * list so it will not be started.
 */
static int sun6i_dma_pause(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;

	dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);

	if (pchan) {
		writel(DMA_CHAN_PAUSE_PAUSE,
		       pchan->base + DMA_CHAN_PAUSE);
	} else {
		/*
		 * NOTE(review): unlike resume, this does not take
		 * vchan->vc.lock and uses a non-IRQ-safe sdev->lock --
		 * confirm this cannot race the tasklet.
		 */
		spin_lock(&sdev->lock);
		list_del_init(&vchan->node);
		spin_unlock(&sdev->lock);
	}

	return 0;
}
|  | 839 |  | 
/*
 * Resume a previously paused channel.
 *
 * Lock order: vc.lock is taken first, sdev->lock nests inside it
 * (same nesting as sun6i_dma_issue_pending()).
 */
static int sun6i_dma_resume(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (pchan) {
		/* Running on hardware: clear the pause bit. */
		writel(DMA_CHAN_PAUSE_RESUME,
		       pchan->base + DMA_CHAN_PAUSE);
	} else if (!list_empty(&vchan->vc.desc_issued)) {
		/*
		 * Paused before getting a physical channel: put the
		 * vchan back on the pending list so the tasklet can
		 * schedule it again.
		 */
		spin_lock(&sdev->lock);
		list_add_tail(&vchan->node, &sdev->pending);
		spin_unlock(&sdev->lock);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
|  | 864 |  | 
/*
 * Abort all transfers on the channel and release its physical channel.
 *
 * sdev->lock is taken on its own first to drop the vchan from the
 * pending list, then vc.lock protects the descriptor lists and the
 * pchan teardown.  The collected descriptors are freed only after
 * vc.lock has been released.
 */
static int sun6i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->cyclic) {
		vchan->cyclic = false;
		if (pchan && pchan->desc) {
			struct virt_dma_desc *vd = &pchan->desc->vd;
			struct virt_dma_chan *vc = &vchan->vc;

			/*
			 * A cyclic descriptor never completes by itself;
			 * move it to desc_completed so it is collected
			 * by vchan_get_all_descriptors() below and freed.
			 */
			list_add_tail(&vd->node, &vc->desc_completed);
		}
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		/* Stop the hardware and clear any pause still in effect. */
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		/* Break the vchan <-> pchan association both ways. */
		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}
|  | 907 |  | 
|  | 908 | static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, | 
|  | 909 | dma_cookie_t cookie, | 
|  | 910 | struct dma_tx_state *state) | 
|  | 911 | { | 
|  | 912 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | 
|  | 913 | struct sun6i_pchan *pchan = vchan->phy; | 
|  | 914 | struct sun6i_dma_lli *lli; | 
|  | 915 | struct virt_dma_desc *vd; | 
|  | 916 | struct sun6i_desc *txd; | 
|  | 917 | enum dma_status ret; | 
|  | 918 | unsigned long flags; | 
|  | 919 | size_t bytes = 0; | 
|  | 920 |  | 
|  | 921 | ret = dma_cookie_status(chan, cookie, state); | 
|  | 922 | if (ret == DMA_COMPLETE || !state) | 
|  | 923 | return ret; | 
|  | 924 |  | 
|  | 925 | spin_lock_irqsave(&vchan->vc.lock, flags); | 
|  | 926 |  | 
|  | 927 | vd = vchan_find_desc(&vchan->vc, cookie); | 
|  | 928 | txd = to_sun6i_desc(&vd->tx); | 
|  | 929 |  | 
|  | 930 | if (vd) { | 
|  | 931 | for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next) | 
|  | 932 | bytes += lli->len; | 
|  | 933 | } else if (!pchan || !pchan->desc) { | 
|  | 934 | bytes = 0; | 
|  | 935 | } else { | 
|  | 936 | bytes = sun6i_get_chan_size(pchan); | 
|  | 937 | } | 
|  | 938 |  | 
|  | 939 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | 
|  | 940 |  | 
|  | 941 | dma_set_residue(state, bytes); | 
|  | 942 |  | 
|  | 943 | return ret; | 
|  | 944 | } | 
|  | 945 |  | 
/*
 * Move all submitted descriptors to the issued list and, if the vchan
 * is idle, queue it for the scheduling tasklet.
 *
 * Lock order: vc.lock first, sdev->lock nested inside it.
 */
static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		spin_lock(&sdev->lock);

		/*
		 * Queue the vchan only if it is not already running on
		 * a physical channel and not already on the pending list.
		 */
		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
|  | 972 |  | 
|  | 973 | static void sun6i_dma_free_chan_resources(struct dma_chan *chan) | 
|  | 974 | { | 
|  | 975 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | 
|  | 976 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | 
|  | 977 | unsigned long flags; | 
|  | 978 |  | 
|  | 979 | spin_lock_irqsave(&sdev->lock, flags); | 
|  | 980 | list_del_init(&vchan->node); | 
|  | 981 | spin_unlock_irqrestore(&sdev->lock, flags); | 
|  | 982 |  | 
|  | 983 | vchan_free_chan_resources(&vchan->vc); | 
|  | 984 | } | 
|  | 985 |  | 
|  | 986 | static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, | 
|  | 987 | struct of_dma *ofdma) | 
|  | 988 | { | 
|  | 989 | struct sun6i_dma_dev *sdev = ofdma->of_dma_data; | 
|  | 990 | struct sun6i_vchan *vchan; | 
|  | 991 | struct dma_chan *chan; | 
|  | 992 | u8 port = dma_spec->args[0]; | 
|  | 993 |  | 
|  | 994 | if (port > sdev->max_request) | 
|  | 995 | return NULL; | 
|  | 996 |  | 
|  | 997 | chan = dma_get_any_slave_channel(&sdev->slave); | 
|  | 998 | if (!chan) | 
|  | 999 | return NULL; | 
|  | 1000 |  | 
|  | 1001 | vchan = to_sun6i_vchan(chan); | 
|  | 1002 | vchan->port = port; | 
|  | 1003 |  | 
|  | 1004 | return chan; | 
|  | 1005 | } | 
|  | 1006 |  | 
/*
 * Shut down interrupt delivery and the scheduling tasklet.
 *
 * The ordering matters: mask the controller's interrupt sources, flag
 * the tasklet as shutting down, free the IRQ so the handler can no
 * longer run, and only then kill the tasklet.
 */
static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
{
	/* Disable all interrupts from DMA */
	writel(0, sdev->base + DMA_IRQ_EN(0));
	writel(0, sdev->base + DMA_IRQ_EN(1));

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&sdev->tasklet_shutdown);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(sdev->slave.dev, sdev->irq, sdev);

	/* Actually prevent the tasklet from being scheduled */
	tasklet_kill(&sdev->task);
}
|  | 1022 |  | 
|  | 1023 | static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) | 
|  | 1024 | { | 
|  | 1025 | int i; | 
|  | 1026 |  | 
|  | 1027 | for (i = 0; i < sdev->num_vchans; i++) { | 
|  | 1028 | struct sun6i_vchan *vchan = &sdev->vchans[i]; | 
|  | 1029 |  | 
|  | 1030 | list_del(&vchan->vc.chan.device_node); | 
|  | 1031 | tasklet_kill(&vchan->vc.task); | 
|  | 1032 | } | 
|  | 1033 | } | 
|  | 1034 |  | 
|  | 1035 | /* | 
|  | 1036 | * For A31: | 
|  | 1037 | * | 
|  | 1038 | * There's 16 physical channels that can work in parallel. | 
|  | 1039 | * | 
|  | 1040 | * However we have 30 different endpoints for our requests. | 
|  | 1041 | * | 
 * Since the channels are able to handle only a unidirectional
 * transfer, we need to allocate more virtual channels so that
 * everyone can grab one channel.
|  | 1045 | * | 
 * Some devices can't work in both directions (mostly because it
 * wouldn't make sense), so we have a bit fewer virtual channels than
 * 2 channels per endpoint.
|  | 1049 | */ | 
|  | 1050 |  | 
static struct sun6i_dma_config sun6i_a31_dma_cfg = {
	.nr_max_channels = 16,
	.nr_max_requests = 30,
	.nr_max_vchans   = 53,
	.set_burst_length = sun6i_set_burst_length_a31,
	/* A31 supports only burst lengths of 1 and 8 */
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
|  | 1065 |  | 
|  | 1066 | /* | 
|  | 1067 | * The A23 only has 8 physical channels, a maximum DRQ port id of 24, | 
|  | 1068 | * and a total of 37 usable source and destination endpoints. | 
|  | 1069 | */ | 
|  | 1070 |  | 
static struct sun6i_dma_config sun8i_a23_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 24,
	.nr_max_vchans   = 37,
	/* A23 adds clock autogating on top of the A31 feature set */
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
|  | 1086 |  | 
/* A83t: 8 channels, DRQ port ids up to 28, 39 usable endpoints. */
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 28,
	.nr_max_vchans   = 39,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
|  | 1102 |  | 
|  | 1103 | /* | 
|  | 1104 | * The H3 has 12 physical channels, a maximum DRQ port id of 27, | 
|  | 1105 | * and a total of 34 usable source and destination endpoints. | 
|  | 1106 | * It also supports additional burst lengths and bus widths, | 
|  | 1107 | * and the burst length fields have different offsets. | 
|  | 1108 | */ | 
|  | 1109 |  | 
static struct sun6i_dma_config sun8i_h3_dma_cfg = {
	.nr_max_channels = 12,
	.nr_max_requests = 27,
	.nr_max_vchans   = 34,
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	/* H3 uses different burst-length register offsets than the A31 */
	.set_burst_length = sun6i_set_burst_length_h3,
	/* Extra burst lengths (4, 16) and 8-byte bus width vs. the A31 */
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
|  | 1127 |  | 
|  | 1128 | /* | 
|  | 1129 | * The A64 binding uses the number of dma channels from the | 
|  | 1130 | * device tree node. | 
|  | 1131 | */ | 
static struct sun6i_dma_config sun50i_a64_dma_cfg = {
	/*
	 * No nr_max_* here: channel/request/vchan counts come from the
	 * "dma-channels" / "dma-requests" device-tree properties read
	 * in sun6i_dma_probe().
	 */
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
|  | 1146 |  | 
|  | 1147 | /* | 
 * The V3s has only 8 physical channels, a maximum DRQ port id of 23,
 * and a total of 24 usable source and destination endpoints.
|  | 1150 | */ | 
|  | 1151 |  | 
static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 23,
	.nr_max_vchans   = 24,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
|  | 1167 |  | 
/* Device-tree compatibles mapped to the per-SoC configs above. */
static const struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
	{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
	{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
	{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
	{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
	{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
|  | 1178 |  | 
/*
 * Probe: map the controller, acquire clock/reset/IRQ, set up the
 * dmaengine device and its physical/virtual channels, then register
 * with the dmaengine core and the DT DMA framework.
 *
 * On failure everything acquired so far is unwound via the goto
 * cleanup chain at the bottom (devm-managed resources excepted).
 */
static int sun6i_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sun6i_dma_dev *sdc;
	struct resource *res;
	int ret, i;

	sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
	if (!sdc)
		return -ENOMEM;

	/* Per-SoC configuration selected by the compatible string. */
	sdc->cfg = of_device_get_match_data(&pdev->dev);
	if (!sdc->cfg)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdc->base))
		return PTR_ERR(sdc->base);

	sdc->irq = platform_get_irq(pdev, 0);
	if (sdc->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return sdc->irq;
	}

	sdc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(sdc->clk);
	}

	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
		return PTR_ERR(sdc->rstc);
	}

	/* Pool of hardware link-list items, 4-byte aligned for the IP. */
	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				     sizeof(struct sun6i_dma_lli), 4, 0);
	if (!sdc->pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, sdc);
	INIT_LIST_HEAD(&sdc->pending);
	spin_lock_init(&sdc->lock);

	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);

	INIT_LIST_HEAD(&sdc->slave.channels);
	sdc->slave.device_free_chan_resources	= sun6i_dma_free_chan_resources;
	sdc->slave.device_tx_status		= sun6i_dma_tx_status;
	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
	sdc->slave.device_prep_dma_cyclic	= sun6i_dma_prep_dma_cyclic;
	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
	sdc->slave.device_config		= sun6i_dma_config;
	sdc->slave.device_pause			= sun6i_dma_pause;
	sdc->slave.device_resume		= sun6i_dma_resume;
	sdc->slave.device_terminate_all		= sun6i_dma_terminate_all;
	sdc->slave.src_addr_widths		= sdc->cfg->src_addr_widths;
	sdc->slave.dst_addr_widths		= sdc->cfg->dst_addr_widths;
	sdc->slave.directions			= BIT(DMA_DEV_TO_MEM) |
						  BIT(DMA_MEM_TO_DEV);
	sdc->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
	sdc->slave.dev = &pdev->dev;

	/* Defaults from the per-SoC config; may be zero (e.g. A64)... */
	sdc->num_pchans = sdc->cfg->nr_max_channels;
	sdc->num_vchans = sdc->cfg->nr_max_vchans;
	sdc->max_request = sdc->cfg->nr_max_requests;

	/* ...in which case the device tree must provide the counts. */
	ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans);
	if (ret && !sdc->num_pchans) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
	if (ret && !sdc->max_request) {
		dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
			 DMA_CHAN_MAX_DRQ);
		sdc->max_request = DMA_CHAN_MAX_DRQ;
	}

	/*
	 * If the number of vchans is not specified, derive it from the
	 * highest port number, at most one channel per port and direction.
	 */
	if (!sdc->num_vchans)
		sdc->num_vchans = 2 * (sdc->max_request + 1);

	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans,
				   sizeof(struct sun6i_pchan), GFP_KERNEL);
	if (!sdc->pchans)
		return -ENOMEM;

	sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans,
				   sizeof(struct sun6i_vchan), GFP_KERNEL);
	if (!sdc->vchans)
		return -ENOMEM;

	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);

	/* Each pchan's registers live at a fixed 0x40-byte stride. */
	for (i = 0; i < sdc->num_pchans; i++) {
		struct sun6i_pchan *pchan = &sdc->pchans[i];

		pchan->idx = i;
		pchan->base = sdc->base + 0x100 + i * 0x40;
	}

	for (i = 0; i < sdc->num_vchans; i++) {
		struct sun6i_vchan *vchan = &sdc->vchans[i];

		INIT_LIST_HEAD(&vchan->node);
		vchan->vc.desc_free = sun6i_dma_free_desc;
		vchan_init(&vchan->vc, &sdc->slave);
	}

	ret = reset_control_deassert(sdc->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
		goto err_chan_free;
	}

	ret = clk_prepare_enable(sdc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		goto err_reset_assert;
	}

	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_irq_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
					 sdc);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	if (sdc->cfg->clock_autogate_enable)
		sdc->cfg->clock_autogate_enable(sdc);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
	reset_control_assert(sdc->rstc);
err_chan_free:
	sun6i_dma_free(sdc);
	return ret;
}
|  | 1352 |  | 
/*
 * Remove: unwind probe in reverse order -- unregister from the DT DMA
 * framework and dmaengine core, stop interrupts and the tasklet, then
 * gate the clock, assert reset and free the channels.
 */
static int sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);

	return 0;
}
|  | 1369 |  | 
static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);

MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");