From 87db029b2576ccae40dcf8173e2fbb84fdbec1a5 Mon Sep 17 00:00:00 2001
From: Peng Ma <peng.ma@nxp.com>
Date: Mon, 4 Mar 2019 15:45:56 +0800
Subject: [PATCH] dmaengine: fsl-dpaa2-qdma: Add NXP dpaa2 qDMA controller
 driver for Layerscape SoCs

DPAA2 (Data Path Acceleration Architecture 2) qDMA supports
virtualized channels by allowing DMA jobs to be enqueued into
different work queues. A core can initiate a DMA transaction by
preparing a frame descriptor (FD) for each DMA job and enqueuing
the job through a hardware portal. DPAA2 components can also
prepare an FD and enqueue a DMA job through a hardware portal.
The qDMA prefetches DMA jobs through the DPAA2 hardware portal,
then schedules and dispatches them to the internal DMA hardware
engines, which generate the read and write requests. Both the
source and the destination of a qDMA job can be either contiguous
or non-contiguous, described by one or more scatter/gather tables.
The qDMA supports global bandwidth flow control, where all DMA
transactions are stalled once the bandwidth threshold has been
reached. Transaction-based read throttling is also supported.

Add the NXP DPAA2 qDMA driver to support Layerscape SoCs such as
LS1088A, LS208xA and LX2.

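The driver exposes the engine through the standard dmaengine API as a
DMA_MEMCPY provider, so a client drives it roughly as sketched below
(a minimal, hypothetical example, not part of this patch; dst, src,
len and all error handling are the caller's responsibility):

    struct dma_async_tx_descriptor *tx;
    struct dma_chan *chan;
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);
    chan = dma_request_channel(mask, NULL, NULL);  /* any memcpy channel */
    tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
    dmaengine_submit(tx);                          /* assigns a cookie */
    dma_async_issue_pending(chan);  /* ends up in dpaa2_qdma_issue_pending() */
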
Signed-off-by: Peng Ma <peng.ma@nxp.com>
---
 drivers/dma/Kconfig                     |   2 +
 drivers/dma/Makefile                    |   1 +
 drivers/dma/fsl-dpaa2-qdma/Kconfig      |   9 +
 drivers/dma/fsl-dpaa2-qdma/Makefile     |   3 +
 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 825 ++++++++++++++++++++++++++++++++
 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h | 153 ++++++
 6 files changed, 993 insertions(+)
 create mode 100644 drivers/dma/fsl-dpaa2-qdma/Kconfig
 create mode 100644 drivers/dma/fsl-dpaa2-qdma/Makefile
 create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
 create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h

--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -670,6 +670,8 @@ source "drivers/dma/sh/Kconfig"
 
 source "drivers/dma/ti/Kconfig"
 
+source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+
 # clients
 comment "DMA Clients"
 	depends on DMA_ENGINE
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
 
 obj-y += mediatek/
 obj-y += qcom/
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Kconfig
@@ -0,0 +1,9 @@
+menuconfig FSL_DPAA2_QDMA
+	tristate "NXP DPAA2 QDMA"
+	depends on ARM64
+	depends on FSL_MC_BUS && FSL_MC_DPIO
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  NXP Data Path Acceleration Architecture 2 QDMA driver,
+	  using the NXP MC bus driver.
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the NXP DPAA2 qDMA controllers
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/of_irq.h>
+#include <linux/iommu.h>
+#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "../virt-dma.h"
+#include "dpdmai.h"
+#include "dpaa2-qdma.h"
+
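+/*
+ * Bypass memory translation (BMT) is used unless the device sits behind
+ * an IOMMU; dpaa2_qdma_probe() clears this flag when an IOMMU domain is
+ * attached, since fsl-mc then hands the device IOVAs.
+ */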
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+        return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+        return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+        struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+        struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
+
+        dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
+                                              sizeof(struct dpaa2_fd),
+                                              sizeof(struct dpaa2_fd), 0);
+        if (!dpaa2_chan->fd_pool)
+                goto err;
+
+        dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+                                              sizeof(struct dpaa2_fl_entry),
+                                              sizeof(struct dpaa2_fl_entry), 0);
+        if (!dpaa2_chan->fl_pool)
+                goto err_fd;
+
+        dpaa2_chan->sdd_pool =
+                dma_pool_create("sdd_pool", dev,
+                                sizeof(struct dpaa2_qdma_sd_d),
+                                sizeof(struct dpaa2_qdma_sd_d), 0);
+        if (!dpaa2_chan->sdd_pool)
+                goto err_fl;
+
+        return dpaa2_qdma->desc_allocated++;
+err_fl:
+        dma_pool_destroy(dpaa2_chan->fl_pool);
+err_fd:
+        dma_pool_destroy(dpaa2_chan->fd_pool);
+err:
+        return -ENOMEM;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+        struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+        unsigned long flags;
+
+        LIST_HEAD(head);
+
+        spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+        vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+        spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+        vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+
+        dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
+        dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
+
+        dma_pool_destroy(dpaa2_chan->fd_pool);
+        dma_pool_destroy(dpaa2_chan->fl_pool);
+        dma_pool_destroy(dpaa2_chan->sdd_pool);
+        dpaa2_qdma->desc_allocated--;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+        struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
+        struct device *dev = &qdma_priv->dpdmai_dev->dev;
+        struct dpaa2_qdma_comp *comp_temp = NULL;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+        if (list_empty(&dpaa2_chan->comp_free)) {
+                spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+                comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
+                if (!comp_temp)
+                        goto err;
+                comp_temp->fd_virt_addr =
+                        dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
+                                       &comp_temp->fd_bus_addr);
+                if (!comp_temp->fd_virt_addr)
+                        goto err_comp;
+
+                comp_temp->fl_virt_addr =
+                        dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
+                                       &comp_temp->fl_bus_addr);
+                if (!comp_temp->fl_virt_addr)
+                        goto err_fd_virt;
+
+                comp_temp->desc_virt_addr =
+                        dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
+                                       &comp_temp->desc_bus_addr);
+                if (!comp_temp->desc_virt_addr)
+                        goto err_fl_virt;
+
+                comp_temp->qchan = dpaa2_chan;
+                return comp_temp;
+        }
+
+        comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+                                     struct dpaa2_qdma_comp, list);
+        list_del(&comp_temp->list);
+        spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+        comp_temp->qchan = dpaa2_chan;
+
+        return comp_temp;
+
+err_fl_virt:
+        dma_pool_free(dpaa2_chan->fl_pool,
+                      comp_temp->fl_virt_addr,
+                      comp_temp->fl_bus_addr);
+err_fd_virt:
+        dma_pool_free(dpaa2_chan->fd_pool,
+                      comp_temp->fd_virt_addr,
+                      comp_temp->fd_bus_addr);
+err_comp:
+        kfree(comp_temp);
+err:
+        dev_err(dev, "Failed to request descriptor\n");
+        return NULL;
+}
+
+static void
+dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
+{
+        struct dpaa2_fd *fd;
+
+        fd = dpaa2_comp->fd_virt_addr;
+        memset(fd, 0, sizeof(struct dpaa2_fd));
+
+        /* fd populated */
+        dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
+
+        /*
+         * Bypass memory translation, frame list format, short length
+         * disabled. BMT must not be used when fsl-mc hands the device
+         * IOVA addresses (i.e. when an IOMMU is in use).
+         */
+        if (smmu_disable)
+                dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
+        dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
+
+        dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
+}
+
+/* first frame list for descriptor buffer */
+static void
+dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
+                                 struct dpaa2_qdma_comp *dpaa2_comp,
+                                 bool wrt_changed)
+{
+        struct dpaa2_qdma_sd_d *sdd;
+
+        sdd = dpaa2_comp->desc_virt_addr;
+        memset(sdd, 0, 2 * (sizeof(*sdd)));
+
+        /* source descriptor CMD */
+        sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
+        sdd++;
+
+        /* dest descriptor CMD */
+        if (wrt_changed)
+                sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
+        else
+                sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
+
+        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+        /* first frame list to source descriptor */
+        dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
+        dpaa2_fl_set_len(f_list, 0x20);
+        dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
+
+        /* bypass memory translation */
+        if (smmu_disable)
+                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+/* source and destination frame list */
+static void
+dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
+                           dma_addr_t dst, dma_addr_t src,
+                           size_t len, uint8_t fmt)
+{
+        /* source frame list to source buffer */
+        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+        dpaa2_fl_set_addr(f_list, src);
+        dpaa2_fl_set_len(f_list, len);
+
+        /* single buffer frame or scatter gather frame */
+        dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+
+        /* bypass memory translation */
+        if (smmu_disable)
+                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+
+        f_list++;
+
+        /* destination frame list to destination buffer */
+        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+        dpaa2_fl_set_addr(f_list, dst);
+        dpaa2_fl_set_len(f_list, len);
+        dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+        /* single buffer frame or scatter gather frame */
+        dpaa2_fl_set_final(f_list, QDMA_FL_F);
+        /* bypass memory translation */
+        if (smmu_disable)
+                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
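+/*
+ * Prepare a memcpy transaction: one long-format FD whose frame list
+ * carries the source/destination descriptor pair in its first entry,
+ * followed by the source and destination buffer entries.
+ */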
+static struct dma_async_tx_descriptor
+*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+                        dma_addr_t src, size_t len, ulong flags)
+{
+        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+        struct dpaa2_qdma_engine *dpaa2_qdma;
+        struct dpaa2_qdma_comp *dpaa2_comp;
+        struct dpaa2_fl_entry *f_list;
+        bool wrt_changed;
+
+        dpaa2_qdma = dpaa2_chan->qdma;
+        dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+        if (!dpaa2_comp)
+                return NULL;
+
+        wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
+
+        /* populate Frame descriptor */
+        dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
+
+        f_list = dpaa2_comp->fl_virt_addr;
+
+        /* first frame list for descriptor buffer (long format) */
+        dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
+
+        f_list++;
+
+        dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+        return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
+{
+        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+        struct dpaa2_qdma_comp *dpaa2_comp;
+        struct virt_dma_desc *vdesc;
+        struct dpaa2_fd *fd;
+        unsigned long flags;
+        int err;
+
+        spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+        spin_lock(&dpaa2_chan->vchan.lock);
+        if (vchan_issue_pending(&dpaa2_chan->vchan)) {
+                vdesc = vchan_next_desc(&dpaa2_chan->vchan);
+                if (!vdesc)
+                        goto err_enqueue;
+                dpaa2_comp = to_fsl_qdma_comp(vdesc);
+
+                fd = dpaa2_comp->fd_virt_addr;
+
+                list_del(&vdesc->node);
+                list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
+
+                err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
+                if (err) {
+                        list_del(&dpaa2_comp->list);
+                        list_add_tail(&dpaa2_comp->list,
+                                      &dpaa2_chan->comp_free);
+                }
+        }
+err_enqueue:
+        spin_unlock(&dpaa2_chan->vchan.lock);
+        spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+}
+
+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+{
+        struct dpaa2_qdma_priv_per_prio *ppriv;
+        struct device *dev = &ls_dev->dev;
+        struct dpaa2_qdma_priv *priv;
+        u8 prio_def = DPDMAI_PRIO_NUM;
+        int err = -EINVAL;
+        int i;
+
+        priv = dev_get_drvdata(dev);
+
+        priv->dev = dev;
+        priv->dpqdma_id = ls_dev->obj_desc.id;
+
+        /* Get the handle for the DPDMAI this interface is associated with */
+        err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
+        if (err) {
+                dev_err(dev, "dpdmai_open() failed\n");
+                return err;
+        }
+
+        dev_dbg(dev, "Opened dpdmai object successfully\n");
+
+        err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+                                    &priv->dpdmai_attr);
+        if (err) {
+                dev_err(dev, "dpdmai_get_attributes() failed\n");
+                goto exit;
+        }
+
+        if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+                dev_err(dev, "DPDMAI major version mismatch\n"
+                        "Found %u.%u, supported version is %u.%u\n",
+                        priv->dpdmai_attr.version.major,
+                        priv->dpdmai_attr.version.minor,
+                        DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+                goto exit;
+        }
+
+        if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+                dev_err(dev, "DPDMAI minor version mismatch\n"
+                        "Found %u.%u, supported version is %u.%u\n",
+                        priv->dpdmai_attr.version.major,
+                        priv->dpdmai_attr.version.minor,
+                        DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+                goto exit;
+        }
+
+        priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
+        ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
+        if (!ppriv) {
+                err = -ENOMEM;
+                goto exit;
+        }
+        priv->ppriv = ppriv;
+
+        for (i = 0; i < priv->num_pairs; i++) {
+                err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+                                          i, &priv->rx_queue_attr[i]);
+                if (err) {
+                        dev_err(dev, "dpdmai_get_rx_queue() failed\n");
+                        goto exit;
+                }
+                ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+
+                err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+                                          i, &priv->tx_fqid[i]);
+                if (err) {
+                        dev_err(dev, "dpdmai_get_tx_queue() failed\n");
+                        goto exit;
+                }
+                ppriv->req_fqid = priv->tx_fqid[i];
+                ppriv->prio = i;
+                ppriv->priv = priv;
+                ppriv++;
+        }
+
+        return 0;
+exit:
+        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+        return err;
+}
+
+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+        struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
+                        struct dpaa2_qdma_priv_per_prio, nctx);
+        struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
+        struct dpaa2_qdma_priv *priv = ppriv->priv;
+        u32 n_chans = priv->dpaa2_qdma->n_chans;
+        struct dpaa2_qdma_chan *qchan;
+        const struct dpaa2_fd *fd_eq;
+        const struct dpaa2_fd *fd;
+        struct dpaa2_dq *dq;
+        int is_last = 0;
+        int found;
+        u8 status;
+        int err;
+        int i;
+
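+        /*
+         * Pull the response FQ into the local store, then match each
+         * dequeued FD back to its originating completion descriptor by
+         * comparing frame addresses against every channel's comp_used list.
+         */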
+        do {
+                err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+                                               ppriv->store);
+        } while (err);
+
+        while (!is_last) {
+                do {
+                        dq = dpaa2_io_store_next(ppriv->store, &is_last);
+                } while (!is_last && !dq);
+                if (!dq) {
+                        dev_err(priv->dev, "FQID returned no valid frames!\n");
+                        continue;
+                }
+
+                /* obtain FD and process the error */
+                fd = dpaa2_dq_fd(dq);
+
+                status = dpaa2_fd_get_ctrl(fd) & 0xff;
+                if (status)
+                        dev_err(priv->dev, "FD error occurred\n");
+                found = 0;
+                for (i = 0; i < n_chans; i++) {
+                        qchan = &priv->dpaa2_qdma->chans[i];
+                        spin_lock(&qchan->queue_lock);
+                        if (list_empty(&qchan->comp_used)) {
+                                spin_unlock(&qchan->queue_lock);
+                                continue;
+                        }
+                        list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
+                                                 &qchan->comp_used, list) {
+                                fd_eq = dpaa2_comp->fd_virt_addr;
+
+                                if (le64_to_cpu(fd_eq->simple.addr) ==
+                                    le64_to_cpu(fd->simple.addr)) {
+                                        spin_lock(&qchan->vchan.lock);
+                                        vchan_cookie_complete(&dpaa2_comp->vdesc);
+                                        spin_unlock(&qchan->vchan.lock);
+                                        found = 1;
+                                        break;
+                                }
+                        }
+                        spin_unlock(&qchan->queue_lock);
+                        if (found)
+                                break;
+                }
+        }
+
+        dpaa2_io_service_rearm(NULL, ctx);
+}
+
+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+{
+        struct dpaa2_qdma_priv_per_prio *ppriv;
+        struct device *dev = priv->dev;
+        int err = -EINVAL;
+        int i, num;
+
+        num = priv->num_pairs;
+        ppriv = priv->ppriv;
+        for (i = 0; i < num; i++) {
+                ppriv->nctx.is_cdan = 0;
+                ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
+                ppriv->nctx.id = ppriv->rsp_fqid;
+                ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
+                err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
+                if (err) {
+                        dev_err(dev, "Notification register failed\n");
+                        goto err_service;
+                }
+
+                ppriv->store =
+                        dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+                if (!ppriv->store) {
+                        dev_err(dev, "dpaa2_io_store_create() failed\n");
+                        goto err_store;
+                }
+
+                ppriv++;
+        }
+        return 0;
+
+err_store:
+        dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+err_service:
+        ppriv--;
+        while (ppriv >= priv->ppriv) {
+                dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+                dpaa2_io_store_destroy(ppriv->store);
+                ppriv--;
+        }
+        return err;
+}
+
+static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
+{
+        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+        int i;
+
+        for (i = 0; i < priv->num_pairs; i++) {
+                dpaa2_io_store_destroy(ppriv->store);
+                ppriv++;
+        }
+}
+
+static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+        struct device *dev = priv->dev;
+        int i;
+
+        for (i = 0; i < priv->num_pairs; i++) {
+                dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+                ppriv++;
+        }
+}
+
+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
+{
+        struct dpdmai_rx_queue_cfg rx_queue_cfg;
+        struct dpaa2_qdma_priv_per_prio *ppriv;
+        struct device *dev = priv->dev;
+        struct fsl_mc_device *ls_dev;
+        int i, num;
+        int err;
+
+        ls_dev = to_fsl_mc_device(dev);
+        num = priv->num_pairs;
+        ppriv = priv->ppriv;
+        for (i = 0; i < num; i++) {
+                rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
+                                       DPDMAI_QUEUE_OPT_DEST;
+                rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+                rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
+                rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+                rx_queue_cfg.dest_cfg.priority = ppriv->prio;
+                err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+                                          rx_queue_cfg.dest_cfg.priority,
+                                          &rx_queue_cfg);
+                if (err) {
+                        dev_err(dev, "dpdmai_set_rx_queue() failed\n");
+                        return err;
+                }
+
+                ppriv++;
+        }
+
+        return 0;
+}
+
+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
+{
+        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+        struct device *dev = priv->dev;
+        struct fsl_mc_device *ls_dev;
+        int err = 0;
+        int i;
+
+        ls_dev = to_fsl_mc_device(dev);
+
+        for (i = 0; i < priv->num_pairs; i++) {
+                ppriv->nctx.qman64 = 0;
+                ppriv->nctx.dpio_id = 0;
+                ppriv++;
+        }
+
+        err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
+        if (err)
+                dev_err(dev, "dpdmai_reset() failed\n");
+
+        return err;
+}
+
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+                                   struct list_head *head)
+{
+        struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
+        unsigned long flags;
+
+        list_for_each_entry_safe(comp_tmp, _comp_tmp,
+                                 head, list) {
+                spin_lock_irqsave(&qchan->queue_lock, flags);
+                list_del(&comp_tmp->list);
+                spin_unlock_irqrestore(&qchan->queue_lock, flags);
+                dma_pool_free(qchan->fd_pool,
+                              comp_tmp->fd_virt_addr,
+                              comp_tmp->fd_bus_addr);
+                dma_pool_free(qchan->fl_pool,
+                              comp_tmp->fl_virt_addr,
+                              comp_tmp->fl_bus_addr);
+                dma_pool_free(qchan->sdd_pool,
+                              comp_tmp->desc_virt_addr,
+                              comp_tmp->desc_bus_addr);
+                kfree(comp_tmp);
+        }
+}
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+        struct dpaa2_qdma_chan *qchan;
+        int num, i;
+
+        num = dpaa2_qdma->n_chans;
+        for (i = 0; i < num; i++) {
+                qchan = &dpaa2_qdma->chans[i];
+                dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+                dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
+                dma_pool_destroy(qchan->fd_pool);
+                dma_pool_destroy(qchan->fl_pool);
+                dma_pool_destroy(qchan->sdd_pool);
+        }
+}
+
+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+        struct dpaa2_qdma_comp *dpaa2_comp;
+        struct dpaa2_qdma_chan *qchan;
+        unsigned long flags;
+
+        dpaa2_comp = to_fsl_qdma_comp(vdesc);
+        qchan = dpaa2_comp->qchan;
+        spin_lock_irqsave(&qchan->queue_lock, flags);
+        list_del(&dpaa2_comp->list);
+        list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+        spin_unlock_irqrestore(&qchan->queue_lock, flags);
+}
+
+static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+        struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
+        struct dpaa2_qdma_chan *dpaa2_chan;
+        int num = priv->num_pairs;
+        int i;
+
+        INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
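+        /*
+         * Spread the NUM_CH virtual channels round-robin across the
+         * num_pairs hardware TX queues (the i % num mapping below).
+         */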
+        for (i = 0; i < dpaa2_qdma->n_chans; i++) {
+                dpaa2_chan = &dpaa2_qdma->chans[i];
+                dpaa2_chan->qdma = dpaa2_qdma;
+                dpaa2_chan->fqid = priv->tx_fqid[i % num];
+                dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
+                vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
+                spin_lock_init(&dpaa2_chan->queue_lock);
+                INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+                INIT_LIST_HEAD(&dpaa2_chan->comp_free);
+        }
+        return 0;
+}
+
+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
+{
+        struct device *dev = &dpdmai_dev->dev;
+        struct dpaa2_qdma_engine *dpaa2_qdma;
+        struct dpaa2_qdma_priv *priv;
+        int err;
+
+        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        if (!priv)
+                return -ENOMEM;
+        dev_set_drvdata(dev, priv);
+        priv->dpdmai_dev = dpdmai_dev;
+
+        priv->iommu_domain = iommu_get_domain_for_dev(dev);
+        if (priv->iommu_domain)
+                smmu_disable = false;
+
+        /* obtain a MC portal */
+        err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+        if (err) {
+                if (err == -ENXIO)
+                        err = -EPROBE_DEFER;
+                else
+                        dev_err(dev, "MC portal allocation failed\n");
+                goto err_mcportal;
+        }
+
+        /* DPDMAI initialization */
+        err = dpaa2_qdma_setup(dpdmai_dev);
+        if (err) {
| 762 | + dev_err(dev, "dpaa2_dpdmai_setup() failed\n"); |
+                goto err_dpdmai_setup;
+        }
+
+        /* DPIO */
+        err = dpaa2_qdma_dpio_setup(priv);
+        if (err) {
| 769 | + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n"); |
+                goto err_dpio_setup;
+        }
+
+        /* DPDMAI binding to DPIO */
+        err = dpaa2_dpdmai_bind(priv);
+        if (err) {
+                dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
+                goto err_bind;
+        }
+
+        /* DPDMAI enable */
+        err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+        if (err) {
| 783 | + dev_err(dev, "dpdmai_enable() faile\n"); |
+                goto err_enable;
+        }
+
+        dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
+        if (!dpaa2_qdma) {
+                err = -ENOMEM;
+                goto err_eng;
+        }
+
+        priv->dpaa2_qdma = dpaa2_qdma;
+        dpaa2_qdma->priv = priv;
+
+        dpaa2_qdma->desc_allocated = 0;
+        dpaa2_qdma->n_chans = NUM_CH;
+
+        dpaa2_dpdmai_init_channels(dpaa2_qdma);
+
+        if (soc_device_match(soc_fixup_tuning))
+                dpaa2_qdma->qdma_wrtype_fixup = true;
+        else
+                dpaa2_qdma->qdma_wrtype_fixup = false;
+
+        dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+        dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+        dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+
+        dpaa2_qdma->dma_dev.dev = dev;
+        dpaa2_qdma->dma_dev.device_alloc_chan_resources =
+                dpaa2_qdma_alloc_chan_resources;
+        dpaa2_qdma->dma_dev.device_free_chan_resources =
+                dpaa2_qdma_free_chan_resources;
+        dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
+        dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
+        dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+        err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+        if (err) {
+                dev_err(dev, "Can't register NXP QDMA engine.\n");
+                goto err_dpaa2_qdma;
+        }
+
+        return 0;
+
+err_dpaa2_qdma:
+        kfree(dpaa2_qdma);
+err_eng:
+        dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_enable:
+        dpaa2_dpdmai_dpio_unbind(priv);
+err_bind:
+        dpaa2_dpmai_store_free(priv);
+        dpaa2_dpdmai_dpio_free(priv);
+err_dpio_setup:
+        kfree(priv->ppriv);
+        dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_dpdmai_setup:
+        fsl_mc_portal_free(priv->mc_io);
+err_mcportal:
+        kfree(priv);
+        dev_set_drvdata(dev, NULL);
+        return err;
+}
+
+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
+{
+        struct dpaa2_qdma_engine *dpaa2_qdma;
+        struct dpaa2_qdma_priv *priv;
+        struct device *dev;
+
+        dev = &ls_dev->dev;
+        priv = dev_get_drvdata(dev);
+        dpaa2_qdma = priv->dpaa2_qdma;
+
+        dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+        dpaa2_dpdmai_dpio_unbind(priv);
+        dpaa2_dpmai_store_free(priv);
+        dpaa2_dpdmai_dpio_free(priv);
+        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+        fsl_mc_portal_free(priv->mc_io);
+        dev_set_drvdata(dev, NULL);
+        dpaa2_dpdmai_free_channels(dpaa2_qdma);
+
+        dma_async_device_unregister(&dpaa2_qdma->dma_dev);
+        kfree(priv);
+        kfree(dpaa2_qdma);
+
+        return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+        {
+                .vendor = FSL_MC_VENDOR_FREESCALE,
+                .obj_type = "dpdmai",
+        },
+        { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_qdma_driver = {
+        .driver = {
+                .name = "dpaa2-qdma",
+                .owner = THIS_MODULE,
+        },
+        .probe = dpaa2_qdma_probe,
+        .remove = dpaa2_qdma_remove,
+        .match_id_table = dpaa2_qdma_id_table
+};
+
+static int __init dpaa2_qdma_driver_init(void)
+{
+        return fsl_mc_driver_register(&dpaa2_qdma_driver);
+}
+late_initcall(dpaa2_qdma_driver_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+        fsl_mc_driver_unregister(&dpaa2_qdma_driver);
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-dpaa2-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __DPAA2_QDMA_H
+#define __DPAA2_QDMA_H
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
+
+struct dpaa2_qdma_sd_d {
+        u32 rsv:32;
+        union {
+                struct {
+                        u32 ssd:12; /* source stride distance */
+                        u32 sss:12; /* source stride size */
+                        u32 rsv1:8;
+                } sdf;
+                struct {
+                        u32 dsd:12; /* Destination stride distance */
+                        u32 dss:12; /* Destination stride size */
+                        u32 rsv2:8;
+                } ddf;
+        } df;
+        u32 rbpcmd;     /* Route-by-port command */
+        u32 cmd;
+} __attribute__((__packed__));
+
+/* Source descriptor command read transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
+/* Destination descriptor command write transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
+#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
+
+#define QMAN_FD_FMT_ENABLE   BIT(0)  /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE   BIT(15) /* bypass memory translation */
+#define QMAN_FD_BMT_DISABLE  (0)     /* bypass memory translation */
+#define QMAN_FD_SL_DISABLE   (0)     /* short length disabled */
+#define QMAN_FD_SL_ENABLE    BIT(14) /* short length enabled */
+
+#define QDMA_FINAL_BIT_DISABLE (0)      /* final bit disable */
+#define QDMA_FINAL_BIT_ENABLE  BIT(31)  /* final bit enable */
+
+#define QDMA_FD_SHORT_FORMAT  BIT(11)  /* short format */
+#define QDMA_FD_LONG_FORMAT   (0)      /* long format */
+#define QDMA_SER_DISABLE      (8)      /* no notification */
+#define QDMA_SER_CTX          BIT(8)   /* notification by FQD_CTX[fqid] */
+#define QDMA_SER_DEST         (2 << 8) /* notification by destination desc */
+#define QDMA_SER_BOTH         (3 << 8) /* source and dest notification */
+#define QDMA_FD_SPF_ENABLE    BIT(30)  /* source prefetch enable */
+
+#define QMAN_FD_VA_ENABLE    BIT(14) /* Address used is virtual address */
+#define QMAN_FD_VA_DISABLE   (0)     /* Address used is a real address */
+/* Flow Context: 49bit physical address */
+#define QMAN_FD_CBMT_ENABLE  BIT(15)
+#define QMAN_FD_CBMT_DISABLE (0)     /* Flow Context: 64bit virtual address */
+#define QMAN_FD_SC_DISABLE   (0)     /* stashing control */
+
+#define QDMA_FL_FMT_SBF      (0x0)   /* Single buffer frame */
+#define QDMA_FL_FMT_SGE      (0x2)   /* Scatter gather frame */
+#define QDMA_FL_BMT_ENABLE   BIT(15) /* enable bypass memory translation */
+#define QDMA_FL_BMT_DISABLE  (0x0)   /* disable bypass memory translation */
+#define QDMA_FL_SL_LONG      (0x0)   /* long length */
+#define QDMA_FL_SL_SHORT     (0x1)   /* short length */
+#define QDMA_FL_F            (0x1)   /* last frame list bit */
+
+/* Description of Frame list table structure */
+struct dpaa2_qdma_chan {
+        struct dpaa2_qdma_engine *qdma;
+        struct virt_dma_chan vchan;
+        struct virt_dma_desc vdesc;
+        enum dma_status status;
+        u32 fqid;
+
+        /* spinlock used by dpaa2 qdma driver */
+        spinlock_t queue_lock;
+        struct dma_pool *fd_pool;
+        struct dma_pool *fl_pool;
+        struct dma_pool *sdd_pool;
+
+        struct list_head comp_used;
+        struct list_head comp_free;
+};
+
+struct dpaa2_qdma_comp {
+        dma_addr_t fd_bus_addr;
+        dma_addr_t fl_bus_addr;
+        dma_addr_t desc_bus_addr;
+        struct dpaa2_fd *fd_virt_addr;
+        struct dpaa2_fl_entry *fl_virt_addr;
+        struct dpaa2_qdma_sd_d *desc_virt_addr;
+        struct dpaa2_qdma_chan *qchan;
+        struct virt_dma_desc vdesc;
+        struct list_head list;
+};
+
+struct dpaa2_qdma_engine {
+        struct dma_device dma_dev;
+        u32 n_chans;
+        struct dpaa2_qdma_chan chans[NUM_CH];
+        int qdma_wrtype_fixup;
+        int desc_allocated;
+
+        struct dpaa2_qdma_priv *priv;
+};
+
+/*
+ * dpaa2_qdma_priv - driver private data
+ */
+struct dpaa2_qdma_priv {
+        int dpqdma_id;
+
+        struct iommu_domain *iommu_domain;
+        struct dpdmai_attr dpdmai_attr;
+        struct device *dev;
+        struct fsl_mc_io *mc_io;
+        struct fsl_mc_device *dpdmai_dev;
+        u8 num_pairs;
+
+        struct dpaa2_qdma_engine *dpaa2_qdma;
+        struct dpaa2_qdma_priv_per_prio *ppriv;
+
+        struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
+        u32 tx_fqid[DPDMAI_PRIO_NUM];
+};
+
+struct dpaa2_qdma_priv_per_prio {
+        int req_fqid;
+        int rsp_fqid;
+        int prio;
+
+        struct dpaa2_io_store *store;
+        struct dpaa2_io_notification_ctx nctx;
+
+        struct dpaa2_qdma_priv *priv;
+};
+
+static struct soc_device_attribute soc_fixup_tuning[] = {
+        { .family = "QorIQ LX2160A"},
+        { },
+};
+
+/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
+                      sizeof(struct dpaa2_fl_entry) * 3 + \
+                      sizeof(struct dpaa2_qdma_sd_d) * 2)
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+                                   struct list_head *head);
+#endif /* __DPAA2_QDMA_H */