ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/dma/mmp_hsdma.c b/marvell/linux/drivers/dma/mmp_hsdma.c
new file mode 100644
index 0000000..4aa7e57
--- /dev/null
+++ b/marvell/linux/drivers/dma/mmp_hsdma.c
@@ -0,0 +1,1073 @@
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/platform_data/mmp_dma.h>
+#include <linux/dmapool.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/delay.h>
+#include "dmaengine.h"
+
+#define DCR 0x0 /* DMA Control Registers */
+#define DCFGR 0x4 /* DMA CFG Registers */
+#define DIER 0x8 /* DMA Interrupt Enable Registers */
+#define DISR 0xc /* DMA Interrupt Status Registers */
+
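+/*
+ * Per-channel register banks: channel n occupies a 0x100-byte window
+ * starting at offset 0x100 * (n + 1) from the controller base.
+ */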
+#define OFFSET_CH(n) (((n) + 1) << 8)
+
+#define DCCR(n) (0x0000 + OFFSET_CH(n)) /* DMA Channel(n) Control Registers */
+#define DCBCR(n) (0x0004 + OFFSET_CH(n)) /* DMA Channel(n) Byte Cnt Registers */
+
+#define DCIER(n) (0x0020 + OFFSET_CH(n)) /* DMA Channel(n) Interrupt Enable Registers */
+#define DCISR(n) (0x0024 + OFFSET_CH(n)) /* DMA Channel(n) Interrupt Status Registers */
+#define DCICR(n) (0x0028 + OFFSET_CH(n)) /* DMA Channel(n) Interrupt Clear Registers */
+
+#define DCSR(n) (0x0030 + OFFSET_CH(n)) /* DMA Channel(n) Status Registers */
+#define DCCBCR(n) (0x0034 + OFFSET_CH(n)) /* DMA Channel(n) Current Byte Cnt Registers */
+
+#define DCDLAR(n) (0x0050 + OFFSET_CH(n)) /* DMA Channel(n) Descriptor Low Address Registers */
+#define DCDHAR(n) (0x0054 + OFFSET_CH(n)) /* DMA Channel(n) Descriptor High Address Registers */
+
+#define DCCR_ABT BIT(6) /* channel abort (read / write) */
+#define DCCR_INT_MOD BIT(5) /* channel int mode (read / write) */
+#define DCCR_CHAIN_MOD BIT(4) /* channel chain mode (read / write) */
+#define DCCR_EN BIT(0) /* channel enable (read / write) */
+
+#define HSDMA_MAX_DESC_BYTES 0xffff
+
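+/*
+ * Hardware descriptor as fetched by the engine. Descriptors are linked
+ * into a chain through next_desc_{laddr,haddr}; this driver only uses
+ * 32-bit addresses, so the high words are always written as zero.
+ */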
+struct mmp_hsdma_desc_hw {
+ u32 src_laddr; /* Source Low Address */
+ u32 src_haddr; /* Source High Address */
+ u32 dest_laddr; /* Destination Low Address */
+ u32 dest_haddr; /* Destination High Address */
+ u32 byte_length; /* Byte Length */
+ u32 dummy; /* Dummy */
+ u32 next_desc_laddr; /* Next Descriptor Low Address */
+ u32 next_desc_haddr; /* Next Descriptor High Address */
+} __aligned(32);
+
+struct mmp_hsdma_desc_sw {
+ struct mmp_hsdma_desc_hw desc;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+};
+
+struct mmp_hsdma_phy;
+
+struct mmp_hsdma_chan {
+ struct device *dev;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct mmp_hsdma_phy *phy;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config slave_config;
+
+ /* channel's basic info */
+ struct tasklet_struct tasklet;
+ int dedicated_chan;
+
+ /* list for desc */
+ spinlock_t desc_lock; /* Descriptor list lock */
+ struct list_head chain_pending; /* Link descriptors queue for pending */
+ struct list_head chain_running; /* Link descriptors queue for running */
+ bool idle; /* channel state machine */
+ bool byte_align;
+
+ int user_do_qos;
+ int qos_count; /* Per-channel qos count */
+ enum dma_status status; /* channel state machine */
+ struct dma_pool *desc_pool; /* Descriptors pool */
+};
+
+struct mmp_hsdma_phy {
+ int idx;
+ void __iomem *base;
+ struct mmp_hsdma_chan *vchan;
+};
+
+struct mmp_hsdma_device {
+ int dma_channels;
+ int dedicated_chan_bitmap;
+ s32 lpm_qos;
+ struct pm_qos_request qos_idle;
+ void __iomem *base;
+ struct device *dev;
+ struct dma_device device;
+ struct mmp_hsdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
+};
+
+#define tx_to_mmp_hsdma_desc(tx) \
+ container_of(tx, struct mmp_hsdma_desc_sw, async_tx)
+#define to_mmp_hsdma_desc(lh) \
+ container_of(lh, struct mmp_hsdma_desc_sw, node)
+#define to_mmp_hsdma_chan(dchan) \
+ container_of(dchan, struct mmp_hsdma_chan, chan)
+#define to_mmp_hsdma_dev(dmadev) \
+ container_of(dmadev, struct mmp_hsdma_device, device)
+
+static void mmp_hsdma_qos_get(struct mmp_hsdma_chan *chan);
+static void mmp_hsdma_qos_put(struct mmp_hsdma_chan *chan);
+
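+/* Program the address of the first hardware descriptor for a channel. */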
+static void set_desc(struct mmp_hsdma_phy *phy, dma_addr_t addr)
+{
+ u32 reg;
+
+ reg = DCDLAR(phy->idx);
+ writel(addr, phy->base + reg);
+
+ reg = DCDHAR(phy->idx);
+ writel(0x00000000, phy->base + reg);
+}
+
+static void enable_chan(struct mmp_hsdma_phy *phy)
+{
+ u32 reg;
+
+ if (!phy->vchan)
+ return;
+
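+	/*
+	 * Enable the controller (DCR), unmask this channel in DIER and
+	 * DCIER, then start the channel with interrupt and chain modes set.
+	 */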
+ reg = DCR;
+ writel(0x1, phy->base + reg);
+
+ reg = DIER;
+ writel(readl(phy->base + reg) | BIT(phy->idx),
+ phy->base + reg);
+
+ reg = DCIER(phy->idx);
+ writel(0xf, phy->base + reg);
+
+ reg = DCCR(phy->idx);
+ writel(DCCR_INT_MOD | DCCR_CHAIN_MOD | DCCR_EN,
+ phy->base + reg);
+}
+
+static void disable_chan(struct mmp_hsdma_phy *phy)
+{
+ u32 reg;
+
+ if (!phy)
+ return;
+
+ reg = DCCR(phy->idx);
+	writel((readl(phy->base + reg) | DCCR_ABT) & ~DCCR_EN,
+	       phy->base + reg);
+}
+
+static int clear_chan_irq(struct mmp_hsdma_phy *phy)
+{
+ u32 reg;
+
+ reg = DCICR(phy->idx);
+ writel(0xf, phy->base + reg);
+
+ return 0;
+}
+
+static irqreturn_t mmp_hsdma_chan_handler(int irq, void *dev_id)
+{
+ struct mmp_hsdma_phy *phy = dev_id;
+ struct mmp_hsdma_chan *hschan = phy->vchan;
+
+ if (clear_chan_irq(phy) != 0)
+ return IRQ_NONE;
+
+ if (hschan)
+ tasklet_schedule(&hschan->tasklet);
+ else
+ pr_err("%s: hsdma channel has been freed\n", __func__);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mmp_hsdma_int_handler(int irq, void *dev_id)
+{
+ struct mmp_hsdma_device *hsdev = dev_id;
+ struct mmp_hsdma_phy *phy;
+ u32 disr = readl(hsdev->base + DISR);
+ int i, ret;
+ int irq_num = 0;
+ unsigned long flags;
+
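+	/*
+	 * Shared-irq mode: DISR holds one pending bit per channel, so walk
+	 * the set bits and dispatch to the per-channel handler.
+	 */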
+ while (disr) {
+ i = __ffs(disr);
+		/* only handle interrupts belonging to the hsdma driver */
+ if (i >= hsdev->dma_channels)
+ break;
+ disr &= (disr - 1);
+ phy = &hsdev->phy[i];
+ spin_lock_irqsave(&hsdev->phy_lock, flags);
+ ret = mmp_hsdma_chan_handler(irq, phy);
+ spin_unlock_irqrestore(&hsdev->phy_lock, flags);
+ if (ret == IRQ_HANDLED)
+ irq_num++;
+ }
+
+ if (irq_num)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+/* look up a free phy channel in descending priority order */
+static struct mmp_hsdma_phy *lookup_phy(struct mmp_hsdma_chan *hschan)
+{
+ int prio, i;
+ struct mmp_hsdma_device *hsdev = to_mmp_hsdma_dev(hschan->chan.device);
+ struct mmp_hsdma_phy *phy, *found = NULL;
+ unsigned long flags;
+
+ /*
+ * dma channel priorities
+ * ch 0 - 3 <--> (0)
+ * ch 4 - 7 <--> (1)
+ */
+
+ spin_lock_irqsave(&hsdev->phy_lock, flags);
+ if (hschan->dedicated_chan > 0) {
+ phy = &hsdev->phy[hschan->dedicated_chan];
+ if (!phy->vchan) {
+ phy->vchan = hschan;
+ found = phy;
+ goto out_unlock;
+ } else {
+ dev_err(hschan->dev, "dedicated channel %d already used!\n",
+ hschan->dedicated_chan);
+ }
+ }
+
+ for (prio = 0; prio <= ((hsdev->dma_channels - 1) & 0xf) >> 2; prio++) {
+ for (i = 0; i < hsdev->dma_channels; i++) {
+ if (prio != (i & 0xf) >> 2)
+ continue;
+ phy = &hsdev->phy[i];
+ if (!phy->vchan) {
+ phy->vchan = hschan;
+ found = phy;
+ goto out_unlock;
+ }
+ }
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&hsdev->phy_lock, flags);
+ return found;
+}
+
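+/*
+ * Release the physical channel: disable it and detach it from the virtual
+ * channel so lookup_phy() can hand it out again.
+ */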
+static void mmp_hsdma_free_phy(struct mmp_hsdma_chan *hschan)
+{
+ struct mmp_hsdma_device *hsdev = to_mmp_hsdma_dev(hschan->chan.device);
+ unsigned long flags;
+ u32 reg;
+
+ if (!hschan->phy)
+ return;
+
+ reg = DCCR(hschan->phy->idx);
+	writel(readl(hschan->phy->base + reg) & ~DCCR_EN,
+	       hschan->phy->base + reg);
+
+ spin_lock_irqsave(&hsdev->phy_lock, flags);
+ hschan->phy->vchan = NULL;
+ hschan->phy = NULL;
+ spin_unlock_irqrestore(&hsdev->phy_lock, flags);
+}
+
+/**
+ * start_pending_queue - transfer any pending transactions
+ * @chan: hsdma channel to start
+ *
+ * Move descriptors from the pending list to the running list and start
+ * the hardware. Caller must hold &chan->desc_lock.
+ */
+static int start_pending_queue(struct mmp_hsdma_chan *chan)
+{
+ struct mmp_hsdma_desc_sw *desc;
+ struct mmp_hsdma_desc_sw *_desc;
+
+	/* still running; the completion irq will start the pending list */
+ if (chan->status == DMA_IN_PROGRESS) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ return -1;
+ }
+
+ if (list_empty(&chan->chain_pending)) {
+ /* chance to re-fetch phy channel with higher prio */
+ mmp_hsdma_free_phy(chan);
+ dev_dbg(chan->dev, "no pending list\n");
+ return -1;
+ }
+
+ if (!chan->phy) {
+ chan->phy = lookup_phy(chan);
+ if (!chan->phy) {
+ dev_dbg(chan->dev, "no free dma channel\n");
+ return -1;
+ }
+ }
+
+	/*
+	 * pending -> running
+	 * reinitialize the pending list
+	 */
+ list_for_each_entry_safe(desc, _desc, &chan->chain_pending, node) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->chain_running);
+ }
+
+ desc = list_first_entry(&chan->chain_running,
+ struct mmp_hsdma_desc_sw, node);
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_desc(chan->phy, desc->async_tx.phys);
+ /* ensure descriptors written before starting dma */
+ wmb();
+ enable_chan(chan->phy);
+ chan->idle = false;
+ chan->status = DMA_IN_PROGRESS;
+ return 0;
+}
+
+/* desc->tx_list ==> pending list */
+static dma_cookie_t mmp_hsdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(tx->chan);
+ struct mmp_hsdma_desc_sw *desc = tx_to_mmp_hsdma_desc(tx);
+ struct mmp_hsdma_desc_sw *child;
+ unsigned long flags;
+ dma_cookie_t cookie = -EBUSY;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ list_for_each_entry(child, &desc->tx_list, node) {
+ cookie = dma_cookie_assign(&child->async_tx);
+ }
+
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return cookie;
+}
+
+static struct mmp_hsdma_desc_sw *
+mmp_hsdma_alloc_descriptor(struct mmp_hsdma_chan *chan)
+{
+ struct mmp_hsdma_desc_sw *desc;
+ dma_addr_t hsdesc;
+
+ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &hsdesc);
+ if (!desc) {
+ dev_err(chan->dev, "out of memory for link descriptor\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+	/* each desc has its own tx_submit */
+ desc->async_tx.tx_submit = mmp_hsdma_tx_submit;
+ desc->async_tx.phys = hsdesc;
+
+ return desc;
+}
+
+static int mmp_hsdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+
+ if (chan->desc_pool)
+ return 1;
+
+ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+ chan->dev,
+ sizeof(struct mmp_hsdma_desc_sw),
+ __alignof__(struct mmp_hsdma_desc_sw),
+ 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ chan->status = DMA_COMPLETE;
+ chan->dir = 0;
+ chan->idle = true;
+ return 1;
+}
+
+static void mmp_hsdma_free_desc_list(struct mmp_hsdma_chan *chan,
+ struct list_head *list)
+{
+ struct mmp_hsdma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ }
+}
+
+static void mmp_hsdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+ unsigned long flags;
+
+	/* wait for any scheduled tasklet to finish */
+ tasklet_kill(&chan->tasklet);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ mmp_hsdma_free_desc_list(chan, &chan->chain_pending);
+ mmp_hsdma_free_desc_list(chan, &chan->chain_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+ chan->idle = true;
+ chan->status = DMA_COMPLETE;
+ chan->dir = 0;
+}
+
+static struct dma_async_tx_descriptor *
+mmp_hsdma_prep_memcpy(struct dma_chan *dchan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct mmp_hsdma_chan *chan;
+ struct mmp_hsdma_desc_sw *first = NULL, *prev = NULL, *new;
+ size_t copy = 0;
+
+ if (!dchan)
+ return NULL;
+
+ if (!len)
+ return NULL;
+
+ chan = to_mmp_hsdma_chan(dchan);
+ chan->byte_align = false;
+
+	if (!chan->dir)
+		chan->dir = DMA_MEM_TO_MEM;
+
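+	/*
+	 * Split the copy into hardware descriptors of at most
+	 * HSDMA_MAX_DESC_BYTES each and chain them via next_desc_laddr.
+	 */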
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_hsdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ copy = min_t(size_t, len, HSDMA_MAX_DESC_BYTES);
+
+ if (dma_src & 0x3 || dma_dst & 0x3)
+ chan->byte_align = true;
+
+ new->desc.src_laddr = dma_src;
+ new->desc.src_haddr = 0x00000000;
+ new->desc.dest_laddr = dma_dst;
+ new->desc.dest_haddr = 0x00000000;
+ new->desc.byte_length = copy;
+ new->desc.dummy = 0x00000000;
+ new->desc.next_desc_laddr = 0x00000000;
+ new->desc.next_desc_haddr = 0x00000000;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.next_desc_laddr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= copy;
+
+ if (chan->dir == DMA_MEM_TO_DEV) {
+ dma_src += copy;
+ } else if (chan->dir == DMA_DEV_TO_MEM) {
+ dma_dst += copy;
+ } else if (chan->dir == DMA_MEM_TO_MEM) {
+ dma_src += copy;
+ dma_dst += copy;
+ }
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ /* client is in control of this ack */
+ first->async_tx.flags = flags;
+ first->async_tx.cookie = -EBUSY;
+
+ return &first->async_tx;
+
+fail:
+ if (first) {
+ mmp_hsdma_free_desc_list(chan, &first->tx_list);
+ }
+ return NULL;
+}
+
+static int mmp_hsdma_config(struct dma_chan *dchan,
+ struct dma_slave_config *cfg)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+
+ memcpy(&chan->slave_config, cfg, sizeof(*cfg));
+ return 0;
+}
+
+static int mmp_hsdma_pause_chan(struct dma_chan *dchan)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+
+ if (!chan->phy)
+ return -1;
+
+ disable_chan(chan->phy);
+ mmp_hsdma_qos_put(chan);
+ chan->status = DMA_PAUSED;
+
+ return 0;
+}
+
+static int mmp_hsdma_terminate_all(struct dma_chan *dchan)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+ unsigned long flags;
+
+ if (!dchan)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ chan->status = DMA_COMPLETE;
+ disable_chan(chan->phy);
+ mmp_hsdma_free_phy(chan);
+ mmp_hsdma_free_desc_list(chan, &chan->chain_pending);
+ mmp_hsdma_free_desc_list(chan, &chan->chain_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ chan->idle = true;
+ mmp_hsdma_qos_put(chan);
+
+ return 0;
+}
+
+static int mmp_hsdma_dump_status(struct dma_chan *dchan)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+ struct mmp_hsdma_device *hsdev = to_mmp_hsdma_dev(chan->chan.device);
+ struct mmp_hsdma_phy *phy;
+ u32 reg;
+
+ if (!dchan)
+ return -EINVAL;
+
+ if (chan->dedicated_chan > 0)
+ phy = &hsdev->phy[chan->dedicated_chan];
+ else
+ phy = chan->phy;
+
+ if (!phy) {
+ dev_info(chan->dev, "dma dump status: phy already freed\n");
+ return -EINVAL;
+ }
+
+ dev_info(chan->dev, "==== high speed dma dump status ====\n");
+ reg = DCR;
+ dev_info(chan->dev, "DCR[0x%x]=0x%x\n", reg, readl(phy->base + reg));
+ reg = DCFGR;
+ dev_info(chan->dev, "DCFGR[0x%x]=0x%x\n", reg, readl(phy->base + reg));
+ reg = DCCR(phy->idx);
+ dev_info(chan->dev, "DCCR(%d)[0x%x]=0x%x\n", phy->idx, reg, readl(phy->base + reg));
+ reg = DCDLAR(phy->idx);
+ dev_info(chan->dev, "DCDLAR(%d)[0x%x]=0x%x\n", phy->idx, reg, readl(phy->base + reg));
+ reg = DCDHAR(phy->idx);
+ dev_info(chan->dev, "DCDHAR(%d)[0x%x]=0x%x\n", phy->idx, reg, readl(phy->base + reg));
+
+ return 0;
+}
+
+static enum dma_status mmp_hsdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (ret == DMA_COMPLETE)
+ return ret;
+ else
+ return chan->status;
+}
+
+/**
+ * mmp_hsdma_issue_pending - issue the DMA start command
+ * @dchan: DMA channel
+ *
+ * Move the pending list to the running list and start the transfer.
+ */
+static void mmp_hsdma_issue_pending(struct dma_chan *dchan)
+{
+ struct mmp_hsdma_chan *chan = to_mmp_hsdma_chan(dchan);
+ unsigned long flags;
+ int ret = 0;
+
+ mmp_hsdma_qos_get(chan);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ ret = start_pending_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ if (ret)
+ mmp_hsdma_qos_put(chan);
+}
+
+/*
+ * dma_do_tasklet
+ * Run the completion callback for each finished descriptor and
+ * start the pending list.
+ */
+static void dma_do_tasklet(unsigned long data)
+{
+ struct mmp_hsdma_chan *chan = (struct mmp_hsdma_chan *)data;
+ struct mmp_hsdma_desc_sw *desc, *_desc;
+ LIST_HEAD(chain_cleanup);
+ unsigned long flags;
+ struct dmaengine_desc_callback cb;
+ int ret = 0;
+
+ /* return if this channel has been stopped */
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->status == DMA_COMPLETE) {
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_move(&desc->node, &chain_cleanup);
+ }
+
+ /*
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
+ */
+ chan->status = list_empty(&chan->chain_running) ?
+ DMA_COMPLETE : DMA_IN_PROGRESS;
+
+ /* Start any pending transactions automatically */
+ ret = start_pending_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* restarting the pending queue failed; drop the qos reference */
+ if (ret)
+ mmp_hsdma_qos_put(chan);
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+ /* Run the link descriptor callback function */
+ dmaengine_desc_get_callback(txd, &cb);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
+ }
+}
+
+static int mmp_hsdma_remove(struct platform_device *op)
+{
+ struct mmp_hsdma_device *hsdev = platform_get_drvdata(op);
+ struct mmp_hsdma_phy *phy;
+ int i, irq = 0, irq_num = 0;
+
+ if (op->dev.of_node)
+ of_dma_controller_free(op->dev.of_node);
+
+#ifdef CONFIG_PM_RUNTIME
+ pm_qos_remove_request(&hsdev->qos_idle);
+#endif
+ for (i = 0; i < hsdev->dma_channels; i++) {
+ if (platform_get_irq(op, i) > 0)
+ irq_num++;
+ }
+
+ if (irq_num != hsdev->dma_channels) {
+ irq = platform_get_irq(op, 0);
+ devm_free_irq(&op->dev, irq, hsdev);
+ } else {
+ for (i = 0; i < hsdev->dma_channels; i++) {
+ phy = &hsdev->phy[i];
+ irq = platform_get_irq(op, i);
+ devm_free_irq(&op->dev, irq, phy);
+ }
+ }
+ dma_async_device_unregister(&hsdev->device);
+ platform_set_drvdata(op, NULL);
+ return 0;
+}
+
+static const struct of_device_id mmp_hsdma_dt_ids[] = {
+ { .compatible = "asr,hsdma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mmp_hsdma_dt_ids);
+
+static int mmp_hsdma_chan_init(struct mmp_hsdma_device *hsdev, int idx, int irq)
+{
+ struct mmp_hsdma_phy *phy = &hsdev->phy[idx];
+ struct mmp_hsdma_chan *chan;
+ int ret;
+
+ chan = devm_kzalloc(hsdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (chan == NULL)
+ return -ENOMEM;
+
+ phy->idx = idx;
+ phy->base = hsdev->base;
+
+ disable_chan(phy);
+ clear_chan_irq(phy);
+
+ if (irq) {
+ ret = devm_request_irq(hsdev->dev, irq, mmp_hsdma_chan_handler,
+ IRQF_SHARED, "hsdma", phy);
+ if (ret) {
+ dev_err(hsdev->dev, "channel request irq fail!\n");
+ return ret;
+ }
+ }
+
+ spin_lock_init(&chan->desc_lock);
+ chan->dev = hsdev->dev;
+ chan->chan.device = &hsdev->device;
+ tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ INIT_LIST_HEAD(&chan->chain_pending);
+ INIT_LIST_HEAD(&chan->chain_running);
+
+ chan->status = DMA_COMPLETE;
+ chan->qos_count = 0;
+ chan->user_do_qos = 1;
+
+ /* register virt channel to dma engine */
+ list_add_tail(&chan->chan.device_node, &hsdev->device.channels);
+
+ return 0;
+}
+
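+/*
+ * One-time clock/reset setup for the HSDMA block through the APMU
+ * (0xd4282800): update the control register at offset 0x3ec, then
+ * poll offset 0xe0 until BIT(15) clears.
+ */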
+static int mmp_hsdma_clk_init(void)
+{
+ void __iomem* apmu;
+ u32 val;
+
+ apmu = ioremap(0xd4282800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("hsdma: error to ioremap APMU base\n");
+ return -ENXIO;
+ }
+
+ val = readl(apmu + 0x3ec);
+ val &= ~(0x1 << 1);
+	writel(val, apmu + 0x3ec); /* reset */
+ ndelay(300);
+
+	val &= ~(0x7 << 8);
+ val |= 0x3 | (0x1 << 15) | (0x2 << 8);
+ writel(val, apmu + 0x3ec);
+ do {
+ val = readl(apmu + 0xe0);
+ if (!(val & BIT(15)))
+ break;
+	} while (1);
+ iounmap(apmu);
+
+ return 0;
+}
+
+static struct dma_chan *mmp_hsdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_hsdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct mmp_hsdma_chan *vchan;
+ int dedicated_chan;
+
+ chan = dma_get_any_slave_channel(&d->device);
+ if (!chan)
+ return NULL;
+
+ vchan = to_mmp_hsdma_chan(chan);
+
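+	/*
+	 * Single-cell DMA specifier: bits [7:0] optionally select a dedicated
+	 * physical channel (0 means none), bits [15:8] indicate that the
+	 * client performs qos handling itself.
+	 */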
+	dedicated_chan = dma_spec->args[0] & 0xff;
+ if (dedicated_chan == 0 || dedicated_chan >= d->dma_channels)
+ dedicated_chan = -1;
+ vchan->dedicated_chan = dedicated_chan;
+
+#ifdef CONFIG_PM_RUNTIME
+ if (unlikely(dma_spec->args_count != 1))
+ dev_err(d->dev, "#dma-cells should be 1!\n");
+
+ vchan->user_do_qos = (dma_spec->args[0] & 0xff00) ? 1 : 0;
+
+ if (vchan->user_do_qos)
+ dev_dbg(d->dev, "channel %d: user does qos itself\n",
+ vchan->chan.chan_id);
+ else
+ dev_dbg(d->dev, "channel %d: hsdma does qos\n",
+ vchan->chan.chan_id);
+#endif
+ return chan;
+}
+
+static int mmp_hsdma_probe(struct platform_device *op)
+{
+ struct mmp_hsdma_device *hsdev;
+ const struct of_device_id *of_id;
+ struct mmp_dma_platdata *hsdata = dev_get_platdata(&op->dev);
+ struct resource *iores;
+ int i, ret, irq = 0;
+ int dma_channels = 0, irq_num = 0;
+ const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ hsdev = devm_kzalloc(&op->dev, sizeof(*hsdev), GFP_KERNEL);
+ if (!hsdev)
+ return -ENOMEM;
+ hsdev->dev = &op->dev;
+
+ spin_lock_init(&hsdev->phy_lock);
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ hsdev->base = devm_ioremap_resource(hsdev->dev, iores);
+ if (IS_ERR(hsdev->base))
+ return PTR_ERR(hsdev->base);
+
+ of_id = of_match_device(mmp_hsdma_dt_ids, hsdev->dev);
+ if (of_id)
+ of_property_read_u32(hsdev->dev->of_node, "#dma-channels",
+ &dma_channels);
+ else if (hsdata && hsdata->dma_channels)
+ dma_channels = hsdata->dma_channels;
+ else
+ dma_channels = 8; /* default 8 channel */
+ hsdev->dma_channels = dma_channels;
+
+#ifdef CONFIG_PM_RUNTIME
+ if (!of_id || of_property_read_u32(hsdev->dev->of_node,
+ "lpm-qos", &hsdev->lpm_qos)) {
+ dev_err(hsdev->dev, "cannot find lpm-qos in device tree\n");
+ return -EINVAL;
+ }
+ hsdev->qos_idle.name = op->name;
+
+ pm_qos_add_request(&hsdev->qos_idle, PM_QOS_CPUIDLE_BLOCK,
+ PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+
+ pm_runtime_enable(&op->dev);
+	/*
+	 * PM operations may run in atomic context depending on client
+	 * drivers' behavior, so mark runtime PM as irq safe.
+	 */
+ pm_runtime_irq_safe(&op->dev);
+#endif
+
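+	/*
+	 * Count per-channel interrupt lines; if fewer than dma_channels are
+	 * present, fall back to one shared irq demuxed in
+	 * mmp_hsdma_int_handler().
+	 */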
+ for (i = 0; i < dma_channels; i++) {
+ if (platform_get_irq_optional(op, i) > 0)
+ irq_num++;
+ }
+
+ hsdev->phy = devm_kcalloc(hsdev->dev, dma_channels, sizeof(*hsdev->phy),
+ GFP_KERNEL);
+ if (hsdev->phy == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hsdev->device.channels);
+
+ for (i = 0; i < dma_channels; i++) {
+ irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
+ ret = mmp_hsdma_chan_init(hsdev, i, irq);
+ if (ret)
+ return ret;
+ }
+
+ if (irq_num != dma_channels) {
+ /* all chan share one irq, demux inside */
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(hsdev->dev, irq, mmp_hsdma_int_handler,
+ IRQF_SHARED, "hsdma", hsdev);
+ if (ret)
+ return ret;
+ }
+
+ dma_cap_set(DMA_SLAVE, hsdev->device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, hsdev->device.cap_mask);
+ hsdev->device.dev = &op->dev;
+ hsdev->device.device_alloc_chan_resources = mmp_hsdma_alloc_chan_resources;
+ hsdev->device.device_free_chan_resources = mmp_hsdma_free_chan_resources;
+ hsdev->device.device_tx_status = mmp_hsdma_tx_status;
+ hsdev->device.device_prep_dma_memcpy = mmp_hsdma_prep_memcpy;
+
+ hsdev->device.device_issue_pending = mmp_hsdma_issue_pending;
+ hsdev->device.device_config = mmp_hsdma_config;
+ hsdev->device.device_pause = mmp_hsdma_pause_chan;
+ hsdev->device.device_terminate_all = mmp_hsdma_terminate_all;
+ hsdev->device.device_dump_status = mmp_hsdma_dump_status;
+ hsdev->device.copy_align = DMAENGINE_ALIGN_4_BYTES;
+ hsdev->device.src_addr_widths = widths;
+ hsdev->device.dst_addr_widths = widths;
+ hsdev->device.directions = BIT(DMA_MEM_TO_MEM);
+
+ if (hsdev->dev->coherent_dma_mask)
+ dma_set_mask(hsdev->dev, hsdev->dev->coherent_dma_mask);
+ else
+ dma_set_mask(hsdev->dev, DMA_BIT_MASK(64));
+
+ ret = dma_async_device_register(&hsdev->device);
+ if (ret) {
+ dev_err(hsdev->device.dev, "unable to register\n");
+ return ret;
+ }
+
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_hsdma_dma_xlate, hsdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(op, hsdev);
+
+ /* Init hsdma clk */
+ mmp_hsdma_clk_init();
+
+ dev_info(hsdev->device.dev, "initialized %d channels\n", dma_channels);
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Per-channel qos get/put functions. They ensure pm_runtime_get/put are
+ * not called multiple times for one channel, so the get/put calls always
+ * balance for the entire device.
+ */
+static void mmp_hsdma_qos_get(struct mmp_hsdma_chan *chan)
+{
+ unsigned long flags;
+
+ if (chan->user_do_qos)
+ return;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->qos_count == 0) {
+ chan->qos_count = 1;
+		/*
+		 * Safe inside the spinlock because runtime PM is marked
+		 * irq safe. The same holds for mmp_hsdma_qos_put().
+		 */
+ pm_runtime_get_sync(chan->dev);
+ }
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+static void mmp_hsdma_qos_put(struct mmp_hsdma_chan *chan)
+{
+ unsigned long flags;
+
+ if (chan->user_do_qos)
+ return;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->qos_count == 1) {
+ chan->qos_count = 0;
+ pm_runtime_put_autosuspend(chan->dev);
+ }
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+static int mmp_hsdma_runtime_suspend(struct device *dev)
+{
+ struct mmp_hsdma_device *hsdev = dev_get_drvdata(dev);
+
+ pm_qos_update_request(&hsdev->qos_idle,
+ PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+
+ return 0;
+}
+
+static int mmp_hsdma_runtime_resume(struct device *dev)
+{
+ struct mmp_hsdma_device *hsdev = dev_get_drvdata(dev);
+
+ pm_qos_update_request(&hsdev->qos_idle, hsdev->lpm_qos);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mmp_hsdma_pmops = {
+ SET_RUNTIME_PM_OPS(mmp_hsdma_runtime_suspend,
+ mmp_hsdma_runtime_resume, NULL)
+};
+#define MMP_HSDMA_PMOPS (&mmp_hsdma_pmops)
+#else
+static inline void mmp_hsdma_qos_get(struct mmp_hsdma_chan *chan)
+{
+}
+
+static inline void mmp_hsdma_qos_put(struct mmp_hsdma_chan *chan)
+{
+}
+
+#define mmp_hsdma_runtime_suspend NULL
+#define mmp_hsdma_runtime_resume NULL
+#define MMP_HSDMA_PMOPS NULL
+#endif
+
+static const struct platform_device_id mmp_hsdma_id_table[] = {
+ { "mmp-hsdma", },
+ { },
+};
+
+static struct platform_driver mmp_hsdma_driver = {
+ .driver = {
+ .name = "mmp-hsdma",
+ .of_match_table = mmp_hsdma_dt_ids,
+ .pm = MMP_HSDMA_PMOPS,
+ },
+ .id_table = mmp_hsdma_id_table,
+ .probe = mmp_hsdma_probe,
+ .remove = mmp_hsdma_remove,
+};
+
+module_platform_driver(mmp_hsdma_driver);
+
+MODULE_AUTHOR("ASR Microelectronics");
+MODULE_DESCRIPTION("ASR High Speed DMA Driver");
+MODULE_LICENSE("GPL v2");