[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/rapidio/devices/Kconfig b/src/kernel/linux/v4.14/drivers/rapidio/devices/Kconfig
new file mode 100644
index 0000000..c4cb087
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/rapidio/devices/Kconfig
@@ -0,0 +1,10 @@
+#
+# RapidIO master port configuration
+#
+
+config RAPIDIO_TSI721
+	tristate "IDT Tsi721 PCI Express SRIO Controller support"
+	depends on RAPIDIO && PCIEPORTBUS
+	default n
+	---help---
+	  Include support for IDT Tsi721 PCI Express Serial RapidIO controller.
diff --git a/src/kernel/linux/v4.14/drivers/rapidio/devices/Makefile b/src/kernel/linux/v4.14/drivers/rapidio/devices/Makefile
new file mode 100644
index 0000000..927dbf8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/rapidio/devices/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for RapidIO devices
+#
+
+obj-$(CONFIG_RAPIDIO_TSI721)	+= tsi721_mport.o
+tsi721_mport-y			:= tsi721.o
+tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o
+obj-$(CONFIG_RAPIDIO_MPORT_CDEV) += rio_mport_cdev.o
diff --git a/src/kernel/linux/v4.14/drivers/rapidio/devices/rio_mport_cdev.c b/src/kernel/linux/v4.14/drivers/rapidio/devices/rio_mport_cdev.c
new file mode 100644
index 0000000..f207f87
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/rapidio/devices/rio_mport_cdev.c
@@ -0,0 +1,2733 @@
+/*
+ * RapidIO mport character device
+ *
+ * Copyright 2014-2015 Integrated Device Technology, Inc.
+ *    Alexandre Bounine <alexandre.bounine@idt.com>
+ * Copyright 2014-2015 Prodrive Technologies
+ *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
+ *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ *    Aurelien Jacquiot <a-jacquiot@ti.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/kfifo.h>
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+
+#include <linux/dma-mapping.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
+
+#include <linux/rio.h>
+#include <linux/rio_ids.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_mport_cdev.h>
+
+#include "../rio.h"
+
+#define DRV_NAME	"rio_mport"
+#define DRV_PREFIX	DRV_NAME ": "
+#define DEV_NAME	"rio_mport"
+#define DRV_VERSION     "1.0.0"
+
+/* Debug output filtering masks */
+enum {
+	DBG_NONE	= 0,
+	DBG_INIT	= BIT(0), /* driver init */
+	DBG_EXIT	= BIT(1), /* driver exit */
+	DBG_MPORT	= BIT(2), /* mport add/remove */
+	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
+	DBG_DMA		= BIT(4), /* DMA transfer messages */
+	DBG_MMAP	= BIT(5), /* mapping messages */
+	DBG_IBW		= BIT(6), /* inbound window */
+	DBG_EVENT	= BIT(7), /* event handling messages */
+	DBG_OBW		= BIT(8), /* outbound window messages */
+	DBG_DBELL	= BIT(9), /* doorbell messages */
+	DBG_ALL		= ~0,
+};
+
+#ifdef DEBUG
+#define rmcd_debug(level, fmt, arg...)		\
+	do {					\
+		if (DBG_##level & dbg_level)	\
+			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
+	} while (0)
+#else
+#define rmcd_debug(level, fmt, arg...) \
+		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
+#endif
+
+#define rmcd_warn(fmt, arg...) \
+	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)
+
+#define rmcd_error(fmt, arg...) \
+	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
+
+MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
+MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
+MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
+MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
+MODULE_DESCRIPTION("RapidIO mport character device driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int dma_timeout = 3000; /* DMA transfer timeout in msec */
+module_param(dma_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");
+
+#ifdef DEBUG
+static u32 dbg_level = DBG_NONE;
+module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
+#endif
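+
+/*
+ * Example (debug builds only; module name rio_mport_cdev per the Makefile
+ * above): enable DMA and mapping messages, DBG_DMA | DBG_MMAP == 0x30:
+ *
+ *   modprobe rio_mport_cdev dbg_level=0x30
+ *
+ * dbg_level is also writable at runtime via
+ * /sys/module/rio_mport_cdev/parameters/dbg_level.
+ */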
+
+/*
+ * An internal DMA coherent buffer
+ */
+struct mport_dma_buf {
+	void		*ib_base;
+	dma_addr_t	ib_phys;
+	u32		ib_size;
+	u64		ib_rio_base;
+	bool		ib_map;
+	struct file	*filp;
+};
+
+/*
+ * Internal memory mapping structure
+ */
+enum rio_mport_map_dir {
+	MAP_INBOUND,
+	MAP_OUTBOUND,
+	MAP_DMA,
+};
+
+struct rio_mport_mapping {
+	struct list_head node;
+	struct mport_dev *md;
+	enum rio_mport_map_dir dir;
+	u16 rioid;
+	u64 rio_addr;
+	dma_addr_t phys_addr; /* for mmap */
+	void *virt_addr; /* kernel address, for dma_free_coherent */
+	u64 size;
+	struct kref ref; /* refcount of vmas sharing the mapping */
+	struct file *filp;
+};
+
+struct rio_mport_dma_map {
+	int valid;
+	u64 length;
+	void *vaddr;
+	dma_addr_t paddr;
+};
+
+#define MPORT_MAX_DMA_BUFS	16
+#define MPORT_EVENT_DEPTH	10
+
+/*
+ * mport_dev - driver-specific structure that represents mport device
+ * @active     mport device status flag
+ * @node       list node to maintain list of registered mports
+ * @cdev       character device
+ * @dev        associated device object
+ * @mport      associated subsystem's master port device object
+ * @buf_mutex  lock for buffer handling
+ * @file_mutex lock for open files list
+ * @file_list  list of open files on given mport
+ * @properties properties of this mport
+ * @doorbells  list of inbound doorbell filters
+ * @db_lock    lock for doorbell list
+ * @portwrites queue of inbound portwrites
+ * @pw_lock    lock for port write queue
+ * @mappings   queue for memory mappings
+ * @dma_chan   default DMA channel associated with this device
+ * @dma_ref    reference counter for the default DMA channel
+ * @comp       completion used for DMA channel release synchronization
+ */
+struct mport_dev {
+	atomic_t		active;
+	struct list_head	node;
+	struct cdev		cdev;
+	struct device		dev;
+	struct rio_mport	*mport;
+	struct mutex		buf_mutex;
+	struct mutex		file_mutex;
+	struct list_head	file_list;
+	struct rio_mport_properties	properties;
+	struct list_head		doorbells;
+	spinlock_t			db_lock;
+	struct list_head		portwrites;
+	spinlock_t			pw_lock;
+	struct list_head	mappings;
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	struct dma_chan *dma_chan;
+	struct kref	dma_ref;
+	struct completion comp;
+#endif
+};
+
+/*
+ * mport_cdev_priv - data structure specific to individual file object
+ *                   associated with an open device
+ * @md            master port character device object
+ * @async_queue   asynchronous notification queue
+ * @list          file objects tracking list
+ * @db_filters    inbound doorbell filters for this descriptor
+ * @pw_filters    portwrite filters for this descriptor
+ * @event_fifo    event fifo for this descriptor
+ * @event_rx_wait wait queue for this descriptor
+ * @fifo_lock     lock for event_fifo
+ * @event_mask    event mask for this descriptor
+ * @dmach         DMA engine channel allocated for specific file object
+ * @async_list    list of uncompleted asynchronous DMA requests
+ * @pend_list     list of pending (timed out or interrupted) DMA requests
+ * @req_lock      lock protecting async_list and pend_list
+ * @dma_lock      lock serializing DMA channel allocation for this descriptor
+ * @dma_ref       reference counter for the allocated DMA channel
+ * @comp          completion signaled when the DMA channel can be released
+ */
+struct mport_cdev_priv {
+	struct mport_dev	*md;
+	struct fasync_struct	*async_queue;
+	struct list_head	list;
+	struct list_head	db_filters;
+	struct list_head        pw_filters;
+	struct kfifo            event_fifo;
+	wait_queue_head_t       event_rx_wait;
+	spinlock_t              fifo_lock;
+	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	struct dma_chan		*dmach;
+	struct list_head	async_list;
+	struct list_head	pend_list;
+	spinlock_t              req_lock;
+	struct mutex		dma_lock;
+	struct kref		dma_ref;
+	struct completion	comp;
+#endif
+};
+
+/*
+ * rio_mport_pw_filter - structure to describe a portwrite filter
+ * @md_node   node in mport device's list
+ * @priv_node node in private file object's list
+ * @priv      reference to private data
+ * @filter    actual portwrite filter
+ */
+struct rio_mport_pw_filter {
+	struct list_head md_node;
+	struct list_head priv_node;
+	struct mport_cdev_priv *priv;
+	struct rio_pw_filter filter;
+};
+
+/*
+ * rio_mport_db_filter - structure to describe a doorbell filter
+ * @data_node reference to device node
+ * @priv_node node in private data
+ * @priv      reference to private data
+ * @filter    actual doorbell filter
+ */
+struct rio_mport_db_filter {
+	struct list_head data_node;
+	struct list_head priv_node;
+	struct mport_cdev_priv *priv;
+	struct rio_doorbell_filter filter;
+};
+
+static LIST_HEAD(mport_devs);
+static DEFINE_MUTEX(mport_devs_lock);
+
+#if (0) /* used by commented out portion of poll function : FIXME */
+static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
+#endif
+
+static struct class *dev_class;
+static dev_t dev_number;
+
+static struct workqueue_struct *dma_wq;
+
+static void mport_release_mapping(struct kref *ref);
+
+static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
+			      int local)
+{
+	struct rio_mport *mport = priv->md->mport;
+	struct rio_mport_maint_io maint_io;
+	u32 *buffer;
+	u32 offset;
+	size_t length;
+	int ret, i;
+
+	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
+		return -EFAULT;
+
+	if ((maint_io.offset % 4) ||
+	    (maint_io.length == 0) || (maint_io.length % 4) ||
+	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
+		return -EINVAL;
+
+	buffer = vmalloc(maint_io.length);
+	if (buffer == NULL)
+		return -ENOMEM;
+	length = maint_io.length/sizeof(u32);
+	offset = maint_io.offset;
+
+	for (i = 0; i < length; i++) {
+		if (local)
+			ret = __rio_local_read_config_32(mport,
+				offset, &buffer[i]);
+		else
+			ret = rio_mport_read_config_32(mport, maint_io.rioid,
+				maint_io.hopcount, offset, &buffer[i]);
+		if (ret)
+			goto out;
+
+		offset += 4;
+	}
+
+	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+				   buffer, maint_io.length)))
+		ret = -EFAULT;
+out:
+	vfree(buffer);
+	return ret;
+}
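+
+/*
+ * User-space sketch for the read path above (hypothetical fd/destid values,
+ * error handling omitted): read the 32-bit Device Identity CAR at offset 0
+ * of a remote device one hop away:
+ *
+ *   __u32 val;
+ *   struct rio_mport_maint_io io = {
+ *           .rioid = destid, .hopcount = 1,
+ *           .offset = 0, .length = sizeof(val),
+ *           .buffer = (__u64)(uintptr_t)&val,
+ *   };
+ *   ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io);
+ */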
+
+static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
+			      int local)
+{
+	struct rio_mport *mport = priv->md->mport;
+	struct rio_mport_maint_io maint_io;
+	u32 *buffer;
+	u32 offset;
+	size_t length;
+	int ret = -EINVAL, i;
+
+	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
+		return -EFAULT;
+
+	if ((maint_io.offset % 4) ||
+	    (maint_io.length == 0) || (maint_io.length % 4) ||
+	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
+		return -EINVAL;
+
+	buffer = vmalloc(maint_io.length);
+	if (buffer == NULL)
+		return -ENOMEM;
+	length = maint_io.length;
+
+	if (unlikely(copy_from_user(buffer,
+			(void __user *)(uintptr_t)maint_io.buffer, length))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	offset = maint_io.offset;
+	length /= sizeof(u32);
+
+	for (i = 0; i < length; i++) {
+		if (local)
+			ret = __rio_local_write_config_32(mport,
+							  offset, buffer[i]);
+		else
+			ret = rio_mport_write_config_32(mport, maint_io.rioid,
+							maint_io.hopcount,
+							offset, buffer[i]);
+		if (ret)
+			goto out;
+
+		offset += 4;
+	}
+
+out:
+	vfree(buffer);
+	return ret;
+}
+
+/*
+ * Inbound/outbound memory mapping functions
+ */
+static int
+rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
+				  u16 rioid, u64 raddr, u32 size,
+				  dma_addr_t *paddr)
+{
+	struct rio_mport *mport = md->mport;
+	struct rio_mport_mapping *map;
+	int ret;
+
+	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL)
+		return -ENOMEM;
+
+	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
+	if (ret < 0)
+		goto err_map_outb;
+
+	map->dir = MAP_OUTBOUND;
+	map->rioid = rioid;
+	map->rio_addr = raddr;
+	map->size = size;
+	map->phys_addr = *paddr;
+	map->filp = filp;
+	map->md = md;
+	kref_init(&map->ref);
+	list_add_tail(&map->node, &md->mappings);
+	return 0;
+err_map_outb:
+	kfree(map);
+	return ret;
+}
+
+static int
+rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
+			       u16 rioid, u64 raddr, u32 size,
+			       dma_addr_t *paddr)
+{
+	struct rio_mport_mapping *map;
+	int err = -ENOMEM;
+
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry(map, &md->mappings, node) {
+		if (map->dir != MAP_OUTBOUND)
+			continue;
+		if (rioid == map->rioid &&
+		    raddr == map->rio_addr && size == map->size) {
+			*paddr = map->phys_addr;
+			err = 0;
+			break;
+		} else if (rioid == map->rioid &&
+			   raddr < (map->rio_addr + map->size - 1) &&
+			   (raddr + size) > map->rio_addr) {
+			err = -EBUSY;
+			break;
+		}
+	}
+
+	/* If not found, create new */
+	if (err == -ENOMEM)
+		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
+						size, paddr);
+	mutex_unlock(&md->buf_mutex);
+	return err;
+}
+
+static int rio_mport_obw_map(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *data = priv->md;
+	struct rio_mmap map;
+	dma_addr_t paddr;
+	int ret;
+
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
+		return -EFAULT;
+
+	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
+		   map.rioid, map.rio_addr, map.length);
+
+	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
+					     map.rio_addr, map.length, &paddr);
+	if (ret < 0) {
+		rmcd_error("Failed to set OBW err= %d", ret);
+		return ret;
+	}
+
+	map.handle = paddr;
+
+	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
+		return -EFAULT;
+	return 0;
+}
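+
+/*
+ * User-space sketch (hypothetical destid, error handling omitted): request
+ * a 64 KB outbound window onto a remote device's address 0:
+ *
+ *   struct rio_mmap obw = { .rioid = destid, .rio_addr = 0,
+ *                           .length = 0x10000 };
+ *   ioctl(fd, RIO_MAP_OUTBOUND, &obw);
+ *   // obw.handle now holds the local physical address of the window
+ */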
+
+/*
+ * rio_mport_obw_free() - unmap an outbound window from RapidIO address space
+ *
+ * @filp: file pointer associated with the call
+ * @arg:  OBW handle returned by the mapping routine
+ */
+static int rio_mport_obw_free(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md = priv->md;
+	u64 handle;
+	struct rio_mport_mapping *map, *_map;
+
+	if (!md->mport->ops->unmap_outb)
+		return -EPROTONOSUPPORT;
+
+	if (copy_from_user(&handle, arg, sizeof(handle)))
+		return -EFAULT;
+
+	rmcd_debug(OBW, "h=0x%llx", handle);
+
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry_safe(map, _map, &md->mappings, node) {
+		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
+			if (map->filp == filp) {
+				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
+				map->filp = NULL;
+				kref_put(&map->ref, mport_release_mapping);
+			}
+			break;
+		}
+	}
+	mutex_unlock(&md->buf_mutex);
+
+	return 0;
+}
+
+/*
+ * maint_hdid_set() - Set the host device ID
+ * @priv: driver private data
+ * @arg:  new host device ID
+ */
+static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	u16 hdid;
+
+	if (copy_from_user(&hdid, arg, sizeof(hdid)))
+		return -EFAULT;
+
+	md->mport->host_deviceid = hdid;
+	md->properties.hdid = hdid;
+	rio_local_set_device_id(md->mport, hdid);
+
+	rmcd_debug(MPORT, "Set host device Id to %d", hdid);
+
+	return 0;
+}
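+
+/*
+ * User-space sketch (hypothetical device node name, error handling omitted):
+ * assign host device ID 5 to the local mport:
+ *
+ *   int fd = open("/dev/rio_mport0", O_RDWR);
+ *   uint16_t hdid = 5;
+ *   ioctl(fd, RIO_MPORT_MAINT_HDID_SET, &hdid);
+ */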
+
+/*
+ * maint_comptag_set() - Set the host Component Tag
+ * @priv: driver private data
+ * @arg:  new component tag value
+ */
+static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	u32 comptag;
+
+	if (copy_from_user(&comptag, arg, sizeof(comptag)))
+		return -EFAULT;
+
+	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
+
+	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);
+
+	return 0;
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct mport_dma_req {
+	struct list_head node;
+	struct file *filp;
+	struct mport_cdev_priv *priv;
+	enum rio_transfer_sync sync;
+	struct sg_table sgt;
+	struct page **page_list;
+	unsigned int nr_pages;
+	struct rio_mport_mapping *map;
+	struct dma_chan *dmach;
+	enum dma_data_direction dir;
+	dma_cookie_t cookie;
+	enum dma_status	status;
+	struct completion req_comp;
+};
+
+struct mport_faf_work {
+	struct work_struct work;
+	struct mport_dma_req *req;
+};
+
+static void mport_release_def_dma(struct kref *dma_ref)
+{
+	struct mport_dev *md =
+			container_of(dma_ref, struct mport_dev, dma_ref);
+
+	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
+	rio_release_dma(md->dma_chan);
+	md->dma_chan = NULL;
+}
+
+static void mport_release_dma(struct kref *dma_ref)
+{
+	struct mport_cdev_priv *priv =
+			container_of(dma_ref, struct mport_cdev_priv, dma_ref);
+
+	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
+	complete(&priv->comp);
+}
+
+static void dma_req_free(struct mport_dma_req *req)
+{
+	struct mport_cdev_priv *priv = req->priv;
+	unsigned int i;
+
+	dma_unmap_sg(req->dmach->device->dev,
+		     req->sgt.sgl, req->sgt.nents, req->dir);
+	sg_free_table(&req->sgt);
+	if (req->page_list) {
+		for (i = 0; i < req->nr_pages; i++)
+			put_page(req->page_list[i]);
+		kfree(req->page_list);
+	}
+
+	if (req->map) {
+		mutex_lock(&req->map->md->buf_mutex);
+		kref_put(&req->map->ref, mport_release_mapping);
+		mutex_unlock(&req->map->md->buf_mutex);
+	}
+
+	kref_put(&priv->dma_ref, mport_release_dma);
+
+	kfree(req);
+}
+
+static void dma_xfer_callback(void *param)
+{
+	struct mport_dma_req *req = (struct mport_dma_req *)param;
+	struct mport_cdev_priv *priv = req->priv;
+
+	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
+					       NULL, NULL);
+	complete(&req->req_comp);
+}
+
+static void dma_faf_cleanup(struct work_struct *_work)
+{
+	struct mport_faf_work *work = container_of(_work,
+						struct mport_faf_work, work);
+	struct mport_dma_req *req = work->req;
+
+	dma_req_free(req);
+	kfree(work);
+}
+
+static void dma_faf_callback(void *param)
+{
+	struct mport_dma_req *req = (struct mport_dma_req *)param;
+	struct mport_faf_work *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	INIT_WORK(&work->work, dma_faf_cleanup);
+	work->req = req;
+	queue_work(dma_wq, &work->work);
+}
+
+/*
+ * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
+ *                   transfer object.
+ * Returns a pointer to the DMA transaction descriptor allocated by the DMA
+ * driver on success, or ERR_PTR/NULL on failure. The caller must check any
+ * non-NULL return value with the IS_ERR macro.
+ */
+static struct dma_async_tx_descriptor
+*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
+	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
+	enum dma_ctrl_flags flags)
+{
+	struct rio_dma_data tx_data;
+
+	tx_data.sg = sgt->sgl;
+	tx_data.sg_len = nents;
+	tx_data.rio_addr_u = 0;
+	tx_data.rio_addr = transfer->rio_addr;
+	if (dir == DMA_MEM_TO_DEV) {
+		switch (transfer->method) {
+		case RIO_EXCHANGE_NWRITE:
+			tx_data.wr_type = RDW_ALL_NWRITE;
+			break;
+		case RIO_EXCHANGE_NWRITE_R_ALL:
+			tx_data.wr_type = RDW_ALL_NWRITE_R;
+			break;
+		case RIO_EXCHANGE_NWRITE_R:
+			tx_data.wr_type = RDW_LAST_NWRITE_R;
+			break;
+		case RIO_EXCHANGE_DEFAULT:
+			tx_data.wr_type = RDW_DEFAULT;
+			break;
+		default:
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
+}
+
+/*
+ * Request a DMA channel associated with this mport device.
+ * Try to request a DMA channel for every new process that opens the given
+ * mport. If a new DMA channel is not available, use the default channel,
+ * which is the first DMA channel opened on the mport device.
+ */
+static int get_dma_channel(struct mport_cdev_priv *priv)
+{
+	mutex_lock(&priv->dma_lock);
+	if (!priv->dmach) {
+		priv->dmach = rio_request_mport_dma(priv->md->mport);
+		if (!priv->dmach) {
+			/* Use default DMA channel if available */
+			if (priv->md->dma_chan) {
+				priv->dmach = priv->md->dma_chan;
+				kref_get(&priv->md->dma_ref);
+			} else {
+				rmcd_error("Failed to get DMA channel");
+				mutex_unlock(&priv->dma_lock);
+				return -ENODEV;
+			}
+		} else if (!priv->md->dma_chan) {
+			/* Register default DMA channel if we do not have one */
+			priv->md->dma_chan = priv->dmach;
+			kref_init(&priv->md->dma_ref);
+			rmcd_debug(DMA, "Register DMA_chan %d as default",
+				   priv->dmach->chan_id);
+		}
+
+		kref_init(&priv->dma_ref);
+		init_completion(&priv->comp);
+	}
+
+	kref_get(&priv->dma_ref);
+	mutex_unlock(&priv->dma_lock);
+	return 0;
+}
+
+static void put_dma_channel(struct mport_cdev_priv *priv)
+{
+	kref_put(&priv->dma_ref, mport_release_dma);
+}
+
+/*
+ * DMA transfer functions
+ */
+static int do_dma_request(struct mport_dma_req *req,
+			  struct rio_transfer_io *xfer,
+			  enum rio_transfer_sync sync, int nents)
+{
+	struct mport_cdev_priv *priv;
+	struct sg_table *sgt;
+	struct dma_chan *chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie;
+	unsigned long tmo = msecs_to_jiffies(dma_timeout);
+	enum dma_transfer_direction dir;
+	long wret;
+	int ret = 0;
+
+	priv = req->priv;
+	sgt = &req->sgt;
+
+	chan = priv->dmach;
+	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+
+	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
+		   current->comm, task_pid_nr(current),
+		   dev_name(&chan->dev->device),
+		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
+
+	/* Initialize DMA transaction request */
+	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
+			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+
+	if (!tx) {
+		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
+			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
+			xfer->rio_addr, xfer->length);
+		ret = -EIO;
+		goto err_out;
+	} else if (IS_ERR(tx)) {
+		ret = PTR_ERR(tx);
+		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
+			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
+			xfer->rio_addr, xfer->length);
+		goto err_out;
+	}
+
+	if (sync == RIO_TRANSFER_FAF)
+		tx->callback = dma_faf_callback;
+	else
+		tx->callback = dma_xfer_callback;
+	tx->callback_param = req;
+
+	req->dmach = chan;
+	req->sync = sync;
+	req->status = DMA_IN_PROGRESS;
+	init_completion(&req->req_comp);
+
+	cookie = dmaengine_submit(tx);
+	req->cookie = cookie;
+
+	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
+		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
+
+	if (dma_submit_error(cookie)) {
+		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
+			   cookie, xfer->rio_addr, xfer->length);
+		ret = -EIO;
+		goto err_out;
+	}
+
+	dma_async_issue_pending(chan);
+
+	if (sync == RIO_TRANSFER_ASYNC) {
+		spin_lock(&priv->req_lock);
+		list_add_tail(&req->node, &priv->async_list);
+		spin_unlock(&priv->req_lock);
+		return cookie;
+	} else if (sync == RIO_TRANSFER_FAF)
+		return 0;
+
+	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
+
+	if (wret == 0) {
+		/* Timeout on wait occurred */
+		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
+		       current->comm, task_pid_nr(current),
+		       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
+		return -ETIMEDOUT;
+	} else if (wret == -ERESTARTSYS) {
+		/* wait_for_completion was interrupted by a signal but DMA
+		 * may still be in progress
+		 */
+		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
+			current->comm, task_pid_nr(current),
+			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
+		return -EINTR;
+	}
+
+	if (req->status != DMA_COMPLETE) {
+		/* DMA transaction completion was signaled with error */
+		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
+			current->comm, task_pid_nr(current),
+			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
+			cookie, req->status, ret);
+		ret = -EIO;
+	}
+
+err_out:
+	return ret;
+}
+
+/*
+ * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
+ *                      the remote RapidIO device
+ * @filp: file pointer associated with the call
+ * @transfer_mode: DMA transfer mode
+ * @sync: synchronization mode
+ * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
+ *                               DMA_DEV_TO_MEM = read)
+ * @xfer: data transfer descriptor structure
+ */
+static int
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
+		 enum rio_transfer_sync sync, enum dma_data_direction dir,
+		 struct rio_transfer_io *xfer)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	unsigned long nr_pages = 0;
+	struct page **page_list = NULL;
+	struct mport_dma_req *req;
+	struct mport_dev *md = priv->md;
+	struct dma_chan *chan;
+	int i, ret;
+	int nents;
+
+	if (xfer->length == 0)
+		return -EINVAL;
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	ret = get_dma_channel(priv);
+	if (ret) {
+		kfree(req);
+		return ret;
+	}
+
+	/*
+	 * If parameter loc_addr != NULL, we are transferring data from/to
+	 * a data buffer allocated in user space: lock the user-space buffer
+	 * pages in memory and build an SG table for the DMA transfer request.
+	 *
+	 * Otherwise (loc_addr == NULL) a contiguous kernel-space buffer is
+	 * used for DMA data transfers: build a single-entry SG table using
+	 * the offset within the internal buffer specified by the handle
+	 * parameter.
+	 */
+	if (xfer->loc_addr) {
+		unsigned long offset;
+		long pinned;
+
+		offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
+		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
+
+		page_list = kmalloc_array(nr_pages,
+					  sizeof(*page_list), GFP_KERNEL);
+		if (page_list == NULL) {
+			ret = -ENOMEM;
+			goto err_req;
+		}
+
+		pinned = get_user_pages_unlocked(
+				(unsigned long)xfer->loc_addr & PAGE_MASK,
+				nr_pages,
+				page_list,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);
+
+		if (pinned != nr_pages) {
+			if (pinned < 0) {
+				rmcd_error("get_user_pages_unlocked err=%ld",
+					   pinned);
+				nr_pages = 0;
+			} else
+				rmcd_error("pinned %ld out of %ld pages",
+					   pinned, nr_pages);
+			ret = -EFAULT;
+			/*
+			 * Set nr_pages up to mean "how many pages to unpin"
+			 * in the error handler:
+			 */
+			nr_pages = pinned;
+			goto err_pg;
+		}
+
+		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
+					offset, xfer->length, GFP_KERNEL);
+		if (ret) {
+			rmcd_error("sg_alloc_table failed with err=%d", ret);
+			goto err_pg;
+		}
+
+		req->page_list = page_list;
+		req->nr_pages = nr_pages;
+	} else {
+		dma_addr_t baddr;
+		struct rio_mport_mapping *map;
+
+		baddr = (dma_addr_t)xfer->handle;
+
+		mutex_lock(&md->buf_mutex);
+		list_for_each_entry(map, &md->mappings, node) {
+			if (baddr >= map->phys_addr &&
+			    baddr < (map->phys_addr + map->size)) {
+				kref_get(&map->ref);
+				req->map = map;
+				break;
+			}
+		}
+		mutex_unlock(&md->buf_mutex);
+
+		if (req->map == NULL) {
+			ret = -ENOMEM;
+			goto err_req;
+		}
+
+		if (xfer->length + xfer->offset > map->size) {
+			ret = -EINVAL;
+			goto err_req;
+		}
+
+		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
+		if (unlikely(ret)) {
+			rmcd_error("sg_alloc_table failed for internal buf");
+			goto err_req;
+		}
+
+		sg_set_buf(req->sgt.sgl,
+			   map->virt_addr + (baddr - map->phys_addr) +
+				xfer->offset, xfer->length);
+	}
+
+	req->dir = dir;
+	req->filp = filp;
+	req->priv = priv;
+	chan = priv->dmach;
+
+	nents = dma_map_sg(chan->device->dev,
+			   req->sgt.sgl, req->sgt.nents, dir);
+	if (nents == 0) {
+		rmcd_error("Failed to map SG list");
+		ret = -EFAULT;
+		goto err_pg;
+	}
+
+	ret = do_dma_request(req, xfer, sync, nents);
+
+	if (ret >= 0) {
+		if (sync == RIO_TRANSFER_SYNC)
+			goto sync_out;
+		return ret; /* return ASYNC cookie */
+	}
+
+	if (ret == -ETIMEDOUT || ret == -EINTR) {
+		/*
+		 * This can happen only in case of SYNC transfer.
+		 * Do not free an unfinished request structure immediately.
+		 * Place it into the pending list and deal with it later.
+		 */
+		spin_lock(&priv->req_lock);
+		list_add_tail(&req->node, &priv->pend_list);
+		spin_unlock(&priv->req_lock);
+		return ret;
+	}
+
+	rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
+sync_out:
+	dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
+	sg_free_table(&req->sgt);
+err_pg:
+	if (page_list) {
+		for (i = 0; i < nr_pages; i++)
+			put_page(page_list[i]);
+		kfree(page_list);
+	}
+err_req:
+	if (req->map) {
+		mutex_lock(&md->buf_mutex);
+		kref_put(&req->map->ref, mport_release_mapping);
+		mutex_unlock(&md->buf_mutex);
+	}
+	put_dma_channel(priv);
+	kfree(req);
+	return ret;
+}
+
+static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct rio_transaction transaction;
+	struct rio_transfer_io *transfer;
+	enum dma_data_direction dir;
+	int i, ret = 0;
+
+	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
+		return -EFAULT;
+
+	if (transaction.count != 1) /* only single transfer for now */
+		return -EINVAL;
+
+	if ((transaction.transfer_mode &
+	     priv->md->properties.transfer_mode) == 0)
+		return -ENODEV;
+
+	transfer = vmalloc(transaction.count * sizeof(*transfer));
+	if (!transfer)
+		return -ENOMEM;
+
+	if (unlikely(copy_from_user(transfer,
+				    (void __user *)(uintptr_t)transaction.block,
+				    transaction.count * sizeof(*transfer)))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+
+	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
+					DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	for (i = 0; i < transaction.count && ret == 0; i++)
+		ret = rio_dma_transfer(filp, transaction.transfer_mode,
+			transaction.sync, dir, &transfer[i]);
+
+	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+				  transfer,
+				  transaction.count * sizeof(*transfer))))
+		ret = -EFAULT;
+
+out_free:
+	vfree(transfer);
+
+	return ret;
+}
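+
+/*
+ * User-space sketch (hypothetical values, error handling omitted; assumes
+ * the mport reports RIO_TRANSFER_MODE_TRANSFER in its properties): blocking
+ * write of a user buffer to RapidIO address 0x10000 on device destid:
+ *
+ *   struct rio_transfer_io xfer = {
+ *           .rioid = destid, .rio_addr = 0x10000,
+ *           .loc_addr = (__u64)(uintptr_t)buf, .length = len,
+ *           .method = RIO_EXCHANGE_NWRITE_R,
+ *   };
+ *   struct rio_transaction tx = {
+ *           .block = (__u64)(uintptr_t)&xfer, .count = 1,
+ *           .transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
+ *           .sync = RIO_TRANSFER_SYNC, .dir = RIO_TRANSFER_DIR_WRITE,
+ *   };
+ *   ioctl(fd, RIO_TRANSFER, &tx);
+ */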
+
+static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv;
+	struct mport_dev *md;
+	struct rio_async_tx_wait w_param;
+	struct mport_dma_req *req;
+	dma_cookie_t cookie;
+	unsigned long tmo;
+	long wret;
+	int found = 0;
+	int ret;
+
+	priv = (struct mport_cdev_priv *)filp->private_data;
+	md = priv->md;
+
+	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
+		return -EFAULT;
+
+	cookie = w_param.token;
+	if (w_param.timeout)
+		tmo = msecs_to_jiffies(w_param.timeout);
+	else /* Use default DMA timeout */
+		tmo = msecs_to_jiffies(dma_timeout);
+
+	spin_lock(&priv->req_lock);
+	list_for_each_entry(req, &priv->async_list, node) {
+		if (req->cookie == cookie) {
+			list_del(&req->node);
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock(&priv->req_lock);
+
+	if (!found)
+		return -EAGAIN;
+
+	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
+
+	if (wret == 0) {
+		/* Timeout on wait occurred */
+		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
+		       current->comm, task_pid_nr(current),
+		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
+		ret = -ETIMEDOUT;
+		goto err_tmo;
+	} else if (wret == -ERESTARTSYS) {
+		/* wait_for_completion was interrupted by a signal but DMA
+		 * may still be in progress
+		 */
+		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
+			current->comm, task_pid_nr(current),
+			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
+		ret = -EINTR;
+		goto err_tmo;
+	}
+
+	if (req->status != DMA_COMPLETE) {
+		/* DMA transaction completion signaled with transfer error */
+		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
+			current->comm, task_pid_nr(current),
+			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
+			req->status);
+		ret = -EIO;
+	} else
+		ret = 0;
+
+	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
+		dma_req_free(req);
+
+	return ret;
+
+err_tmo:
+	/* Return request back into async queue */
+	spin_lock(&priv->req_lock);
+	list_add_tail(&req->node, &priv->async_list);
+	spin_unlock(&priv->req_lock);
+	return ret;
+}
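+
+/*
+ * Usage note (sketch, hypothetical cookie value): an RIO_TRANSFER call with
+ * sync == RIO_TRANSFER_ASYNC returns the DMA cookie to user space; the
+ * completion is then collected with:
+ *
+ *   struct rio_async_tx_wait w = { .token = cookie, .timeout = 0 };
+ *   ioctl(fd, RIO_WAIT_FOR_ASYNC, &w);  // timeout 0 => default dma_timeout
+ */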
+
+static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
+			u64 size, struct rio_mport_mapping **mapping)
+{
+	struct rio_mport_mapping *map;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL)
+		return -ENOMEM;
+
+	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
+					    &map->phys_addr, GFP_KERNEL);
+	if (map->virt_addr == NULL) {
+		kfree(map);
+		return -ENOMEM;
+	}
+
+	map->dir = MAP_DMA;
+	map->size = size;
+	map->filp = filp;
+	map->md = md;
+	kref_init(&map->ref);
+	mutex_lock(&md->buf_mutex);
+	list_add_tail(&map->node, &md->mappings);
+	mutex_unlock(&md->buf_mutex);
+	*mapping = map;
+
+	return 0;
+}
+
+static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md = priv->md;
+	struct rio_dma_mem map;
+	struct rio_mport_mapping *mapping = NULL;
+	int ret;
+
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
+		return -EFAULT;
+
+	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
+	if (ret)
+		return ret;
+
+	map.dma_handle = mapping->phys_addr;
+
+	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
+		mutex_lock(&md->buf_mutex);
+		kref_put(&mapping->ref, mport_release_mapping);
+		mutex_unlock(&md->buf_mutex);
+		return -EFAULT;
+	}
+
+	return 0;
+}
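+
+/*
+ * User-space sketch (error handling omitted): allocate a 64 KB kernel DMA
+ * buffer and map it into the process. Using the returned handle as the mmap
+ * offset is an assumption based on this driver's mmap handler:
+ *
+ *   struct rio_dma_mem dbuf = { .length = 0x10000 };
+ *   ioctl(fd, RIO_ALLOC_DMA, &dbuf);
+ *   void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
+ *                  MAP_SHARED, fd, dbuf.dma_handle);
+ */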
+
+static int rio_mport_free_dma(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md = priv->md;
+	u64 handle;
+	int ret = -EFAULT;
+	struct rio_mport_mapping *map, *_map;
+
+	if (copy_from_user(&handle, arg, sizeof(handle)))
+		return -EFAULT;
+	rmcd_debug(EXIT, "filp=%p", filp);
+
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry_safe(map, _map, &md->mappings, node) {
+		if (map->dir == MAP_DMA && map->phys_addr == handle &&
+		    map->filp == filp) {
+			kref_put(&map->ref, mport_release_mapping);
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&md->buf_mutex);
+
+	if (ret == -EFAULT) {
+		rmcd_debug(DMA, "ERR no matching mapping");
+		return ret;
+	}
+
+	return 0;
+}
+#else
+static int rio_mport_transfer_ioctl(struct file *filp, void *arg)
+{
+	return -ENODEV;
+}
+
+static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
+{
+	return -ENODEV;
+}
+
+static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
+{
+	return -ENODEV;
+}
+
+static int rio_mport_free_dma(struct file *filp, void __user *arg)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+/*
+ * Inbound/outbound memory mapping functions
+ */
+
+static int
+rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
+				u64 raddr, u64 size,
+				struct rio_mport_mapping **mapping)
+{
+	struct rio_mport *mport = md->mport;
+	struct rio_mport_mapping *map;
+	int ret;
+
+	/* rio_map_inb_region() accepts u32 size */
+	if (size > 0xffffffff)
+		return -EINVAL;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL)
+		return -ENOMEM;
+
+	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
+					    &map->phys_addr, GFP_KERNEL);
+	if (map->virt_addr == NULL) {
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	if (raddr == RIO_MAP_ANY_ADDR)
+		raddr = map->phys_addr;
+	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
+	if (ret < 0)
+		goto err_map_inb;
+
+	map->dir = MAP_INBOUND;
+	map->rio_addr = raddr;
+	map->size = size;
+	map->filp = filp;
+	map->md = md;
+	kref_init(&map->ref);
+	mutex_lock(&md->buf_mutex);
+	list_add_tail(&map->node, &md->mappings);
+	mutex_unlock(&md->buf_mutex);
+	*mapping = map;
+	return 0;
+
+err_map_inb:
+	dma_free_coherent(mport->dev.parent, size,
+			  map->virt_addr, map->phys_addr);
+err_dma_alloc:
+	kfree(map);
+	return ret;
+}
+
+static int
+rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
+			      u64 raddr, u64 size,
+			      struct rio_mport_mapping **mapping)
+{
+	struct rio_mport_mapping *map;
+	int err = -ENOMEM;
+
+	if (raddr == RIO_MAP_ANY_ADDR)
+		goto get_new;
+
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry(map, &md->mappings, node) {
+		if (map->dir != MAP_INBOUND)
+			continue;
+		if (raddr == map->rio_addr && size == map->size) {
+			/* allow exact match only */
+			*mapping = map;
+			err = 0;
+			break;
+		} else if (raddr < (map->rio_addr + map->size - 1) &&
+			   (raddr + size) > map->rio_addr) {
+			err = -EBUSY;
+			break;
+		}
+	}
+	mutex_unlock(&md->buf_mutex);
+
+	if (err != -ENOMEM)
+		return err;
+get_new:
+	/* not found, create new */
+	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
+}
+
+static int rio_mport_map_inbound(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md = priv->md;
+	struct rio_mmap map;
+	struct rio_mport_mapping *mapping = NULL;
+	int ret;
+
+	if (!md->mport->ops->map_inb)
+		return -EPROTONOSUPPORT;
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
+		return -EFAULT;
+
+	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
+
+	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
+					    map.length, &mapping);
+	if (ret)
+		return ret;
+
+	map.handle = mapping->phys_addr;
+	map.rio_addr = mapping->rio_addr;
+
+	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
+		/* Delete mapping if it was created by this request */
+		if (ret == 0 && mapping->filp == filp) {
+			mutex_lock(&md->buf_mutex);
+			kref_put(&mapping->ref, mport_release_mapping);
+			mutex_unlock(&md->buf_mutex);
+		}
+		return -EFAULT;
+	}
+
+	return 0;
+}
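+
+/*
+ * User-space sketch (error handling omitted): claim a 64 KB inbound window
+ * at any free RapidIO address and learn where it landed:
+ *
+ *   struct rio_mmap ibw = { .length = 0x10000,
+ *                           .rio_addr = RIO_MAP_ANY_ADDR };
+ *   ioctl(fd, RIO_MAP_INBOUND, &ibw);
+ *   // ibw.rio_addr holds the assigned RIO address; ibw.handle holds the
+ *   // physical address of the backing buffer
+ */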
+
+/*
+ * rio_mport_inbound_free() - unmap from RapidIO address space and free
+ *                    previously allocated inbound DMA coherent buffer
+ * @filp: file pointer associated with the call
+ * @arg:  buffer handle returned by allocation routine
+ */
+static int rio_mport_inbound_free(struct file *filp, void __user *arg)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md = priv->md;
+	u64 handle;
+	struct rio_mport_mapping *map, *_map;
+
+	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
+
+	if (!md->mport->ops->unmap_inb)
+		return -EPROTONOSUPPORT;
+
+	if (copy_from_user(&handle, arg, sizeof(handle)))
+		return -EFAULT;
+
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry_safe(map, _map, &md->mappings, node) {
+		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
+			if (map->filp == filp) {
+				map->filp = NULL;
+				kref_put(&map->ref, mport_release_mapping);
+			}
+			break;
+		}
+	}
+	mutex_unlock(&md->buf_mutex);
+
+	return 0;
+}
+
+/*
+ * maint_port_idx_get() - Get the port index of the mport instance
+ * @priv: driver private data
+ * @arg:  user buffer that receives the port index
+ */
+static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	u32 port_idx = md->mport->index;
+
+	rmcd_debug(MPORT, "port_index=%d", port_idx);
+
+	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int rio_mport_add_event(struct mport_cdev_priv *priv,
+			       struct rio_event *event)
+{
+	int overflow;
+
+	if (!(priv->event_mask & event->header))
+		return -EACCES;
+
+	spin_lock(&priv->fifo_lock);
+	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
+		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
+			sizeof(*event)) != sizeof(*event);
+	spin_unlock(&priv->fifo_lock);
+
+	wake_up_interruptible(&priv->event_rx_wait);
+
+	if (overflow) {
+		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
+				       u16 src, u16 dst, u16 info)
+{
+	struct mport_dev *data = dev_id;
+	struct mport_cdev_priv *priv;
+	struct rio_mport_db_filter *db_filter;
+	struct rio_event event;
+	int handled;
+
+	event.header = RIO_DOORBELL;
+	event.u.doorbell.rioid = src;
+	event.u.doorbell.payload = info;
+
+	handled = 0;
+	spin_lock(&data->db_lock);
+	list_for_each_entry(db_filter, &data->doorbells, data_node) {
+		if ((db_filter->filter.rioid == RIO_INVALID_DESTID ||
+		     db_filter->filter.rioid == src) &&
+		    info >= db_filter->filter.low &&
+		    info <= db_filter->filter.high) {
+			priv = db_filter->priv;
+			rio_mport_add_event(priv, &event);
+			handled = 1;
+		}
+	}
+	spin_unlock(&data->db_lock);
+
+	if (!handled)
+		dev_warn(&data->dev,
+			"%s: spurious DB received from 0x%x, info=0x%04x\n",
+			__func__, src, info);
+}
+
+static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
+				   void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	struct rio_mport_db_filter *db_filter;
+	struct rio_doorbell_filter filter;
+	unsigned long flags;
+	int ret;
+
+	if (copy_from_user(&filter, arg, sizeof(filter)))
+		return -EFAULT;
+
+	if (filter.low > filter.high)
+		return -EINVAL;
+
+	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
+				    rio_mport_doorbell_handler);
+	if (ret) {
+		rmcd_error("%s failed to register IBDB, err=%d",
+			   dev_name(&md->dev), ret);
+		return ret;
+	}
+
+	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
+	if (db_filter == NULL) {
+		rio_release_inb_dbell(md->mport, filter.low, filter.high);
+		return -ENOMEM;
+	}
+
+	db_filter->filter = filter;
+	db_filter->priv = priv;
+	spin_lock_irqsave(&md->db_lock, flags);
+	list_add_tail(&db_filter->priv_node, &priv->db_filters);
+	list_add_tail(&db_filter->data_node, &md->doorbells);
+	spin_unlock_irqrestore(&md->db_lock, flags);
+
+	return 0;
+}
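+
+/*
+ * User-space sketch (error handling omitted): accept doorbells 0x10..0x1f
+ * from any sender; matching events are queued to this descriptor's event
+ * fifo (assuming RIO_DOORBELL is set in the event mask):
+ *
+ *   struct rio_doorbell_filter f = { .rioid = RIO_INVALID_DESTID,
+ *                                    .low = 0x10, .high = 0x1f };
+ *   ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &f);
+ */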
+
+static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
+{
+	list_del(&db_filter->data_node);
+	list_del(&db_filter->priv_node);
+	kfree(db_filter);
+}
+
+static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
+				      void __user *arg)
+{
+	struct rio_mport_db_filter *db_filter;
+	struct rio_doorbell_filter filter;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (copy_from_user(&filter, arg, sizeof(filter)))
+		return -EFAULT;
+
+	if (filter.low > filter.high)
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->md->db_lock, flags);
+	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
+		if (db_filter->filter.rioid == filter.rioid &&
+		    db_filter->filter.low == filter.low &&
+		    db_filter->filter.high == filter.high) {
+			rio_mport_delete_db_filter(db_filter);
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&priv->md->db_lock, flags);
+
+	if (!ret)
+		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);
+
+	return ret;
+}
+
+static int rio_mport_match_pw(union rio_pw_msg *msg,
+			      struct rio_pw_filter *filter)
+{
+	if ((msg->em.comptag & filter->mask) < filter->low ||
+		(msg->em.comptag & filter->mask) > filter->high)
+		return 0;
+	return 1;
+}
+
+static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
+				union rio_pw_msg *msg, int step)
+{
+	struct mport_dev *md = context;
+	struct mport_cdev_priv *priv;
+	struct rio_mport_pw_filter *pw_filter;
+	struct rio_event event;
+	int handled;
+
+	event.header = RIO_PORTWRITE;
+	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);
+
+	handled = 0;
+	spin_lock(&md->pw_lock);
+	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
+		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
+			priv = pw_filter->priv;
+			rio_mport_add_event(priv, &event);
+			handled = 1;
+		}
+	}
+	spin_unlock(&md->pw_lock);
+
+	if (!handled) {
+		printk_ratelimited(KERN_WARNING DRV_NAME
+			": mport%d received spurious PW from 0x%08x\n",
+			mport->id, msg->em.comptag);
+	}
+
+	return 0;
+}
+
+static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
+				   void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	struct rio_mport_pw_filter *pw_filter;
+	struct rio_pw_filter filter;
+	unsigned long flags;
+	int hadd = 0;
+
+	if (copy_from_user(&filter, arg, sizeof(filter)))
+		return -EFAULT;
+
+	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
+	if (pw_filter == NULL)
+		return -ENOMEM;
+
+	pw_filter->filter = filter;
+	pw_filter->priv = priv;
+	spin_lock_irqsave(&md->pw_lock, flags);
+	if (list_empty(&md->portwrites))
+		hadd = 1;
+	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
+	list_add_tail(&pw_filter->md_node, &md->portwrites);
+	spin_unlock_irqrestore(&md->pw_lock, flags);
+
+	if (hadd) {
+		int ret;
+
+		ret = rio_add_mport_pw_handler(md->mport, md,
+					       rio_mport_pw_handler);
+		if (ret) {
+			dev_err(&md->dev,
+				"%s: failed to add IB_PW handler, err=%d\n",
+				__func__, ret);
+			return ret;
+		}
+		rio_pw_enable(md->mport, 1);
+	}
+
+	return 0;
+}
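+
+/*
+ * User-space sketch (hypothetical tag values, error handling omitted):
+ * forward port-writes whose component tag masks to 0x100000 (i.e. tags
+ * 0x100000..0x1000ff under mask 0xffffff00) to this descriptor:
+ *
+ *   struct rio_pw_filter f = { .mask = 0xffffff00,
+ *                              .low = 0x100000, .high = 0x100000 };
+ *   ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &f);
+ */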
+
+static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
+{
+	list_del(&pw_filter->md_node);
+	list_del(&pw_filter->priv_node);
+	kfree(pw_filter);
+}
+
+static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
+				     struct rio_pw_filter *b)
+{
+	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
+		return 1;
+	return 0;
+}
+
+static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
+				      void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	struct rio_mport_pw_filter *pw_filter;
+	struct rio_pw_filter filter;
+	unsigned long flags;
+	int ret = -EINVAL;
+	int hdel = 0;
+
+	if (copy_from_user(&filter, arg, sizeof(filter)))
+		return -EFAULT;
+
+	spin_lock_irqsave(&md->pw_lock, flags);
+	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
+		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
+			rio_mport_delete_pw_filter(pw_filter);
+			ret = 0;
+			break;
+		}
+	}
+
+	if (list_empty(&md->portwrites))
+		hdel = 1;
+	spin_unlock_irqrestore(&md->pw_lock, flags);
+
+	if (hdel) {
+		rio_del_mport_pw_handler(md->mport, priv->md,
+					 rio_mport_pw_handler);
+		rio_pw_enable(md->mport, 0);
+	}
+
+	return ret;
+}
+
+/*
+ * rio_release_dev - release routine for kernel RIO device object
+ * @dev: kernel device object associated with a RIO device structure
+ *
+ * Frees the RIO device struct associated with the kernel device object.
+ */
+static void rio_release_dev(struct device *dev)
+{
+	struct rio_dev *rdev;
+
+	rdev = to_rio_dev(dev);
+	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
+	kfree(rdev);
+}
+
+static void rio_release_net(struct device *dev)
+{
+	struct rio_net *net;
+
+	net = to_rio_net(dev);
+	rmcd_debug(RDEV, "net_%d", net->id);
+	kfree(net);
+}
+
+/*
+ * rio_mport_add_riodev - creates a kernel RIO device object
+ *
+ * Allocates a RIO device data structure and initializes required fields based
+ * on the device's configuration space contents.
+ * If the device has switch capabilities, a switch-specific portion is
+ * allocated and configured.
+ */
+static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+				   void __user *arg)
+{
+	struct mport_dev *md = priv->md;
+	struct rio_rdev_info dev_info;
+	struct rio_dev *rdev;
+	struct rio_switch *rswitch = NULL;
+	struct rio_mport *mport;
+	size_t size;
+	u32 rval;
+	u32 swpinfo = 0;
+	u16 destid;
+	u8 hopcount;
+	int err;
+
+	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+		return -EFAULT;
+	dev_info.name[sizeof(dev_info.name) - 1] = '\0';
+
+	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
+		   dev_info.comptag, dev_info.destid, dev_info.hopcount);
+
+	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
+		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
+		return -EEXIST;
+	}
+
+	size = sizeof(*rdev);
+	mport = md->mport;
+	destid = dev_info.destid;
+	hopcount = dev_info.hopcount;
+
+	if (rio_mport_read_config_32(mport, destid, hopcount,
+				     RIO_PEF_CAR, &rval))
+		return -EIO;
+
+	if (rval & RIO_PEF_SWITCH) {
+		rio_mport_read_config_32(mport, destid, hopcount,
+					 RIO_SWP_INFO_CAR, &swpinfo);
+		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+	}
+
+	rdev = kzalloc(size, GFP_KERNEL);
+	if (rdev == NULL)
+		return -ENOMEM;
+
+	if (mport->net == NULL) {
+		struct rio_net *net;
+
+		net = rio_alloc_net(mport);
+		if (!net) {
+			err = -ENOMEM;
+			rmcd_debug(RDEV, "failed to allocate net object");
+			goto cleanup;
+		}
+
+		net->id = mport->id;
+		net->hport = mport;
+		dev_set_name(&net->dev, "rnet_%d", net->id);
+		net->dev.parent = &mport->dev;
+		net->dev.release = rio_release_net;
+		err = rio_add_net(net);
+		if (err) {
+			rmcd_debug(RDEV, "failed to register net, err=%d", err);
+			kfree(net);
+			goto cleanup;
+		}
+	}
+
+	rdev->net = mport->net;
+	rdev->pef = rval;
+	rdev->swpinfo = swpinfo;
+	rio_mport_read_config_32(mport, destid, hopcount,
+				 RIO_DEV_ID_CAR, &rval);
+	rdev->did = rval >> 16;
+	rdev->vid = rval & 0xffff;
+	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
+				 &rdev->device_rev);
+	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
+				 &rval);
+	rdev->asm_did = rval >> 16;
+	rdev->asm_vid = rval & 0xffff;
+	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
+				 &rval);
+	rdev->asm_rev = rval >> 16;
+
+	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
+		rdev->efptr = rval & 0xffff;
+		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
+						hopcount, &rdev->phys_rmap);
+
+		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
+						hopcount, RIO_EFB_ERR_MGMNT);
+	}
+
+	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
+				 &rdev->src_ops);
+	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
+				 &rdev->dst_ops);
+
+	rdev->comp_tag = dev_info.comptag;
+	rdev->destid = destid;
+	/* hopcount is stored as specified by the caller, regardless of EP or SW */
+	rdev->hopcount = hopcount;
+
+	if (rdev->pef & RIO_PEF_SWITCH) {
+		rswitch = rdev->rswitch;
+		rswitch->route_table = NULL;
+	}
+
+	if (strlen(dev_info.name))
+		dev_set_name(&rdev->dev, "%s", dev_info.name);
+	else if (rdev->pef & RIO_PEF_SWITCH)
+		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
+			     rdev->comp_tag & RIO_CTAG_UDEVID);
+	else
+		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
+			     rdev->comp_tag & RIO_CTAG_UDEVID);
+
+	INIT_LIST_HEAD(&rdev->net_list);
+	rdev->dev.parent = &mport->net->dev;
+	rio_attach_device(rdev);
+	rdev->dev.release = rio_release_dev;
+
+	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
+		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
+				   0, 0xffff);
+	err = rio_add_device(rdev);
+	if (err)
+		goto cleanup;
+	rio_dev_get(rdev);
+
+	return 0;
+cleanup:
+	kfree(rdev);
+	return err;
+}
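+
+/*
+ * User-space sketch (hypothetical destid/comptag, error handling omitted):
+ * register an endpoint one hop away with the kernel RIO device model; an
+ * empty name lets the driver generate the canonical "%02x:e:%04x" name:
+ *
+ *   struct rio_rdev_info info = { .destid = destid, .hopcount = 1,
+ *                                 .comptag = ct };
+ *   ioctl(fd, RIO_DEV_ADD, &info);
+ */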
+
+static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
+{
+	struct rio_rdev_info dev_info;
+	struct rio_dev *rdev = NULL;
+	struct device  *dev;
+	struct rio_mport *mport;
+	struct rio_net *net;
+
+	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+		return -EFAULT;
+	dev_info.name[sizeof(dev_info.name) - 1] = '\0';
+
+	mport = priv->md->mport;
+
+	/* If device name is specified, removal by name has priority */
+	if (strlen(dev_info.name)) {
+		dev = bus_find_device_by_name(&rio_bus_type, NULL,
+					      dev_info.name);
+		if (dev)
+			rdev = to_rio_dev(dev);
+	} else {
+		do {
+			rdev = rio_get_comptag(dev_info.comptag, rdev);
+			if (rdev && rdev->dev.parent == &mport->net->dev &&
+			    rdev->destid == dev_info.destid &&
+			    rdev->hopcount == dev_info.hopcount)
+				break;
+		} while (rdev);
+	}
+
+	if (!rdev) {
+		rmcd_debug(RDEV,
+			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
+			dev_info.name, dev_info.comptag, dev_info.destid,
+			dev_info.hopcount);
+		return -ENODEV;
+	}
+
+	net = rdev->net;
+	rio_dev_put(rdev);
+	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
+
+	if (list_empty(&net->devices)) {
+		rio_free_net(net);
+		mport->net = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Mport cdev management
+ */
+
+/*
+ * mport_cdev_open() - Open character device (mport)
+ */
+static int mport_cdev_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+	int minor = iminor(inode);
+	struct mport_dev *chdev;
+	struct mport_cdev_priv *priv;
+
+	/* Test for valid device */
+	if (minor >= RIO_MAX_MPORTS) {
+		rmcd_error("Invalid minor device number");
+		return -EINVAL;
+	}
+
+	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);
+
+	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);
+
+	if (atomic_read(&chdev->active) == 0)
+		return -ENODEV;
+
+	get_device(&chdev->dev);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		put_device(&chdev->dev);
+		return -ENOMEM;
+	}
+
+	priv->md = chdev;
+
+	mutex_lock(&chdev->file_mutex);
+	list_add_tail(&priv->list, &chdev->file_list);
+	mutex_unlock(&chdev->file_mutex);
+
+	INIT_LIST_HEAD(&priv->db_filters);
+	INIT_LIST_HEAD(&priv->pw_filters);
+	spin_lock_init(&priv->fifo_lock);
+	init_waitqueue_head(&priv->event_rx_wait);
+	ret = kfifo_alloc(&priv->event_fifo,
+			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
+			  GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
+		ret = -ENOMEM;
+		goto err_fifo;
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	INIT_LIST_HEAD(&priv->async_list);
+	INIT_LIST_HEAD(&priv->pend_list);
+	spin_lock_init(&priv->req_lock);
+	mutex_init(&priv->dma_lock);
+#endif
+
+	filp->private_data = priv;
+	goto out;
+err_fifo:
+	kfree(priv);
+out:
+	return ret;
+}
+
+static int mport_cdev_fasync(int fd, struct file *filp, int mode)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+
+	return fasync_helper(fd, filp, mode, &priv->async_queue);
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+static void mport_cdev_release_dma(struct file *filp)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md;
+	struct mport_dma_req *req, *req_next;
+	unsigned long tmo = msecs_to_jiffies(dma_timeout);
+	long wret;
+	LIST_HEAD(list);
+
+	rmcd_debug(EXIT, "from filp=%p %s(%d)",
+		   filp, current->comm, task_pid_nr(current));
+
+	if (!priv->dmach) {
+		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
+		return;
+	}
+
+	md = priv->md;
+
+	flush_workqueue(dma_wq);
+
+	spin_lock(&priv->req_lock);
+	if (!list_empty(&priv->async_list)) {
+		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
+			   filp, current->comm, task_pid_nr(current));
+		list_splice_init(&priv->async_list, &list);
+	}
+	spin_unlock(&priv->req_lock);
+
+	if (!list_empty(&list)) {
+		rmcd_debug(EXIT, "temp list not empty");
+		list_for_each_entry_safe(req, req_next, &list, node) {
+			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
+				   req->filp, req->cookie,
+				   completion_done(&req->req_comp)?"yes":"no");
+			list_del(&req->node);
+			dma_req_free(req);
+		}
+	}
+
+	if (!list_empty(&priv->pend_list)) {
+		rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
+			   filp, current->comm, task_pid_nr(current));
+		list_for_each_entry_safe(req,
+					 req_next, &priv->pend_list, node) {
+			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
+				   req->filp, req->cookie,
+				   completion_done(&req->req_comp)?"yes":"no");
+			list_del(&req->node);
+			dma_req_free(req);
+		}
+	}
+
+	put_dma_channel(priv);
+	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
+
+	if (wret <= 0) {
+		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
+			current->comm, task_pid_nr(current), wret);
+	}
+
+	spin_lock(&priv->req_lock);
+
+	if (!list_empty(&priv->pend_list)) {
+		rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
+			   filp, current->comm, task_pid_nr(current));
+	}
+
+	spin_unlock(&priv->req_lock);
+
+	if (priv->dmach != priv->md->dma_chan) {
+		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
+			   filp, current->comm, task_pid_nr(current));
+		rio_release_dma(priv->dmach);
+	} else {
+		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
+		kref_put(&md->dma_ref, mport_release_def_dma);
+	}
+
+	priv->dmach = NULL;
+}
+#else
+#define mport_cdev_release_dma(filp) do {} while (0)
+#endif
+
+/*
+ * mport_cdev_release() - Release character device
+ */
+static int mport_cdev_release(struct inode *inode, struct file *filp)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *chdev;
+	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
+	struct rio_mport_db_filter *db_filter, *db_filter_next;
+	struct rio_mport_mapping *map, *_map;
+	unsigned long flags;
+
+	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
+
+	chdev = priv->md;
+	mport_cdev_release_dma(filp);
+
+	priv->event_mask = 0;
+
+	spin_lock_irqsave(&chdev->pw_lock, flags);
+	if (!list_empty(&priv->pw_filters)) {
+		list_for_each_entry_safe(pw_filter, pw_filter_next,
+					 &priv->pw_filters, priv_node)
+			rio_mport_delete_pw_filter(pw_filter);
+	}
+	spin_unlock_irqrestore(&chdev->pw_lock, flags);
+
+	spin_lock_irqsave(&chdev->db_lock, flags);
+	list_for_each_entry_safe(db_filter, db_filter_next,
+				 &priv->db_filters, priv_node) {
+		rio_mport_delete_db_filter(db_filter);
+	}
+	spin_unlock_irqrestore(&chdev->db_lock, flags);
+
+	kfifo_free(&priv->event_fifo);
+
+	mutex_lock(&chdev->buf_mutex);
+	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
+		if (map->filp == filp) {
+			rmcd_debug(EXIT, "release mapping %p filp=%p",
+				   map->virt_addr, filp);
+			kref_put(&map->ref, mport_release_mapping);
+		}
+	}
+	mutex_unlock(&chdev->buf_mutex);
+
+	mport_cdev_fasync(-1, filp, 0);
+	filp->private_data = NULL;
+	mutex_lock(&chdev->file_mutex);
+	list_del(&priv->list);
+	mutex_unlock(&chdev->file_mutex);
+	put_device(&chdev->dev);
+	kfree(priv);
+	return 0;
+}
+
+/*
+ * mport_cdev_ioctl() - IOCTLs for character device
+ */
+static long mport_cdev_ioctl(struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	int err = -EINVAL;
+	struct mport_cdev_priv *data = filp->private_data;
+	struct mport_dev *md = data->md;
+
+	if (atomic_read(&md->active) == 0)
+		return -ENODEV;
+
+	switch (cmd) {
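+	/* For maintenance accesses the last argument selects local (1)
+	 * mport registers vs. remote (0) device access.
+	 */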
+	case RIO_MPORT_MAINT_READ_LOCAL:
+		return rio_mport_maint_rd(data, (void __user *)arg, 1);
+	case RIO_MPORT_MAINT_WRITE_LOCAL:
+		return rio_mport_maint_wr(data, (void __user *)arg, 1);
+	case RIO_MPORT_MAINT_READ_REMOTE:
+		return rio_mport_maint_rd(data, (void __user *)arg, 0);
+	case RIO_MPORT_MAINT_WRITE_REMOTE:
+		return rio_mport_maint_wr(data, (void __user *)arg, 0);
+	case RIO_MPORT_MAINT_HDID_SET:
+		return maint_hdid_set(data, (void __user *)arg);
+	case RIO_MPORT_MAINT_COMPTAG_SET:
+		return maint_comptag_set(data, (void __user *)arg);
+	case RIO_MPORT_MAINT_PORT_IDX_GET:
+		return maint_port_idx_get(data, (void __user *)arg);
+	case RIO_MPORT_GET_PROPERTIES:
+		md->properties.hdid = md->mport->host_deviceid;
+		if (copy_to_user((void __user *)arg, &(md->properties),
+				 sizeof(md->properties)))
+			return -EFAULT;
+		return 0;
+	case RIO_ENABLE_DOORBELL_RANGE:
+		return rio_mport_add_db_filter(data, (void __user *)arg);
+	case RIO_DISABLE_DOORBELL_RANGE:
+		return rio_mport_remove_db_filter(data, (void __user *)arg);
+	case RIO_ENABLE_PORTWRITE_RANGE:
+		return rio_mport_add_pw_filter(data, (void __user *)arg);
+	case RIO_DISABLE_PORTWRITE_RANGE:
+		return rio_mport_remove_pw_filter(data, (void __user *)arg);
+	case RIO_SET_EVENT_MASK:
+		data->event_mask = (u32)arg;
+		return 0;
+	case RIO_GET_EVENT_MASK:
+		if (copy_to_user((void __user *)arg, &data->event_mask,
+				    sizeof(u32)))
+			return -EFAULT;
+		return 0;
+	case RIO_MAP_OUTBOUND:
+		return rio_mport_obw_map(filp, (void __user *)arg);
+	case RIO_MAP_INBOUND:
+		return rio_mport_map_inbound(filp, (void __user *)arg);
+	case RIO_UNMAP_OUTBOUND:
+		return rio_mport_obw_free(filp, (void __user *)arg);
+	case RIO_UNMAP_INBOUND:
+		return rio_mport_inbound_free(filp, (void __user *)arg);
+	case RIO_ALLOC_DMA:
+		return rio_mport_alloc_dma(filp, (void __user *)arg);
+	case RIO_FREE_DMA:
+		return rio_mport_free_dma(filp, (void __user *)arg);
+	case RIO_WAIT_FOR_ASYNC:
+		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
+	case RIO_TRANSFER:
+		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
+	case RIO_DEV_ADD:
+		return rio_mport_add_riodev(data, (void __user *)arg);
+	case RIO_DEV_DEL:
+		return rio_mport_del_riodev(data, (void __user *)arg);
+	default:
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * mport_release_mapping - free mapping resources and info structure
+ * @ref: a pointer to the kref within struct rio_mport_mapping
+ *
+ * NOTE: Shall be called while holding buf_mutex.
+ */
+static void mport_release_mapping(struct kref *ref)
+{
+	struct rio_mport_mapping *map =
+			container_of(ref, struct rio_mport_mapping, ref);
+	struct rio_mport *mport = map->md->mport;
+
+	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
+		   map->dir, map->virt_addr,
+		   &map->phys_addr, mport->name);
+
+	list_del(&map->node);
+
+	switch (map->dir) {
+	case MAP_INBOUND:
+		rio_unmap_inb_region(mport, map->phys_addr);
+		/* fall through - inbound mappings also own a DMA buffer */
+	case MAP_DMA:
+		dma_free_coherent(mport->dev.parent, map->size,
+				  map->virt_addr, map->phys_addr);
+		break;
+	case MAP_OUTBOUND:
+		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
+		break;
+	}
+	kfree(map);
+}
+
+static void mport_mm_open(struct vm_area_struct *vma)
+{
+	struct rio_mport_mapping *map = vma->vm_private_data;
+
+	rmcd_debug(MMAP, "%pad", &map->phys_addr);
+	kref_get(&map->ref);
+}
+
+static void mport_mm_close(struct vm_area_struct *vma)
+{
+	struct rio_mport_mapping *map = vma->vm_private_data;
+
+	rmcd_debug(MMAP, "%pad", &map->phys_addr);
+	mutex_lock(&map->md->buf_mutex);
+	kref_put(&map->ref, mport_release_mapping);
+	mutex_unlock(&map->md->buf_mutex);
+}
+
+static const struct vm_operations_struct vm_ops = {
+	.open =	mport_mm_open,
+	.close = mport_mm_close,
+};
+
+static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct mport_dev *md;
+	size_t size = vma->vm_end - vma->vm_start;
+	dma_addr_t baddr;
+	unsigned long offset;
+	int found = 0, ret;
+	struct rio_mport_mapping *map;
+
+	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
+		   (unsigned int)size, vma->vm_pgoff);
+
+	md = priv->md;
+	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
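+	/* The mmap offset supplied by user space is the physical address
+	 * of the target mapping; use it to look the mapping up below.
+	 */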
+
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry(map, &md->mappings, node) {
+		if (baddr >= map->phys_addr &&
+		    baddr < (map->phys_addr + map->size)) {
+			found = 1;
+			break;
+		}
+	}
+	mutex_unlock(&md->buf_mutex);
+
+	if (!found)
+		return -ENOMEM;
+
+	offset = baddr - map->phys_addr;
+
+	if (size + offset > map->size)
+		return -EINVAL;
+
+	vma->vm_pgoff = offset >> PAGE_SHIFT;
+	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
+
+	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
+		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
+				map->virt_addr, map->phys_addr, map->size);
+	else if (map->dir == MAP_OUTBOUND) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
+	} else {
+		rmcd_error("Attempt to mmap unsupported mapping type");
+		ret = -EIO;
+	}
+
+	if (!ret) {
+		vma->vm_private_data = map;
+		vma->vm_ops = &vm_ops;
+		mport_mm_open(vma);
+	} else {
+		rmcd_error("MMAP exit with err=%d", ret);
+	}
+
+	return ret;
+}
+
+static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+
+	poll_wait(filp, &priv->event_rx_wait, wait);
+	if (kfifo_len(&priv->event_fifo))
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
+			loff_t *ppos)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	int copied;
+	ssize_t ret;
+
+	if (!count)
+		return 0;
+
+	if (kfifo_is_empty(&priv->event_fifo) &&
+	    (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (count % sizeof(struct rio_event))
+		return -EINVAL;
+
+	ret = wait_event_interruptible(priv->event_rx_wait,
+					kfifo_len(&priv->event_fifo) != 0);
+	if (ret)
+		return ret;
+
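+	/* Copy whole events until the requested byte count is satisfied */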
+	while (ret < count) {
+		if (kfifo_to_user(&priv->event_fifo, buf,
+		      sizeof(struct rio_event), &copied))
+			return -EFAULT;
+		ret += copied;
+		buf += copied;
+	}
+
+	return ret;
+}
+
+static ssize_t mport_write(struct file *filp, const char __user *buf,
+			 size_t count, loff_t *ppos)
+{
+	struct mport_cdev_priv *priv = filp->private_data;
+	struct rio_mport *mport = priv->md->mport;
+	struct rio_event event;
+	int len, ret;
+
+	if (!count)
+		return 0;
+
+	if (count % sizeof(event))
+		return -EINVAL;
+
+	len = 0;
+	while ((count - len) >= (int)sizeof(event)) {
+		if (copy_from_user(&event, buf, sizeof(event)))
+			return -EFAULT;
+
+		if (event.header != RIO_DOORBELL)
+			return -EINVAL;
+
+		ret = rio_mport_send_doorbell(mport,
+					      event.u.doorbell.rioid,
+					      event.u.doorbell.payload);
+		if (ret < 0)
+			return ret;
+
+		len += sizeof(event);
+		buf += sizeof(event);
+	}
+
+	return len;
+}
+
+static const struct file_operations mport_fops = {
+	.owner		= THIS_MODULE,
+	.open		= mport_cdev_open,
+	.release	= mport_cdev_release,
+	.poll		= mport_cdev_poll,
+	.read		= mport_read,
+	.write		= mport_write,
+	.mmap		= mport_cdev_mmap,
+	.fasync		= mport_cdev_fasync,
+	.unlocked_ioctl = mport_cdev_ioctl
+};
+
+/*
+ * Character device management
+ */
+
+static void mport_device_release(struct device *dev)
+{
+	struct mport_dev *md;
+
+	rmcd_debug(EXIT, "%s", dev_name(dev));
+	md = container_of(dev, struct mport_dev, dev);
+	kfree(md);
+}
+
+/*
+ * mport_cdev_add() - Create mport_dev from rio_mport
+ * @mport:	RapidIO master port
+ */
+static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
+{
+	int ret = 0;
+	struct mport_dev *md;
+	struct rio_mport_attr attr;
+
+	md = kzalloc(sizeof(*md), GFP_KERNEL);
+	if (!md) {
+		rmcd_error("Unable allocate a device object");
+		return NULL;
+	}
+
+	md->mport = mport;
+	mutex_init(&md->buf_mutex);
+	mutex_init(&md->file_mutex);
+	INIT_LIST_HEAD(&md->file_list);
+
+	device_initialize(&md->dev);
+	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
+	md->dev.class = dev_class;
+	md->dev.parent = &mport->dev;
+	md->dev.release = mport_device_release;
+	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
+	atomic_set(&md->active, 1);
+
+	cdev_init(&md->cdev, &mport_fops);
+	md->cdev.owner = THIS_MODULE;
+
+	INIT_LIST_HEAD(&md->doorbells);
+	spin_lock_init(&md->db_lock);
+	INIT_LIST_HEAD(&md->portwrites);
+	spin_lock_init(&md->pw_lock);
+	INIT_LIST_HEAD(&md->mappings);
+
+	md->properties.id = mport->id;
+	md->properties.sys_size = mport->sys_size;
+	md->properties.hdid = mport->host_deviceid;
+	md->properties.index = mport->index;
+
+	/*
+	 * The transfer_mode property will be returned through the mport
+	 * query interface.
+	 */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
+	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
+#else
+	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
+#endif
+
+	ret = cdev_device_add(&md->cdev, &md->dev);
+	if (ret) {
+		rmcd_error("Failed to register mport %d (err=%d)",
+		       mport->id, ret);
+		goto err_cdev;
+	}
+	ret = rio_query_mport(mport, &attr);
+	if (!ret) {
+		md->properties.flags = attr.flags;
+		md->properties.link_speed = attr.link_speed;
+		md->properties.link_width = attr.link_width;
+		md->properties.dma_max_sge = attr.dma_max_sge;
+		md->properties.dma_max_size = attr.dma_max_size;
+		md->properties.dma_align = attr.dma_align;
+		md->properties.cap_sys_size = 0;
+		md->properties.cap_transfer_mode = 0;
+		md->properties.cap_addr_size = 0;
+	} else {
+		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
+			mport->name, MAJOR(dev_number), mport->id);
+	}
+
+	mutex_lock(&mport_devs_lock);
+	list_add_tail(&md->node, &mport_devs);
+	mutex_unlock(&mport_devs_lock);
+
+	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
+		mport->name, MAJOR(dev_number), mport->id);
+
+	return md;
+
+err_cdev:
+	put_device(&md->dev);
+	return NULL;
+}
+
+/*
+ * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
+ *                              associated DMA channels.
+ */
+static void mport_cdev_terminate_dma(struct mport_dev *md)
+{
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	struct mport_cdev_priv *client;
+
+	rmcd_debug(DMA, "%s", dev_name(&md->dev));
+
+	mutex_lock(&md->file_mutex);
+	list_for_each_entry(client, &md->file_list, list) {
+		if (client->dmach) {
+			dmaengine_terminate_all(client->dmach);
+			rio_release_dma(client->dmach);
+		}
+	}
+	mutex_unlock(&md->file_mutex);
+
+	if (md->dma_chan) {
+		dmaengine_terminate_all(md->dma_chan);
+		rio_release_dma(md->dma_chan);
+		md->dma_chan = NULL;
+	}
+#endif
+}
+
+/*
+ * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
+ *                            mport_cdev files.
+ */
+static int mport_cdev_kill_fasync(struct mport_dev *md)
+{
+	unsigned int files = 0;
+	struct mport_cdev_priv *client;
+
+	mutex_lock(&md->file_mutex);
+	list_for_each_entry(client, &md->file_list, list) {
+		if (client->async_queue)
+			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
+		files++;
+	}
+	mutex_unlock(&md->file_mutex);
+	return files;
+}
+
+/*
+ * mport_cdev_remove() - Remove mport character device
+ * @dev:	Mport device to remove
+ */
+static void mport_cdev_remove(struct mport_dev *md)
+{
+	struct rio_mport_mapping *map, *_map;
+
+	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
+	atomic_set(&md->active, 0);
+	mport_cdev_terminate_dma(md);
+	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
+	cdev_device_del(&md->cdev, &md->dev);
+	mport_cdev_kill_fasync(md);
+
+	flush_workqueue(dma_wq);
+
+	/* TODO: do we need to give clients some time to close file
+	 * descriptors? Simple wait for XX, or kref?
+	 */
+
+	/*
+	 * Release DMA buffers allocated for the mport device.
+	 * Disable the associated inbound RapidIO request mapping if applicable.
+	 */
+	mutex_lock(&md->buf_mutex);
+	list_for_each_entry_safe(map, _map, &md->mappings, node) {
+		kref_put(&map->ref, mport_release_mapping);
+	}
+	mutex_unlock(&md->buf_mutex);
+
+	if (!list_empty(&md->mappings))
+		rmcd_warn("WARNING: %s pending mappings on removal",
+			  md->mport->name);
+
+	rio_release_inb_dbell(md->mport, 0, 0x0fff);
+
+	put_device(&md->dev);
+}
+
+/*
+ * RIO rio_mport_interface driver
+ */
+
+/*
+ * mport_add_mport() - Add rio_mport from LDM device struct
+ * @dev:		Linux device model struct
+ * @class_intf:	Linux class_interface
+ */
+static int mport_add_mport(struct device *dev,
+		struct class_interface *class_intf)
+{
+	struct rio_mport *mport = NULL;
+	struct mport_dev *chdev = NULL;
+
+	mport = to_rio_mport(dev);
+	if (!mport)
+		return -ENODEV;
+
+	chdev = mport_cdev_add(mport);
+	if (!chdev)
+		return -ENODEV;
+
+	return 0;
+}
+
+/*
+ * mport_remove_mport() - Remove rio_mport from global list
+ * TODO remove device from global mport_dev list
+ */
+static void mport_remove_mport(struct device *dev,
+		struct class_interface *class_intf)
+{
+	struct rio_mport *mport = NULL;
+	struct mport_dev *chdev;
+	int found = 0;
+
+	mport = to_rio_mport(dev);
+	rmcd_debug(EXIT, "Remove %s", mport->name);
+
+	mutex_lock(&mport_devs_lock);
+	list_for_each_entry(chdev, &mport_devs, node) {
+		if (chdev->mport->id == mport->id) {
+			atomic_set(&chdev->active, 0);
+			list_del(&chdev->node);
+			found = 1;
+			break;
+		}
+	}
+	mutex_unlock(&mport_devs_lock);
+
+	if (found)
+		mport_cdev_remove(chdev);
+}
+
+/* the rio_mport_interface is used to handle local mport devices */
+static struct class_interface rio_mport_interface __refdata = {
+	.class		= &rio_mport_class,
+	.add_dev	= mport_add_mport,
+	.remove_dev	= mport_remove_mport,
+};
+
+/*
+ * Linux kernel module
+ */
+
+/*
+ * mport_init - Driver module loading
+ */
+static int __init mport_init(void)
+{
+	int ret;
+
+	/* Create device class needed by udev */
+	dev_class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(dev_class)) {
+		rmcd_error("Unable to create " DRV_NAME " class");
+		return PTR_ERR(dev_class);
+	}
+
+	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
+	if (ret < 0)
+		goto err_chr;
+
+	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
+
+	/* Register to rio_mport_interface */
+	ret = class_interface_register(&rio_mport_interface);
+	if (ret) {
+		rmcd_error("class_interface_register() failed, err=%d", ret);
+		goto err_cli;
+	}
+
+	dma_wq = create_singlethread_workqueue("dma_wq");
+	if (!dma_wq) {
+		rmcd_error("failed to create DMA work queue");
+		ret = -ENOMEM;
+		goto err_wq;
+	}
+
+	return 0;
+
+err_wq:
+	class_interface_unregister(&rio_mport_interface);
+err_cli:
+	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
+err_chr:
+	class_destroy(dev_class);
+	return ret;
+}
+
+/*
+ * mport_exit - Driver module unloading
+ */
+static void __exit mport_exit(void)
+{
+	class_interface_unregister(&rio_mport_interface);
+	class_destroy(dev_class);
+	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
+	destroy_workqueue(dma_wq);
+}
+
+module_init(mport_init);
+module_exit(mport_exit);
diff --git a/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721.c b/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721.c
new file mode 100644
index 0000000..9a68914
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721.c
@@ -0,0 +1,3014 @@
+/*
+ * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ * Chul Kim <chul.kim@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+#ifdef DEBUG
+u32 tsi_dbg_level;
+module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
+#endif
+
+static int pcie_mrrs = -1;
+module_param(pcie_mrrs, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
+
+static u8 mbox_sel = 0x0f;
+module_param(mbox_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(mbox_sel,
+		 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
+
+static DEFINE_SPINLOCK(tsi721_maint_lock);
+
+static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
+static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
+
+/**
+ * tsi721_lcread - read from local SREP config space
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location where the read value is stored
+ *
+ * Generates a local SREP space read. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
+			 int len, u32 *data)
+{
+	struct tsi721_device *priv = mport->priv;
+
+	if (len != sizeof(u32))
+		return -EINVAL; /* only 32-bit access is supported */
+
+	*data = ioread32(priv->regs + offset);
+
+	return 0;
+}
+
+/**
+ * tsi721_lcwrite - write into local SREP config space
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates a local write into SREP configuration space. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
+			  int len, u32 data)
+{
+	struct tsi721_device *priv = mport->priv;
+
+	if (len != sizeof(u32))
+		return -EINVAL; /* only 32-bit access is supported */
+
+	iowrite32(data, priv->regs + offset);
+
+	return 0;
+}
+
+/**
+ * tsi721_maint_dma - Helper function to generate RapidIO maintenance
+ *                    transactions using designated Tsi721 DMA channel.
+ * @priv: pointer to tsi721 private data
+ * @sys_size: RapidIO transport system size
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location to be read from or write into
+ * @do_wr: Operation flag (1 == MAINT_WR)
+ *
+ * Generates a RapidIO maintenance transaction (Read or Write).
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
+			u16 destid, u8 hopcount, u32 offset, int len,
+			u32 *data, int do_wr)
+{
+	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
+	struct tsi721_dma_desc *bd_ptr;
+	u32 rd_count, swr_ptr, ch_stat;
+	unsigned long flags;
+	int i, err = 0;
+	u32 op = do_wr ? MAINT_WR : MAINT_RD;
+
+	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
+		return -EINVAL;
+
+	spin_lock_irqsave(&tsi721_maint_lock, flags);
+
+	bd_ptr = priv->mdma.bd_base;
+
+	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
+
+	/* Initialize DMA descriptor */
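+	/* DTYPE2 = immediate-data descriptor; the operation code and
+	 * destination ID are packed into the type_id word.
+	 */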
+	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
+	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
+	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
+	bd_ptr[0].raddr_hi = 0;
+	if (do_wr)
+		bd_ptr[0].data[0] = cpu_to_be32p(data);
+	else
+		bd_ptr[0].data[0] = 0xffffffff;
+
+	mb();
+
+	/* Start DMA operation */
+	iowrite32(rd_count + 2,	regs + TSI721_DMAC_DWRCNT);
+	ioread32(regs + TSI721_DMAC_DWRCNT);
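+	/* The read back flushes the posted write to the device */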
+	i = 0;
+
+	/* Wait until DMA transfer is finished */
+	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+							& TSI721_DMAC_STS_RUN) {
+		udelay(1);
+		if (++i >= 5000000) {
+			tsi_debug(MAINT, &priv->pdev->dev,
+				"DMA[%d] read timeout ch_status=%x",
+				priv->mdma.ch_id, ch_stat);
+			if (!do_wr)
+				*data = 0xffffffff;
+			err = -EIO;
+			goto err_out;
+		}
+	}
+
+	if (ch_stat & TSI721_DMAC_STS_ABORT) {
+		/* If the DMA operation was aborted due to an error,
+		 * reinitialize the DMA channel
+		 */
+		tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x",
+			  ch_stat);
+		tsi_debug(MAINT, &priv->pdev->dev,
+			  "OP=%d : destid=%x hc=%x off=%x",
+			  do_wr ? MAINT_WR : MAINT_RD,
+			  destid, hopcount, offset);
+		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+		udelay(10);
+		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
+		udelay(1);
+		if (!do_wr)
+			*data = 0xffffffff;
+		err = -EIO;
+		goto err_out;
+	}
+
+	if (!do_wr)
+		*data = be32_to_cpu(bd_ptr[0].data[0]);
+
+	/*
+	 * Update descriptor status FIFO RD pointer.
+	 * NOTE: We skip checking and clearing FIFO entries because we have
+	 * already waited for the transfer to complete.
+	 */
+	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
+
+err_out:
+	spin_unlock_irqrestore(&tsi721_maint_lock, flags);
+
+	return err;
+}
+
+/**
+ * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
+ *                    using Tsi721 BDMA engine.
+ * @mport: RapidIO master port control structure
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location where the read value is stored
+ *
+ * Generates a RapidIO maintenance read transaction.
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
+			u8 hopcount, u32 offset, int len, u32 *data)
+{
+	struct tsi721_device *priv = mport->priv;
+
+	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
+				offset, len, data, 0);
+}
+
+/**
+ * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
+ *                     using Tsi721 BDMA engine
+ * @mport: RapidIO master port control structure
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates a RapidIO maintenance write transaction.
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
+			 u8 hopcount, u32 offset, int len, u32 data)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 temp = data;
+
+	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
+				offset, len, &temp, 1);
+}
+
+/**
+ * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
+ * @priv:  tsi721 device private structure
+ *
+ * Handles inbound port-write interrupts. Copies PW message from an internal
+ * buffer into the PW message FIFO and schedules a deferred routine to process
+ * queued messages.
+ */
+static int
+tsi721_pw_handler(struct tsi721_device *priv)
+{
+	u32 pw_stat;
+	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
+
+	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
+
+	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
+		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
+		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
+		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
+		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
+
+		/* Queue PW message (if there is room in FIFO),
+		 * otherwise discard it.
+		 */
+		spin_lock(&priv->pw_fifo_lock);
+		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
+			kfifo_in(&priv->pw_fifo, pw_buf,
+						TSI721_RIO_PW_MSG_SIZE);
+		else
+			priv->pw_discard_count++;
+		spin_unlock(&priv->pw_fifo_lock);
+	}
+
+	/* Clear pending PW interrupts */
+	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
+		  priv->regs + TSI721_RIO_PW_RX_STAT);
+
+	schedule_work(&priv->pw_work);
+
+	return 0;
+}
+
+static void tsi721_pw_dpc(struct work_struct *work)
+{
+	struct tsi721_device *priv = container_of(work, struct tsi721_device,
+						    pw_work);
+	union rio_pw_msg pwmsg;
+
+	/*
+	 * Process port-write messages
+	 */
+	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg,
+			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
+		/* Pass the port-write message to RIO core for processing */
+		rio_inb_pwrite_handler(&priv->mport, &pwmsg);
+	}
+}
+
+/**
+ * tsi721_pw_enable - enable/disable port-write interface
+ * @mport: Master port implementing the port write unit
+ * @enable:    1=enable; 0=disable port-write message handling
+ */
+static int tsi721_pw_enable(struct rio_mport *mport, int enable)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 rval;
+
+	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
+
+	if (enable)
+		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
+	else
+		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
+
+	/* Clear pending PW interrupts */
+	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
+		  priv->regs + TSI721_RIO_PW_RX_STAT);
+	/* Update enable bits */
+	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
+
+	return 0;
+}
+
+/**
+ * tsi721_dsend - Send a RapidIO doorbell
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of target device
+ * @data: 16-bit info field of RapidIO doorbell
+ *
+ * Sends a RapidIO doorbell message. Always returns %0.
+ */
+static int tsi721_dsend(struct rio_mport *mport, int index,
+			u16 destid, u16 data)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 offset;
+
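+	/* The TT code selects 8- or 16-bit destID format; each destination
+	 * ID has a 4-byte slot within the outbound doorbell window.
+	 */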
+	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
+		 (destid << 2);
+
+	tsi_debug(DBELL, &priv->pdev->dev,
+		  "Send Doorbell 0x%04x to destID 0x%x", data, destid);
+	iowrite16be(data, priv->odb_base + offset);
+
+	return 0;
+}
+
+/**
+ * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
+ * @priv: tsi721 device-specific data structure
+ *
+ * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
+ * buffer into the DB message FIFO and schedules a deferred routine to process
+ * queued DBs.
+ */
+static int
+tsi721_dbell_handler(struct tsi721_device *priv)
+{
+	u32 regval;
+
+	/* Disable IDB interrupts */
+	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+	regval &= ~TSI721_SR_CHINT_IDBQRCV;
+	iowrite32(regval,
+		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+	schedule_work(&priv->idb_work);
+
+	return 0;
+}
+
+static void tsi721_db_dpc(struct work_struct *work)
+{
+	struct tsi721_device *priv = container_of(work, struct tsi721_device,
+						    idb_work);
+	struct rio_mport *mport;
+	struct rio_dbell *dbell;
+	int found = 0;
+	u32 wr_ptr, rd_ptr;
+	u64 *idb_entry;
+	u32 regval;
+	union {
+		u64 msg;
+		u8  bytes[8];
+	} idb;
+
+	/*
+	 * Process queued inbound doorbells
+	 */
+	mport = &priv->mport;
+
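+	/* HW queue pointers may wrap past the queue size; reduce them
+	 * modulo IDB_QSIZE before indexing the ring.
+	 */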
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
+
+	while (wr_ptr != rd_ptr) {
+		idb_entry = (u64 *)(priv->idb_base +
+					(TSI721_IDB_ENTRY_SIZE * rd_ptr));
+		rd_ptr++;
+		rd_ptr %= IDB_QSIZE;
+		idb.msg = *idb_entry;
+		*idb_entry = 0;
+
+		/* Process one doorbell */
+		list_for_each_entry(dbell, &mport->dbells, node) {
+			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
+			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (found) {
+			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
+				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+		} else {
+			tsi_debug(DBELL, &priv->pdev->dev,
+				  "spurious IDB sid %2.2x tid %2.2x info %4.4x",
+				  DBELL_SID(idb.bytes), DBELL_TID(idb.bytes),
+				  DBELL_INF(idb.bytes));
+		}
+
+		wr_ptr = ioread32(priv->regs +
+				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	}
+
+	iowrite32(rd_ptr & (IDB_QSIZE - 1),
+		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+
+	/* Re-enable IDB interrupts */
+	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+	regval |= TSI721_SR_CHINT_IDBQRCV;
+	iowrite32(regval,
+		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+	if (wr_ptr != rd_ptr)
+		schedule_work(&priv->idb_work);
+}
+
+/**
+ * tsi721_irqhandler - Tsi721 interrupt handler
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
+ *
+ * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
+ * interrupt events and calls an event-specific handler(s).
+ */
+static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+{
+	struct tsi721_device *priv = (struct tsi721_device *)ptr;
+	u32 dev_int;
+	u32 dev_ch_int;
+	u32 intval;
+	u32 ch_inte;
+
+	/* For MSI mode disable all device-level interrupts */
+	if (priv->flags & TSI721_USING_MSI)
+		iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
+	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
+	if (!dev_int)
+		return IRQ_NONE;
+
+	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
+
+	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
+		/* Service SR2PC Channel interrupts */
+		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
+			/* Service Inbound Doorbell interrupt */
+			intval = ioread32(priv->regs +
+						TSI721_SR_CHINT(IDB_QUEUE));
+			if (intval & TSI721_SR_CHINT_IDBQRCV)
+				tsi721_dbell_handler(priv);
+			else
+				tsi_info(&priv->pdev->dev,
+					"Unsupported SR_CH_INT %x", intval);
+
+			/* Clear interrupts */
+			iowrite32(intval,
+				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+		}
+	}
+
+	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
+		int ch;
+
+		/*
+		 * Service channel interrupts from Messaging Engine
+		 */
+
+		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
+			/* Disable signaled IB MSG Channel interrupts */
+			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
+			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+
+			/*
+			 * Process Inbound Message interrupt for each MBOX
+			 */
+			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
+				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
+					continue;
+				tsi721_imsg_handler(priv, ch);
+			}
+		}
+
+		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
+			/* Disable signaled OB MSG Channel interrupts */
+			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
+			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+
+			/*
+			 * Process Outbound Message interrupts for each MBOX
+			 */
+
+			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
+				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
+					continue;
+				tsi721_omsg_handler(priv, ch);
+			}
+		}
+	}
+
+	if (dev_int & TSI721_DEV_INT_SRIO) {
+		/* Service SRIO MAC interrupts */
+		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
+		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
+			tsi721_pw_handler(priv);
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+		int ch;
+
+		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+			tsi_debug(DMA, &priv->pdev->dev,
+				  "IRQ from DMA channel 0x%08x", dev_ch_int);
+
+			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+					continue;
+				tsi721_bdma_handler(&priv->bdma[ch]);
+			}
+		}
+	}
+#endif
+
+	/* For MSI mode re-enable device-level interrupts */
+	if (priv->flags & TSI721_USING_MSI) {
+		dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
+		iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void tsi721_interrupts_init(struct tsi721_device *priv)
+{
+	u32 intr;
+
+	/* Enable IDB interrupts */
+	iowrite32(TSI721_SR_CHINT_ALL,
+		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+	iowrite32(TSI721_SR_CHINT_IDBQRCV,
+		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+	/* Enable SRIO MAC interrupts */
+	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
+		priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+
+	/* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+		(TSI721_INT_BDMA_CHAN_M &
+		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+	iowrite32(intr,	priv->regs + TSI721_DEV_CHAN_INTE);
+
+	if (priv->flags & TSI721_USING_MSIX)
+		intr = TSI721_DEV_INT_SRIO;
+	else
+		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
+
+	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
+	ioread32(priv->regs + TSI721_DEV_INTE);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
+ *
+ * Handles outbound messaging interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
+{
+	struct tsi721_device *priv = (struct tsi721_device *)ptr;
+	int mbox;
+
+	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
+	tsi721_omsg_handler(priv, mbox);
+	return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
+ *
+ * Handles inbound messaging interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
+{
+	struct tsi721_device *priv = (struct tsi721_device *)ptr;
+	int mbox;
+
+	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
+	tsi721_imsg_handler(priv, mbox + 4);
+	return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
+ *
+ * Handles Tsi721 interrupts from SRIO MAC.
+ */
+static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
+{
+	struct tsi721_device *priv = (struct tsi721_device *)ptr;
+	u32 srio_int;
+
+	/* Service SRIO MAC interrupts */
+	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
+	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
+		tsi721_pw_handler(priv);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
+ *
+ * Handles Tsi721 interrupts from SR2PC Channel.
+ * NOTE: At this moment it services only the SR2PC channel associated with inbound
+ * doorbells.
+ */
+static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
+{
+	struct tsi721_device *priv = (struct tsi721_device *)ptr;
+	u32 sr_ch_int;
+
+	/* Service Inbound DB interrupt from SR2PC channel */
+	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
+		tsi721_dbell_handler(priv);
+
+	/* Clear interrupts */
+	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+	/* Read back to ensure that interrupt was cleared */
+	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tsi721_request_msix - register interrupt service for MSI-X mode.
+ * @priv: tsi721 device-specific data structure
+ *
+ * Registers MSI-X interrupt service routines for interrupts that are active
+ * immediately after mport initialization. Messaging interrupt service routines
+ * should be registered during corresponding open requests.
+ */
+static int tsi721_request_msix(struct tsi721_device *priv)
+{
+	int err = 0;
+
+	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
+			tsi721_sr2pc_ch_msix, 0,
+			priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);
+	if (err)
+		return err;
+
+	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
+			tsi721_srio_msix, 0,
+			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);
+	if (err) {
+		free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures MSI-X support for Tsi721. Supports only an exact number
+ * of requested vectors.
+ */
+static int tsi721_enable_msix(struct tsi721_device *priv)
+{
+	struct msix_entry entries[TSI721_VECT_MAX];
+	int err;
+	int i;
+
+	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
+	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
+
+	/*
+	 * Initialize MSI-X entries for Messaging Engine:
+	 * this driver supports four RIO mailboxes (inbound and outbound)
+	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
+	 * offset +4 is added to IB MBOX number.
+	 */
+	for (i = 0; i < RIO_MAX_MBOX; i++) {
+		entries[TSI721_VECT_IMB0_RCV + i].entry =
+					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
+		entries[TSI721_VECT_IMB0_INT + i].entry =
+					TSI721_MSIX_IMSG_INT(i + 4);
+		entries[TSI721_VECT_OMB0_DONE + i].entry =
+					TSI721_MSIX_OMSG_DONE(i);
+		entries[TSI721_VECT_OMB0_INT + i].entry =
+					TSI721_MSIX_OMSG_INT(i);
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	/*
+	 * Initialize MSI-X entries for Block DMA Engine:
+	 * this driver supports TSI721_DMA_CHNUM DMA channels
+	 * (one is reserved for SRIO maintenance transactions)
+	 */
+	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+		entries[TSI721_VECT_DMA0_DONE + i].entry =
+					TSI721_MSIX_DMACH_DONE(i);
+		entries[TSI721_VECT_DMA0_INT + i].entry =
+					TSI721_MSIX_DMACH_INT(i);
+	}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+	err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
+	if (err) {
+		tsi_err(&priv->pdev->dev,
+			"Failed to enable MSI-X (err=%d)", err);
+		return err;
+	}
+
+	/*
+	 * Copy MSI-X vector information into tsi721 private structure
+	 */
+	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
+	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
+		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
+	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
+	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
+		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
+
+	for (i = 0; i < RIO_MAX_MBOX; i++) {
+		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
+				entries[TSI721_VECT_IMB0_RCV + i].vector;
+		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
+				entries[TSI721_VECT_IMB0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
+				entries[TSI721_VECT_OMB0_DONE + i].vector;
+		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
+				entries[TSI721_VECT_OMB0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
+			 i, pci_name(priv->pdev));
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+				entries[TSI721_VECT_DMA0_DONE + i].vector;
+		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+				entries[TSI721_VECT_DMA0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+			 i, pci_name(priv->pdev));
+	}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+	return 0;
+}
+#endif /* CONFIG_PCI_MSI */
+
+static int tsi721_request_irq(struct tsi721_device *priv)
+{
+	int err;
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX)
+		err = tsi721_request_msix(priv);
+	else
+#endif
+		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
+			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
+			  DRV_NAME, (void *)priv);
+
+	if (err)
+		tsi_err(&priv->pdev->dev,
+			"Unable to allocate interrupt, err=%d", err);
+
+	return err;
+}
+
+static void tsi721_free_irq(struct tsi721_device *priv)
+{
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
+		free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv);
+	} else
+#endif
+	free_irq(priv->pdev->irq, (void *)priv);
+}
+
+static int
+tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar,
+		 u32 size, int *win_id)
+{
+	u64 win_base;
+	u64 bar_base;
+	u64 bar_end;
+	u32 align;
+	struct tsi721_ob_win *win;
+	struct tsi721_ob_win *new_win = NULL;
+	int new_win_idx = -1;
+	int i = 0;
+
+	bar_base = pbar->base;
+	bar_end =  bar_base + pbar->size;
+	win_base = bar_base;
+	align = size / TSI721_PC2SR_ZONES;
+
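+	/*
+	 * Scan for a free window slot while checking the candidate base
+	 * address against all active windows within the same BAR. On
+	 * overlap, advance the candidate base past the conflicting window
+	 * and restart the scan.
+	 */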
+	while (i < TSI721_OBWIN_NUM) {
+		for (i = 0; i < TSI721_OBWIN_NUM; i++) {
+			if (!priv->ob_win[i].active) {
+				if (new_win == NULL) {
+					new_win = &priv->ob_win[i];
+					new_win_idx = i;
+				}
+				continue;
+			}
+
+			/*
+			 * If this window belongs to the current BAR, check it
+			 * for overlap.
+			 */
+			win = &priv->ob_win[i];
+
+			if (win->base >= bar_base && win->base < bar_end) {
+				if (win_base < (win->base + win->size) &&
+						(win_base + size) > win->base) {
+					/* Overlap detected */
+					win_base = win->base + win->size;
+					win_base = ALIGN(win_base, align);
+					break;
+				}
+			}
+		}
+	}
+
+	if (win_base + size > bar_end)
+		return -ENOMEM;
+
+	if (!new_win) {
+		tsi_err(&priv->pdev->dev, "OBW count tracking failed");
+		return -EIO;
+	}
+
+	new_win->active = true;
+	new_win->base = win_base;
+	new_win->size = size;
+	new_win->pbar = pbar;
+	priv->obwin_cnt--;
+	pbar->free -= size;
+	*win_id = new_win_idx;
+	return 0;
+}
+
+static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart,
+			u32 size, u32 flags, dma_addr_t *laddr)
+{
+	struct tsi721_device *priv = mport->priv;
+	int i;
+	struct tsi721_obw_bar *pbar;
+	struct tsi721_ob_win *ob_win;
+	int obw = -1;
+	u32 rval;
+	u64 rio_addr;
+	u32 zsize;
+	int ret = -ENOMEM;
+
+	tsi_debug(OBW, &priv->pdev->dev,
+		  "did=%d ra=0x%llx sz=0x%x", destid, rstart, size);
+
+	if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1)))
+		return -EINVAL;
+
+	if (priv->obwin_cnt == 0)
+		return -EBUSY;
+
+	for (i = 0; i < 2; i++) {
+		if (priv->p2r_bar[i].free >= size) {
+			pbar = &priv->p2r_bar[i];
+			ret = tsi721_obw_alloc(priv, pbar, size, &obw);
+			if (!ret)
+				break;
+		}
+	}
+
+	if (ret)
+		return ret;
+
+	WARN_ON(obw == -1);
+	ob_win = &priv->ob_win[obw];
+	ob_win->destid = destid;
+	ob_win->rstart = rstart;
+	tsi_debug(OBW, &priv->pdev->dev,
+		  "allocated OBW%d @%llx", obw, ob_win->base);
+
+	/*
+	 * Configure Outbound Window
+	 */
+
+	zsize = size / TSI721_PC2SR_ZONES;
+	rio_addr = rstart;
+
+	/*
+	 * Program Address Translation Zones:
+	 * This implementation uses all 8 zones associated with the window.
+	 */
+	for (i = 0; i < TSI721_PC2SR_ZONES; i++) {
+
+		while (ioread32(priv->regs + TSI721_ZONE_SEL) &
+			TSI721_ZONE_SEL_GO) {
+			udelay(1);
+		}
+
+		rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) |
+			TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR;
+		iowrite32(rval, priv->regs + TSI721_LUT_DATA0);
+		rval = (u32)(rio_addr >> 32);
+		iowrite32(rval, priv->regs + TSI721_LUT_DATA1);
+		rval = destid;
+		iowrite32(rval, priv->regs + TSI721_LUT_DATA2);
+
+		rval = TSI721_ZONE_SEL_GO | (obw << 3) | i;
+		iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
+
+		rio_addr += zsize;
+	}
+
+	iowrite32(TSI721_OBWIN_SIZE(size) << 8,
+		  priv->regs + TSI721_OBWINSZ(obw));
+	iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw));
+	iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN,
+		  priv->regs + TSI721_OBWINLB(obw));
+
+	*laddr = ob_win->base;
+	return 0;
+}
+
+static void tsi721_unmap_outb_win(struct rio_mport *mport,
+				  u16 destid, u64 rstart)
+{
+	struct tsi721_device *priv = mport->priv;
+	struct tsi721_ob_win *ob_win;
+	int i;
+
+	tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart);
+
+	for (i = 0; i < TSI721_OBWIN_NUM; i++) {
+		ob_win = &priv->ob_win[i];
+
+		if (ob_win->active &&
+		    ob_win->destid == destid && ob_win->rstart == rstart) {
+			tsi_debug(OBW, &priv->pdev->dev,
+				  "free OBW%d @%llx", i, ob_win->base);
+			ob_win->active = false;
+			iowrite32(0, priv->regs + TSI721_OBWINLB(i));
+			ob_win->pbar->free += ob_win->size;
+			priv->obwin_cnt++;
+			break;
+		}
+	}
+}
+
+/**
+ * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
+ * translation regions.
+ * @priv: pointer to tsi721 private data
+ *
+ * Disables SREP translation regions.
+ */
+static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
+{
+	int i, z;
+	u32 rval;
+
+	/* Disable all PC2SR translation windows */
+	for (i = 0; i < TSI721_OBWIN_NUM; i++)
+		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
+
+	/* Initialize zone lookup tables to avoid ECC errors on reads */
+	iowrite32(0, priv->regs + TSI721_LUT_DATA0);
+	iowrite32(0, priv->regs + TSI721_LUT_DATA1);
+	iowrite32(0, priv->regs + TSI721_LUT_DATA2);
+
+	for (i = 0; i < TSI721_OBWIN_NUM; i++) {
+		for (z = 0; z < TSI721_PC2SR_ZONES; z++) {
+			while (ioread32(priv->regs + TSI721_ZONE_SEL) &
+				TSI721_ZONE_SEL_GO) {
+				udelay(1);
+			}
+			rval = TSI721_ZONE_SEL_GO | (i << 3) | z;
+			iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
+		}
+	}
+
+	if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) {
+		priv->obwin_cnt = 0;
+		return;
+	}
+
+	priv->p2r_bar[0].free = priv->p2r_bar[0].size;
+	priv->p2r_bar[1].free = priv->p2r_bar[1].size;
+
+	for (i = 0; i < TSI721_OBWIN_NUM; i++)
+		priv->ob_win[i].active = false;
+
+	priv->obwin_cnt = TSI721_OBWIN_NUM;
+}
+
+/**
+ * tsi721_rio_map_inb_mem - map an inbound memory region
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ * @rstart: RapidIO space start address.
+ * @size: The mapping region size.
+ * @flags: Flags for mapping. 0 for using default flags.
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will create the inbound mapping
+ * from rstart to lstart.
+ */
+static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
+		u64 rstart, u64 size, u32 flags)
+{
+	struct tsi721_device *priv = mport->priv;
+	int i, avail = -1;
+	u32 regval;
+	struct tsi721_ib_win *ib_win;
+	bool direct = (lstart == rstart);
+	u64 ibw_size;
+	dma_addr_t loc_start;
+	u64 ibw_start;
+	struct tsi721_ib_win_mapping *map = NULL;
+	int ret = -EBUSY;
+
+	/* Max IBW size supported by HW is 16GB */
+	if (size > 0x400000000UL)
+		return -EINVAL;
+
+	if (direct) {
+		/* Calculate minimal acceptable window size and base address */
+
+		ibw_size = roundup_pow_of_two(size);
+		ibw_start = lstart & ~(ibw_size - 1);
+
+		tsi_debug(IBW, &priv->pdev->dev,
+			"Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
+			rstart, &lstart, size, ibw_start);
+
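+		/* Grow the window (keeping its size a power of 2) until the
+		 * requested range fits
+		 */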
+		while ((lstart + size) > (ibw_start + ibw_size)) {
+			ibw_size *= 2;
+			ibw_start = lstart & ~(ibw_size - 1);
+			/* Check for crossing IBW max size 16GB */
+			if (ibw_size > 0x400000000UL)
+				return -EBUSY;
+		}
+
+		loc_start = ibw_start;
+
+		map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC);
+		if (map == NULL)
+			return -ENOMEM;
+
+	} else {
+		tsi_debug(IBW, &priv->pdev->dev,
+			"Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
+			rstart, &lstart, size);
+
+		if (!is_power_of_2(size) || size < 0x1000 ||
+		    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
+			return -EINVAL;
+		if (priv->ibwin_cnt == 0)
+			return -EBUSY;
+		ibw_start = rstart;
+		ibw_size = size;
+		loc_start = lstart;
+	}
+
+	/*
+	 * Scan for overlap with active regions and note the first available
+	 * IB window at the same time.
+	 */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+		ib_win = &priv->ib_win[i];
+
+		if (!ib_win->active) {
+			if (avail == -1) {
+				avail = i;
+				ret = 0;
+			}
+		} else if (ibw_start < (ib_win->rstart + ib_win->size) &&
+			   (ibw_start + ibw_size) > ib_win->rstart) {
+			/* Return error if address translation involved */
+			if (!direct || ib_win->xlat) {
+				ret = -EFAULT;
+				break;
+			}
+
+			/*
+			 * Direct mappings usually are larger than the originally
+			 * requested fragments - check if this new request fits
+			 * into the existing window.
+			 */
+			if (rstart >= ib_win->rstart &&
+			    (rstart + size) <= (ib_win->rstart +
+							ib_win->size)) {
+				/* We are in - no further mapping required */
+				map->lstart = lstart;
+				list_add_tail(&map->node, &ib_win->mappings);
+				return 0;
+			}
+
+			ret = -EFAULT;
+			break;
+		}
+	}
+
+	if (ret)
+		goto out;
+	i = avail;
+
+	/* Sanity check: available IB window must be disabled at this point */
+	regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
+	if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	ib_win = &priv->ib_win[i];
+	ib_win->active = true;
+	ib_win->rstart = ibw_start;
+	ib_win->lstart = loc_start;
+	ib_win->size = ibw_size;
+	ib_win->xlat = (lstart != rstart);
+	INIT_LIST_HEAD(&ib_win->mappings);
+
+	/*
+	 * When using direct IBW mapping with a larger than requested IBW size,
+	 * multiple local memory blocks may be mapped through the same IBW.
+	 * To handle this situation we maintain a list of "clients" for such IBWs.
+	 */
+	if (direct) {
+		map->lstart = lstart;
+		list_add_tail(&map->node, &ib_win->mappings);
+	}
+
+	iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8,
+			priv->regs + TSI721_IBWIN_SZ(i));
+
+	iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i));
+	iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD),
+		  priv->regs + TSI721_IBWIN_TLA(i));
+
+	iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i));
+	iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
+		priv->regs + TSI721_IBWIN_LB(i));
+
+	priv->ibwin_cnt--;
+
+	tsi_debug(IBW, &priv->pdev->dev,
+		"Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
+		i, ibw_start, &loc_start, ibw_size);
+
+	return 0;
+out:
+	kfree(map);
+	return ret;
+}
+
+/**
+ * tsi721_rio_unmap_inb_mem - unmap an inbound memory region
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ */
+static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
+				dma_addr_t lstart)
+{
+	struct tsi721_device *priv = mport->priv;
+	struct tsi721_ib_win *ib_win;
+	int i;
+
+	tsi_debug(IBW, &priv->pdev->dev,
+		"Unmap IBW mapped to PCIe_%pad", &lstart);
+
+	/* Search for matching active inbound translation window */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+		ib_win = &priv->ib_win[i];
+
+		/* Address-translating IBWs must be an exact match */
+		if (!ib_win->active ||
+		    (ib_win->xlat && lstart != ib_win->lstart))
+			continue;
+
+		if (lstart >= ib_win->lstart &&
+		    lstart < (ib_win->lstart + ib_win->size)) {
+
+			if (!ib_win->xlat) {
+				struct tsi721_ib_win_mapping *map;
+				int found = 0;
+
+				list_for_each_entry(map,
+						    &ib_win->mappings, node) {
+					if (map->lstart == lstart) {
+						list_del(&map->node);
+						kfree(map);
+						found = 1;
+						break;
+					}
+				}
+
+				if (!found)
+					continue;
+
+				if (!list_empty(&ib_win->mappings))
+					break;
+			}
+
+			tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i);
+			iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+			ib_win->active = false;
+			priv->ibwin_cnt++;
+			break;
+		}
+	}
+
+	if (i == TSI721_IBWIN_NUM)
+		tsi_debug(IBW, &priv->pdev->dev,
+			"IB window mapped to %pad not found", &lstart);
+}
+
+/**
+ * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
+ * translation regions.
+ * @priv: pointer to tsi721 private data
+ *
+ * Disables inbound windows.
+ */
+static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
+{
+	int i;
+
+	/* Disable all SR2PC inbound windows */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++)
+		iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+	priv->ibwin_cnt = TSI721_IBWIN_NUM;
+}
+
+/*
+ * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe)
+ * translation regions.
+ * @priv: pointer to tsi721 device private data
+ */
+static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
+{
+	struct tsi721_ib_win *ib_win;
+	int i;
+
+	/* Disable all active SR2PC inbound windows */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+		ib_win = &priv->ib_win[i];
+		if (ib_win->active) {
+			iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+			ib_win->active = false;
+		}
+	}
+}
+
+/**
+ * tsi721_port_write_init - Inbound port write interface init
+ * @priv: pointer to tsi721 private data
+ *
+ * Initializes inbound port write handler.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_port_write_init(struct tsi721_device *priv)
+{
+	priv->pw_discard_count = 0;
+	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
+	spin_lock_init(&priv->pw_fifo_lock);
+	if (kfifo_alloc(&priv->pw_fifo,
+			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+		tsi_err(&priv->pdev->dev, "PW FIFO allocation failed");
+		return -ENOMEM;
+	}
+
+	/* Use reliable port-write capture mode */
+	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
+	return 0;
+}
+
+static void tsi721_port_write_free(struct tsi721_device *priv)
+{
+	kfifo_free(&priv->pw_fifo);
+}
+
+static int tsi721_doorbell_init(struct tsi721_device *priv)
+{
+	/* Outbound Doorbells do not require any setup.
+	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
+	 * That BAR1 was mapped during the probe routine.
+	 */
+
+	/* Initialize Inbound Doorbell processing DPC and queue */
+	priv->db_discard_count = 0;
+	INIT_WORK(&priv->idb_work, tsi721_db_dpc);
+
+	/* Allocate buffer for inbound doorbells queue */
+	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
+				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+				&priv->idb_dma, GFP_KERNEL);
+	if (!priv->idb_base)
+		return -ENOMEM;
+
+	tsi_debug(DBELL, &priv->pdev->dev,
+		  "Allocated IDB buffer @ %p (phys = %pad)",
+		  priv->idb_base, &priv->idb_dma);
+
+	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
+		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
+	iowrite32(((u64)priv->idb_dma >> 32),
+		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
+	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
+		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
+	/* Enable accepting all inbound doorbells */
+	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
+
+	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
+
+	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+
+	return 0;
+}
+
+static void tsi721_doorbell_free(struct tsi721_device *priv)
+{
+	if (priv->idb_base == NULL)
+		return;
+
+	/* Free buffer allocated for inbound doorbell queue */
+	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+			  priv->idb_base, priv->idb_dma);
+	priv->idb_base = NULL;
+}
+
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initializes the BDMA channel reserved for RapidIO maintenance read/write
+ * request generation.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
+{
+	struct tsi721_dma_desc *bd_ptr;
+	u64		*sts_ptr;
+	dma_addr_t	bd_phys, sts_phys;
+	int		sts_size;
+	int		bd_num = 2;
+	void __iomem	*regs;
+
+	tsi_debug(MAINT, &priv->pdev->dev,
+		  "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT);
+
+	/*
+	 * Initialize DMA channel for maintenance requests
+	 */
+
+	priv->mdma.ch_id = TSI721_DMACH_MAINT;
+	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
+	/* Allocate space for DMA descriptors */
+	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
+					bd_num * sizeof(struct tsi721_dma_desc),
+					&bd_phys, GFP_KERNEL);
+	if (!bd_ptr)
+		return -ENOMEM;
+
+	priv->mdma.bd_num = bd_num;
+	priv->mdma.bd_phys = bd_phys;
+	priv->mdma.bd_base = bd_ptr;
+
+	tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)",
+		  bd_ptr, &bd_phys);
+
+	/* Allocate space for descriptor status FIFO */
+	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+					bd_num : TSI721_DMA_MINSTSSZ;
+	sts_size = roundup_pow_of_two(sts_size);
+	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
+				     sts_size * sizeof(struct tsi721_dma_sts),
+				     &sts_phys, GFP_KERNEL);
+	if (!sts_ptr) {
+		/* Free space allocated for DMA descriptors */
+		dma_free_coherent(&priv->pdev->dev,
+				  bd_num * sizeof(struct tsi721_dma_desc),
+				  bd_ptr, bd_phys);
+		priv->mdma.bd_base = NULL;
+		return -ENOMEM;
+	}
+
+	priv->mdma.sts_phys = sts_phys;
+	priv->mdma.sts_base = sts_ptr;
+	priv->mdma.sts_size = sts_size;
+
+	tsi_debug(MAINT, &priv->pdev->dev,
+		"desc status FIFO @ %p (phys = %pad) size=0x%x",
+		sts_ptr, &sts_phys, sts_size);
+
+	/* Initialize DMA descriptors ring */
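+	/* The last entry is a DTYPE3 link descriptor pointing back to the
+	 * ring base, closing the descriptor ring.
+	 */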
+	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+						 TSI721_DMAC_DPTRL_MASK);
+	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+	/* Setup DMA descriptor pointers */
+	iowrite32(((u64)bd_phys >> 32),	regs + TSI721_DMAC_DPTRH);
+	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+		regs + TSI721_DMAC_DPTRL);
+
+	/* Setup descriptor status FIFO */
+	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
+	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+		regs + TSI721_DMAC_DSBL);
+	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+		regs + TSI721_DMAC_DSSZ);
+
+	/* Clear interrupt bits */
+	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+
+	ioread32(regs + TSI721_DMAC_INT);
+
+	/* Toggle DMA channel initialization */
+	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
+	ioread32(regs + TSI721_DMAC_CTL);
+	udelay(10);
+
+	return 0;
+}
+
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
+{
+	u32 ch_stat;
+	struct tsi721_bdma_maint *mdma = &priv->mdma;
+	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
+
+	if (mdma->bd_base == NULL)
+		return 0;
+
+	/* Check if the DMA channel is still running */
+	ch_stat = ioread32(regs + TSI721_DMAC_STS);
+	if (ch_stat & TSI721_DMAC_STS_RUN)
+		return -EFAULT;
+
+	/* Put DMA channel into init state */
+	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
+
+	/* Free space allocated for DMA descriptors */
+	dma_free_coherent(&priv->pdev->dev,
+		mdma->bd_num * sizeof(struct tsi721_dma_desc),
+		mdma->bd_base, mdma->bd_phys);
+	mdma->bd_base = NULL;
+
+	/* Free space allocated for status FIFO */
+	dma_free_coherent(&priv->pdev->dev,
+		mdma->sts_size * sizeof(struct tsi721_dma_sts),
+		mdma->sts_base, mdma->sts_phys);
+	mdma->sts_base = NULL;
+	return 0;
+}
+
+/* Enable Inbound Messaging Interrupts */
+static void
+tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
+				  u32 inte_mask)
+{
+	u32 rval;
+
+	if (!inte_mask)
+		return;
+
+	/* Clear pending Inbound Messaging interrupts */
+	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
+
+	/* Enable Inbound Messaging interrupts */
+	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
+	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
+
+	if (priv->flags & TSI721_USING_MSIX)
+		return; /* Finished if we are in MSI-X mode */
+
+	/*
+	 * For MSI and INTA interrupt signalling we need to enable the next levels
+	 */
+
+	/* Enable Device Channel Interrupt */
+	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
+		  priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/* Disable Inbound Messaging Interrupts */
+static void
+tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
+				   u32 inte_mask)
+{
+	u32 rval;
+
+	if (!inte_mask)
+		return;
+
+	/* Clear pending Inbound Messaging interrupts */
+	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
+
+	/* Disable Inbound Messaging interrupts */
+	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
+	rval &= ~inte_mask;
+	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
+
+	if (priv->flags & TSI721_USING_MSIX)
+		return; /* Finished if we are in MSI-X mode */
+
+	/*
+	 * For MSI and INTA interrupt signalling we need to disable the next levels
+	 */
+
+	/* Disable Device Channel Interrupt */
+	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+	rval &= ~TSI721_INT_IMSG_CHAN(ch);
+	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/* Enable Outbound Messaging interrupts */
+static void
+tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
+				  u32 inte_mask)
+{
+	u32 rval;
+
+	if (!inte_mask)
+		return;
+
+	/* Clear pending Outbound Messaging interrupts */
+	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
+
+	/* Enable Outbound Messaging channel interrupts */
+	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
+	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
+
+	if (priv->flags & TSI721_USING_MSIX)
+		return; /* Finished if we are in MSI-X mode */
+
+	/*
+	 * For MSI and INTA interrupt signalling we need to enable the next levels
+	 */
+
+	/* Enable Device Channel Interrupt */
+	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
+		  priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/* Disable Outbound Messaging interrupts */
+static void
+tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
+				   u32 inte_mask)
+{
+	u32 rval;
+
+	if (!inte_mask)
+		return;
+
+	/* Clear pending Outbound Messaging interrupts */
+	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
+
+	/* Disable Outbound Messaging interrupts */
+	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
+	rval &= ~inte_mask;
+	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
+
+	if (priv->flags & TSI721_USING_MSIX)
+		return; /* Finished if we are in MSI-X mode */
+
+	/*
+	 * For MSI and INTA interrupt signalling we need to disable the next levels
+	 */
+
+	/* Disable Device Channel Interrupt */
+	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+	rval &= ~TSI721_INT_OMSG_CHAN(ch);
+	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
+}
+
+/**
+ * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ */
+static int
+tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+			void *buffer, size_t len)
+{
+	struct tsi721_device *priv = mport->priv;
+	struct tsi721_omsg_desc *desc;
+	u32 tx_slot;
+	unsigned long flags;
+
+	if (!priv->omsg_init[mbox] ||
+	    len > TSI721_MSG_MAX_SIZE || len < 8)
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags);
+
+	tx_slot = priv->omsg_ring[mbox].tx_slot;
+
+	/* Copy message into transfer buffer */
+	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
+
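+	/* Round the length up to a multiple of 8; the low bits are masked
+	 * off by (len & 0xff8) when the descriptor is built below.
+	 */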
+	if (len & 0x7)
+		len += 8;
+
+	/* Build descriptor associated with buffer */
+	desc = priv->omsg_ring[mbox].omd_base;
+	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
+#ifdef TSI721_OMSG_DESC_INT
+	/* Request IOF_DONE interrupt generation for every fourth frame in queue */
+	if (tx_slot % 4 == 0)
+		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
+#endif
+	desc[tx_slot].msg_info =
+		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
+			    (0xe << 12) | (len & 0xff8));
+	desc[tx_slot].bufptr_lo =
+		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
+			    0xffffffff);
+	desc[tx_slot].bufptr_hi =
+		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
+
+	priv->omsg_ring[mbox].wr_count++;
+
+	/* Go to next descriptor */
+	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
+		priv->omsg_ring[mbox].tx_slot = 0;
+		/* Move through the ring link descriptor at the end */
+		priv->omsg_ring[mbox].wr_count++;
+	}
+
+	mb();
+
+	/* Set new write count value */
+	iowrite32(priv->omsg_ring[mbox].wr_count,
+		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+
+	spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags);
+
+	return 0;
+}
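+
+/*
+ * Illustrative usage sketch (not part of this driver): clients reach
+ * tsi721_add_outb_message() through the generic RapidIO messaging API
+ * after opening the mailbox with rio_request_outb_mbox(); the payload
+ * below is a made-up example:
+ *
+ *	u8 msg[64] = { 0xde, 0xad, 0xbe, 0xef };
+ *
+ *	rio_add_outb_message(mport, rdev, 0, msg, sizeof(msg));
+ */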
+
+/**
+ * tsi721_omsg_handler - Outbound Message Interrupt Handler
+ * @priv: pointer to tsi721 private data
+ * @ch:   number of OB MSG channel to service
+ *
+ * Services channel interrupts from outbound messaging engine.
+ */
+static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
+{
+	u32 omsg_int;
+	struct rio_mport *mport = &priv->mport;
+	void *dev_id = NULL;
+	u32 tx_slot = 0xffffffff;
+	int do_callback = 0;
+
+	spin_lock(&priv->omsg_ring[ch].lock);
+
+	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
+
+	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
+		tsi_info(&priv->pdev->dev,
+			"OB MBOX%d: Status FIFO is full", ch);
+
+	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
+		u32 srd_ptr;
+		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
+		int i, j;
+
+		/*
+		 * Find last successfully processed descriptor
+		 */
+
+		/* Check and clear descriptor status FIFO entries */
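+		/* The FIFO is scanned in blocks of TSI721_DMA_STSBLKSZ (8)
+		 * 64-bit entries; srd_ptr counts blocks, hence the "* 8"
+		 * scaling below.
+		 */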
+		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
+		sts_ptr = priv->omsg_ring[ch].sts_base;
+		j = srd_ptr * 8;
+		while (sts_ptr[j]) {
+			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
+				prev_ptr = last_ptr;
+				last_ptr = le64_to_cpu(sts_ptr[j]);
+				sts_ptr[j] = 0;
+			}
+
+			++srd_ptr;
+			srd_ptr %= priv->omsg_ring[ch].sts_size;
+			j = srd_ptr * 8;
+		}
+
+		if (last_ptr == 0)
+			goto no_sts_update;
+
+		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
+		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
+
+		if (!mport->outb_msg[ch].mcback)
+			goto no_sts_update;
+
+		/* Inform upper layer about transfer completion */
+
+		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
+						sizeof(struct tsi721_omsg_desc);
+
+		/*
+		 * Check if this is a Link Descriptor (LD).
+		 * If yes, ignore LD and use descriptor processed
+		 * before LD.
+		 */
+		if (tx_slot == priv->omsg_ring[ch].size) {
+			if (prev_ptr)
+				tx_slot = (prev_ptr -
+					(u64)priv->omsg_ring[ch].omd_phys)/
+						sizeof(struct tsi721_omsg_desc);
+			else
+				goto no_sts_update;
+		}
+
+		if (tx_slot >= priv->omsg_ring[ch].size)
+			tsi_debug(OMSG, &priv->pdev->dev,
+				  "OB_MSG tx_slot=%x >= size=%x",
+				  tx_slot, priv->omsg_ring[ch].size);
+		WARN_ON(tx_slot >= priv->omsg_ring[ch].size);
+
+		/* Move slot index to the next message to be sent */
+		++tx_slot;
+		if (tx_slot == priv->omsg_ring[ch].size)
+			tx_slot = 0;
+
+		dev_id = priv->omsg_ring[ch].dev_id;
+		do_callback = 1;
+	}
+
+no_sts_update:
+
+	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
+		/*
+		 * Outbound message operation aborted due to error,
+		 * reinitialize OB MSG channel
+		 */
+
+		tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x",
+			  ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
+
+		iowrite32(TSI721_OBDMAC_INT_ERROR,
+				priv->regs + TSI721_OBDMAC_INT(ch));
+		iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
+				priv->regs + TSI721_OBDMAC_CTL(ch));
+		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
+
+		/* Inform upper level to clear all pending tx slots */
+		dev_id = priv->omsg_ring[ch].dev_id;
+		tx_slot = priv->omsg_ring[ch].tx_slot;
+		do_callback = 1;
+
+		/* Synch tx_slot tracking */
+		iowrite32(priv->omsg_ring[ch].tx_slot,
+			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
+		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
+		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
+		priv->omsg_ring[ch].sts_rdptr = 0;
+	}
+
+	/* Clear channel interrupts */
+	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
+
+	if (!(priv->flags & TSI721_USING_MSIX)) {
+		u32 ch_inte;
+
+		/* Re-enable channel interrupts */
+		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
+		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+	}
+
+	spin_unlock(&priv->omsg_ring[ch].lock);
+
+	if (mport->outb_msg[ch].mcback && do_callback)
+		mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot);
+}
+
+/**
+ * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
+ * @mport: Master port implementing Outbound Messaging Engine
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring
+ */
+static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
+				 int mbox, int entries)
+{
+	struct tsi721_device *priv = mport->priv;
+	struct tsi721_omsg_desc *bd_ptr;
+	int i, rc = 0;
+
+	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
+	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
+	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
+		rc = -EINVAL;
+		goto out;
+	}
+
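+	/* mbox_sel is a module-parameter bitmask: bit n enables mailbox n */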
+	if ((mbox_sel & (1 << mbox)) == 0) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	priv->omsg_ring[mbox].dev_id = dev_id;
+	priv->omsg_ring[mbox].size = entries;
+	priv->omsg_ring[mbox].sts_rdptr = 0;
+	spin_lock_init(&priv->omsg_ring[mbox].lock);
+
+	/*
+	 * Outbound message buffer allocation, sized for the maximum
+	 * number of descriptor entries
+	 */
+	for (i = 0; i < entries; i++) {
+		priv->omsg_ring[mbox].omq_base[i] =
+			dma_alloc_coherent(
+				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
+				&priv->omsg_ring[mbox].omq_phys[i],
+				GFP_KERNEL);
+		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
+			tsi_debug(OMSG, &priv->pdev->dev,
+				  "ENOMEM for OB_MSG_%d data buffer", mbox);
+			rc = -ENOMEM;
+			goto out_buf;
+		}
+	}
+
+	/* Outbound message descriptor allocation */
+	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
+				&priv->pdev->dev,
+				(entries + 1) * sizeof(struct tsi721_omsg_desc),
+				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
+	if (priv->omsg_ring[mbox].omd_base == NULL) {
+		tsi_debug(OMSG, &priv->pdev->dev,
+			"ENOMEM for OB_MSG_%d descriptor memory", mbox);
+		rc = -ENOMEM;
+		goto out_buf;
+	}
+
+	priv->omsg_ring[mbox].tx_slot = 0;
+
+	/* Outbound message descriptor status FIFO allocation */
+	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
+	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
+			priv->omsg_ring[mbox].sts_size *
+						sizeof(struct tsi721_dma_sts),
+			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
+	if (priv->omsg_ring[mbox].sts_base == NULL) {
+		tsi_debug(OMSG, &priv->pdev->dev,
+			"ENOMEM for OB_MSG_%d status FIFO", mbox);
+		rc = -ENOMEM;
+		goto out_desc;
+	}
+
+	/*
+	 * Configure Outbound Messaging Engine
+	 */
+
+	/* Setup Outbound Message descriptor pointer */
+	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
+			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
+	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
+					TSI721_OBDMAC_DPTRL_MASK),
+			priv->regs + TSI721_OBDMAC_DPTRL(mbox));
+
+	/* Setup Outbound Message descriptor status FIFO */
+	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
+			priv->regs + TSI721_OBDMAC_DSBH(mbox));
+	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
+					TSI721_OBDMAC_DSBL_MASK),
+			priv->regs + TSI721_OBDMAC_DSBL(mbox));
+	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
+		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
+
+	/* Enable interrupts */
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		int idx = TSI721_VECT_OMB0_DONE + mbox;
+
+		/* Request interrupt service if we are in MSI-X mode */
+		rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
+				 priv->msix[idx].irq_name, (void *)priv);
+
+		if (rc) {
+			tsi_debug(OMSG, &priv->pdev->dev,
+				"Unable to get MSI-X IRQ for OBOX%d-DONE",
+				mbox);
+			goto out_stat;
+		}
+
+		idx = TSI721_VECT_OMB0_INT + mbox;
+		rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
+				 priv->msix[idx].irq_name, (void *)priv);
+
+		if (rc)	{
+			tsi_debug(OMSG, &priv->pdev->dev,
+				"Unable to get MSI-X IRQ for MBOX%d-INT", mbox);
+			idx = TSI721_VECT_OMB0_DONE + mbox;
+			free_irq(priv->msix[idx].vector, (void *)priv);
+			goto out_stat;
+		}
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
+
+	/* Initialize Outbound Message descriptors ring */
+	bd_ptr = priv->omsg_ring[mbox].omd_base;
+	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
+	bd_ptr[entries].msg_info = 0;
+	bd_ptr[entries].next_lo =
+		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
+		TSI721_OBDMAC_DPTRL_MASK);
+	bd_ptr[entries].next_hi =
+		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
+	priv->omsg_ring[mbox].wr_count = 0;
+	mb();
+
+	/* Initialize Outbound Message engine */
+	iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
+		  priv->regs + TSI721_OBDMAC_CTL(mbox));
+	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+	udelay(10);
+
+	priv->omsg_init[mbox] = 1;
+
+	return 0;
+
+#ifdef CONFIG_PCI_MSI
+out_stat:
+	dma_free_coherent(&priv->pdev->dev,
+		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+		priv->omsg_ring[mbox].sts_base,
+		priv->omsg_ring[mbox].sts_phys);
+
+	priv->omsg_ring[mbox].sts_base = NULL;
+#endif /* CONFIG_PCI_MSI */
+
+out_desc:
+	dma_free_coherent(&priv->pdev->dev,
+		(entries + 1) * sizeof(struct tsi721_omsg_desc),
+		priv->omsg_ring[mbox].omd_base,
+		priv->omsg_ring[mbox].omd_phys);
+
+	priv->omsg_ring[mbox].omd_base = NULL;
+
+out_buf:
+	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
+		if (priv->omsg_ring[mbox].omq_base[i]) {
+			dma_free_coherent(&priv->pdev->dev,
+				TSI721_MSG_BUFFER_SIZE,
+				priv->omsg_ring[mbox].omq_base[i],
+				priv->omsg_ring[mbox].omq_phys[i]);
+
+			priv->omsg_ring[mbox].omq_base[i] = NULL;
+		}
+	}
+
+out:
+	return rc;
+}
+
+/**
+ * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ */
+static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 i;
+
+	if (!priv->omsg_init[mbox])
+		return;
+	priv->omsg_init[mbox] = 0;
+
+	/* Disable Interrupts */
+
+	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
+			 (void *)priv);
+		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
+			 (void *)priv);
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	/* Free OMSG Descriptor Status FIFO */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+		priv->omsg_ring[mbox].sts_base,
+		priv->omsg_ring[mbox].sts_phys);
+
+	priv->omsg_ring[mbox].sts_base = NULL;
+
+	/* Free OMSG descriptors */
+	dma_free_coherent(&priv->pdev->dev,
+		(priv->omsg_ring[mbox].size + 1) *
+			sizeof(struct tsi721_omsg_desc),
+		priv->omsg_ring[mbox].omd_base,
+		priv->omsg_ring[mbox].omd_phys);
+
+	priv->omsg_ring[mbox].omd_base = NULL;
+
+	/* Free message buffers */
+	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
+		if (priv->omsg_ring[mbox].omq_base[i]) {
+			dma_free_coherent(&priv->pdev->dev,
+				TSI721_MSG_BUFFER_SIZE,
+				priv->omsg_ring[mbox].omq_base[i],
+				priv->omsg_ring[mbox].omq_phys[i]);
+
+			priv->omsg_ring[mbox].omq_base[i] = NULL;
+		}
+	}
+}
+
+/**
+ * tsi721_imsg_handler - Inbound Message Interrupt Handler
+ * @priv: pointer to tsi721 private data
+ * @ch: inbound message channel number to service
+ *
+ * Services channel interrupts from inbound messaging engine.
+ */
+static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
+{
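+	/* Inbound messaging channels 4..7 serve mailboxes 0..3 */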
+	u32 mbox = ch - 4;
+	u32 imsg_int;
+	struct rio_mport *mport = &priv->mport;
+
+	spin_lock(&priv->imsg_ring[mbox].lock);
+
+	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
+
+	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
+		tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox);
+
+	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
+		tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox);
+
+	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
+		tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox);
+
+	/* Clear IB channel interrupts */
+	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
+
+	/* If an IB message was received, notify the upper layer */
+	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
+		mport->inb_msg[mbox].mcback)
+		mport->inb_msg[mbox].mcback(mport,
+				priv->imsg_ring[mbox].dev_id, mbox, -1);
+
+	if (!(priv->flags & TSI721_USING_MSIX)) {
+		u32 ch_inte;
+
+		/* Re-enable channel interrupts */
+		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
+		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
+		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
+	}
+
+	spin_unlock(&priv->imsg_ring[mbox].lock);
+}
+
+/**
+ * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ */
+static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
+				int mbox, int entries)
+{
+	struct tsi721_device *priv = mport->priv;
+	int ch = mbox + 4;
+	int i;
+	u64 *free_ptr;
+	int rc = 0;
+
+	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
+	    (entries > TSI721_IMSGD_RING_SIZE) ||
+	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if ((mbox_sel & (1 << mbox)) == 0) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	/* Initialize IB Messaging Ring */
+	priv->imsg_ring[mbox].dev_id = dev_id;
+	priv->imsg_ring[mbox].size = entries;
+	priv->imsg_ring[mbox].rx_slot = 0;
+	priv->imsg_ring[mbox].desc_rdptr = 0;
+	priv->imsg_ring[mbox].fq_wrptr = 0;
+	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
+		priv->imsg_ring[mbox].imq_base[i] = NULL;
+	spin_lock_init(&priv->imsg_ring[mbox].lock);
+
+	/* Allocate buffers for incoming messages */
+	priv->imsg_ring[mbox].buf_base =
+		dma_alloc_coherent(&priv->pdev->dev,
+				   entries * TSI721_MSG_BUFFER_SIZE,
+				   &priv->imsg_ring[mbox].buf_phys,
+				   GFP_KERNEL);
+
+	if (priv->imsg_ring[mbox].buf_base == NULL) {
+		tsi_err(&priv->pdev->dev,
+			"Failed to allocate buffers for IB MBOX%d", mbox);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Allocate memory for circular free list */
+	priv->imsg_ring[mbox].imfq_base =
+		dma_alloc_coherent(&priv->pdev->dev,
+				   entries * 8,
+				   &priv->imsg_ring[mbox].imfq_phys,
+				   GFP_KERNEL);
+
+	if (priv->imsg_ring[mbox].imfq_base == NULL) {
+		tsi_err(&priv->pdev->dev,
+			"Failed to allocate free queue for IB MBOX%d", mbox);
+		rc = -ENOMEM;
+		goto out_buf;
+	}
+
+	/* Allocate memory for Inbound message descriptors */
+	priv->imsg_ring[mbox].imd_base =
+		dma_alloc_coherent(&priv->pdev->dev,
+				   entries * sizeof(struct tsi721_imsg_desc),
+				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
+
+	if (priv->imsg_ring[mbox].imd_base == NULL) {
+		tsi_err(&priv->pdev->dev,
+			"Failed to allocate descriptor memory for IB MBOX%d",
+			mbox);
+		rc = -ENOMEM;
+		goto out_dma;
+	}
+
+	/* Fill free buffer pointer list */
+	free_ptr = priv->imsg_ring[mbox].imfq_base;
+	for (i = 0; i < entries; i++)
+		free_ptr[i] = cpu_to_le64(
+				(u64)(priv->imsg_ring[mbox].buf_phys) +
+				i * 0x1000);
+
+	mb();
+
+	/*
+	 * To map inbound SRIO messages into the appropriate queues we need to
+	 * set the Inbound Device ID register in the messaging engine. We do it
+	 * once, when the first inbound mailbox is requested.
+	 */
+	if (!(priv->flags & TSI721_IMSGID_SET)) {
+		iowrite32((u32)priv->mport.host_deviceid,
+			priv->regs + TSI721_IB_DEVID);
+		priv->flags |= TSI721_IMSGID_SET;
+	}
+
+	/*
+	 * Configure Inbound Messaging channel (ch = mbox + 4)
+	 */
+
+	/* Setup Inbound Message free queue */
+	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
+		priv->regs + TSI721_IBDMAC_FQBH(ch));
+	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
+			TSI721_IBDMAC_FQBL_MASK),
+		priv->regs + TSI721_IBDMAC_FQBL(ch));
+	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
+		priv->regs + TSI721_IBDMAC_FQSZ(ch));
+
+	/* Setup Inbound Message descriptor queue */
+	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
+		priv->regs + TSI721_IBDMAC_DQBH(ch));
+	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
+		   (u32)TSI721_IBDMAC_DQBL_MASK),
+		priv->regs + TSI721_IBDMAC_DQBL(ch));
+	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
+		priv->regs + TSI721_IBDMAC_DQSZ(ch));
+
+	/* Enable interrupts */
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		int idx = TSI721_VECT_IMB0_RCV + mbox;
+
+		/* Request interrupt service if we are in MSI-X mode */
+		rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
+				 priv->msix[idx].irq_name, (void *)priv);
+
+		if (rc) {
+			tsi_debug(IMSG, &priv->pdev->dev,
+				"Unable to get MSI-X IRQ for IBOX%d-DONE",
+				mbox);
+			goto out_desc;
+		}
+
+		idx = TSI721_VECT_IMB0_INT + mbox;
+		rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
+				 priv->msix[idx].irq_name, (void *)priv);
+
+		if (rc)	{
+			tsi_debug(IMSG, &priv->pdev->dev,
+				"Unable to get MSI-X IRQ for IBOX%d-INT", mbox);
+			free_irq(
+				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+				(void *)priv);
+			goto out_desc;
+		}
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);
+
+	/* Initialize Inbound Message Engine */
+	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
+	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
+	udelay(10);
+	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
+	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));
+
+	priv->imsg_init[mbox] = 1;
+	return 0;
+
+#ifdef CONFIG_PCI_MSI
+out_desc:
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
+		priv->imsg_ring[mbox].imd_base,
+		priv->imsg_ring[mbox].imd_phys);
+
+	priv->imsg_ring[mbox].imd_base = NULL;
+#endif /* CONFIG_PCI_MSI */
+
+out_dma:
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * 8,
+		priv->imsg_ring[mbox].imfq_base,
+		priv->imsg_ring[mbox].imfq_phys);
+
+	priv->imsg_ring[mbox].imfq_base = NULL;
+
+out_buf:
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
+		priv->imsg_ring[mbox].buf_base,
+		priv->imsg_ring[mbox].buf_phys);
+
+	priv->imsg_ring[mbox].buf_base = NULL;
+
+out:
+	return rc;
+}
+
+/**
+ * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Mailbox to close
+ */
+static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 rx_slot;
+	int ch = mbox + 4;
+
+	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
+		return;
+	priv->imsg_init[mbox] = 0;
+
+	/* Disable Inbound Messaging Engine */
+
+	/* Disable Interrupts */
+	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+				(void *)priv);
+		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
+				(void *)priv);
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	/* Clear Inbound Buffer Queue */
+	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
+		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
+
+	/* Free memory allocated for message buffers */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
+		priv->imsg_ring[mbox].buf_base,
+		priv->imsg_ring[mbox].buf_phys);
+
+	priv->imsg_ring[mbox].buf_base = NULL;
+
+	/* Free memory allocated for free pointer list */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * 8,
+		priv->imsg_ring[mbox].imfq_base,
+		priv->imsg_ring[mbox].imfq_phys);
+
+	priv->imsg_ring[mbox].imfq_base = NULL;
+
+	/* Free memory allocated for RX descriptors */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
+		priv->imsg_ring[mbox].imd_base,
+		priv->imsg_ring[mbox].imd_phys);
+
+	priv->imsg_ring[mbox].imd_base = NULL;
+}
+
+/**
+ * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ */
+static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 rx_slot;
+	int rc = 0;
+
+	rx_slot = priv->imsg_ring[mbox].rx_slot;
+	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
+		tsi_err(&priv->pdev->dev,
+			"Error adding inbound buffer %d, buffer exists",
+			rx_slot);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
+
+	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
+		priv->imsg_ring[mbox].rx_slot = 0;
+
+out:
+	return rc;
+}
+
+/**
+ * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ *
+ * Returns pointer to the message on success or NULL on failure.
+ */
+static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
+{
+	struct tsi721_device *priv = mport->priv;
+	struct tsi721_imsg_desc *desc;
+	u32 rx_slot;
+	void *rx_virt = NULL;
+	u64 rx_phys;
+	void *buf = NULL;
+	u64 *free_ptr;
+	int ch = mbox + 4;
+	int msg_size;
+
+	if (!priv->imsg_init[mbox])
+		return NULL;
+
+	desc = priv->imsg_ring[mbox].imd_base;
+	desc += priv->imsg_ring[mbox].desc_rdptr;
+
+	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
+		goto out;
+
+	rx_slot = priv->imsg_ring[mbox].rx_slot;
+	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
+		if (++rx_slot == priv->imsg_ring[mbox].size)
+			rx_slot = 0;
+	}
+
+	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
+			le32_to_cpu(desc->bufptr_lo);
+
+	rx_virt = priv->imsg_ring[mbox].buf_base +
+		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);
+
+	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
+	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
+	if (msg_size == 0)
+		msg_size = RIO_MAX_MSG_SIZE;
+
+	memcpy(buf, rx_virt, msg_size);
+	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
+
+	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
+	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
+		priv->imsg_ring[mbox].desc_rdptr = 0;
+
+	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
+		priv->regs + TSI721_IBDMAC_DQRP(ch));
+
+	/* Return free buffer into the pointer list */
+	free_ptr = priv->imsg_ring[mbox].imfq_base;
+	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);
+
+	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
+		priv->imsg_ring[mbox].fq_wrptr = 0;
+
+	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
+		priv->regs + TSI721_IBDMAC_FQWP(ch));
+out:
+	return buf;
+}
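+
+/*
+ * Illustrative usage sketch (not part of this driver): consumers drive
+ * this path through the generic RapidIO API, pre-posting buffers with
+ * rio_add_inb_buffer() and draining them from the receive callback
+ * registered via rio_request_inb_mbox(). consume_msg() is hypothetical:
+ *
+ *	static void example_inb_cb(struct rio_mport *mport, void *dev_id,
+ *				   int mbox, int slot)
+ *	{
+ *		void *buf;
+ *
+ *		while ((buf = rio_get_inb_message(mport, mbox)) != NULL) {
+ *			consume_msg(buf);	// hypothetical consumer
+ *			rio_add_inb_buffer(mport, mbox, buf);
+ *		}
+ *	}
+ */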
+
+/**
+ * tsi721_messages_init - Initialization of Messaging Engine
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures Tsi721 messaging engine.
+ */
+static int tsi721_messages_init(struct tsi721_device *priv)
+{
+	int	ch;
+
+	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
+	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
+	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
+
+	/* Set SRIO Message Request/Response Timeout */
+	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
+
+	/* Initialize Inbound Messaging Engine Registers */
+	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
+		/* Clear interrupt bits */
+		iowrite32(TSI721_IBDMAC_INT_MASK,
+			priv->regs + TSI721_IBDMAC_INT(ch));
+		/* Clear Status */
+		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
+
+		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
+				priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
+		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
+				priv->regs + TSI721_SMSG_ECC_NCOR(ch));
+	}
+
+	return 0;
+}
+
+/**
+ * tsi721_query_mport - Query master port attributes
+ * @mport: Master port to be queried
+ * @attr: mport attributes to be returned
+ *
+ * Reports link state, speed and width, and DMA capabilities. Returns %0.
+ */
+static int tsi721_query_mport(struct rio_mport *mport,
+			      struct rio_mport_attr *attr)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 rval;
+
+	rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
+	if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
+		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
+		attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
+		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
+		attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
+	} else {
+		attr->link_speed = RIO_LINK_DOWN;
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG;
+	attr->dma_max_sge = 0;
+	attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT;
+	attr->dma_align = 0;
+#else
+	attr->flags = 0;
+#endif
+	return 0;
+}
+
+/**
+ * tsi721_disable_ints - disables all device interrupts
+ * @priv: pointer to tsi721 private data
+ */
+static void tsi721_disable_ints(struct tsi721_device *priv)
+{
+	int ch;
+
+	/* Disable all device level interrupts */
+	iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
+	/* Disable all Device Channel interrupts */
+	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
+
+	/* Disable all Inbound Msg Channel interrupts */
+	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
+		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
+
+	/* Disable all Outbound Msg Channel interrupts */
+	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
+		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
+
+	/* Disable all general messaging interrupts */
+	iowrite32(0, priv->regs + TSI721_SMSG_INTE);
+
+	/* Disable all BDMA Channel interrupts */
+	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
+		iowrite32(0,
+			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
+
+	/* Disable all general BDMA interrupts */
+	iowrite32(0, priv->regs + TSI721_BDMA_INTE);
+
+	/* Disable all SRIO Channel interrupts */
+	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
+		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
+
+	/* Disable all general SR2PC interrupts */
+	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
+
+	/* Disable all PC2SR interrupts */
+	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
+
+	/* Disable all I2C interrupts */
+	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
+
+	/* Disable SRIO MAC interrupts */
+	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
+	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+}
+
+static struct rio_ops tsi721_rio_ops = {
+	.lcread			= tsi721_lcread,
+	.lcwrite		= tsi721_lcwrite,
+	.cread			= tsi721_cread_dma,
+	.cwrite			= tsi721_cwrite_dma,
+	.dsend			= tsi721_dsend,
+	.open_inb_mbox		= tsi721_open_inb_mbox,
+	.close_inb_mbox		= tsi721_close_inb_mbox,
+	.open_outb_mbox		= tsi721_open_outb_mbox,
+	.close_outb_mbox	= tsi721_close_outb_mbox,
+	.add_outb_message	= tsi721_add_outb_message,
+	.add_inb_buffer		= tsi721_add_inb_buffer,
+	.get_inb_message	= tsi721_get_inb_message,
+	.map_inb		= tsi721_rio_map_inb_mem,
+	.unmap_inb		= tsi721_rio_unmap_inb_mem,
+	.pwenable		= tsi721_pw_enable,
+	.query_mport		= tsi721_query_mport,
+	.map_outb		= tsi721_map_outb_win,
+	.unmap_outb		= tsi721_unmap_outb_win,
+};
+
+static void tsi721_mport_release(struct device *dev)
+{
+	struct rio_mport *mport = to_rio_mport(dev);
+
+	tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id);
+}
+
+/**
+ * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures Tsi721 as RapidIO master port.
+ */
+static int tsi721_setup_mport(struct tsi721_device *priv)
+{
+	struct pci_dev *pdev = priv->pdev;
+	int err = 0;
+	struct rio_mport *mport = &priv->mport;
+
+	err = rio_mport_initialize(mport);
+	if (err)
+		return err;
+
+	mport->ops = &tsi721_rio_ops;
+	mport->index = 0;
+	mport->sys_size = 0; /* small system */
+	mport->priv = (void *)priv;
+	mport->phys_efptr = 0x100;
+	mport->phys_rmap = 1;
+	mport->dev.parent = &pdev->dev;
+	mport->dev.release = tsi721_mport_release;
+
+	INIT_LIST_HEAD(&mport->dbells);
+
+	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
+	snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)",
+		 dev_driver_string(&pdev->dev), dev_name(&pdev->dev));
+
+	/* Hook up interrupt handler */
+
+#ifdef CONFIG_PCI_MSI
+	if (!tsi721_enable_msix(priv))
+		priv->flags |= TSI721_USING_MSIX;
+	else if (!pci_enable_msi(pdev))
+		priv->flags |= TSI721_USING_MSI;
+	else
+		tsi_debug(MPORT, &pdev->dev,
+			 "MSI/MSI-X is not available. Using legacy INTx.");
+#endif /* CONFIG_PCI_MSI */
+
+	err = tsi721_request_irq(priv);
+
+	if (err) {
+		tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)",
+			pdev->irq, err);
+		return err;
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	err = tsi721_register_dma(priv);
+	if (err)
+		goto err_exit;
+#endif
+	/* Enable SRIO link */
+	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
+		  TSI721_DEVCTL_SRBOOT_CMPL,
+		  priv->regs + TSI721_DEVCTL);
+
+	if (mport->host_deviceid >= 0)
+		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
+			  RIO_PORT_GEN_DISCOVERED,
+			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
+	else
+		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
+
+	err = rio_register_mport(mport);
+	if (err) {
+		tsi721_unregister_dma(priv);
+		goto err_exit;
+	}
+
+	return 0;
+
+err_exit:
+	tsi721_free_irq(priv);
+	return err;
+}
+
+static int tsi721_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *id)
+{
+	struct tsi721_device *priv;
+	int err;
+
+	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
+	if (!priv) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		tsi_err(&pdev->dev, "Failed to enable PCI device");
+		goto err_clean;
+	}
+
+	priv->pdev = pdev;
+
+#ifdef DEBUG
+	{
+		int i;
+
+		for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+			tsi_debug(INIT, &pdev->dev, "res%d %pR",
+				  i, &pdev->resource[i]);
+		}
+	}
+#endif
+	/*
+	 * Verify BAR configuration
+	 */
+
+	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
+	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
+	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
+	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
+		tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0");
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+
+	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
+	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
+	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
+	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
+		tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1");
+		err = -ENODEV;
+		goto err_disable_pdev;
+	}
+
+	/*
+	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
+	 * space.
+	 * NOTE: BAR_2 and BAR_4 are not used by this version of the driver.
+	 * It may be a good idea to keep them disabled using HW configuration
+	 * to save PCI memory space.
+	 */
+
+	priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0;
+
+	if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) {
+		if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH) {
+			tsi_debug(INIT, &pdev->dev,
+				 "Prefetchable OBW BAR2 will not be used");
+		} else {
+			priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2);
+			priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2);
+		}
+	}
+
+	if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) {
+		if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH) {
+			tsi_debug(INIT, &pdev->dev,
+				 "Prefetchable OBW BAR4 will not be used");
+		} else {
+			priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4);
+			priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4);
+		}
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		tsi_err(&pdev->dev, "Unable to obtain PCI resources");
+		goto err_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	priv->regs = pci_ioremap_bar(pdev, BAR_0);
+	if (!priv->regs) {
+		tsi_err(&pdev->dev, "Unable to map device registers space");
+		err = -ENOMEM;
+		goto err_free_res;
+	}
+
+	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
+	if (!priv->odb_base) {
+		tsi_err(&pdev->dev, "Unable to map outbound doorbells space");
+		err = -ENOMEM;
+		goto err_unmap_bars;
+	}
+
+	/* Configure DMA attributes. */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			tsi_err(&pdev->dev, "Unable to set DMA mask");
+			goto err_unmap_bars;
+		}
+
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
+	} else {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err)
+			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
+	}
+
+	BUG_ON(!pci_is_pcie(pdev));
+
+	/* Clear "no snoop" and "relaxed ordering" bits. */
+	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+		PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+
+	/* Override PCIe Maximum Read Request Size setting if requested */
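+	/* DEVCTL[14:12] holds the MRRS encoding, hence the << 12 below;
+	 * encodings 0..5 select 128 << pcie_mrrs bytes (128B..4096B).
+	 */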
+	if (pcie_mrrs >= 0) {
+		if (pcie_mrrs <= 5)
+			pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+					PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
+		else
+			tsi_info(&pdev->dev,
+				 "Invalid MRRS override value %d", pcie_mrrs);
+	}
+
+	/* Adjust PCIe completion timeout. */
+	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
+
+	/*
+	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
+	 */
+	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
+	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
+						TSI721_MSIXTBL_OFFSET);
+	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
+						TSI721_MSIXPBA_OFFSET);
+	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
+	/* End of FIXUP */
+
+	tsi721_disable_ints(priv);
+
+	tsi721_init_pc2sr_mapping(priv);
+	tsi721_init_sr2pc_mapping(priv);
+
+	if (tsi721_bdma_maint_init(priv)) {
+		tsi_err(&pdev->dev, "BDMA initialization failed");
+		err = -ENOMEM;
+		goto err_unmap_bars;
+	}
+
+	err = tsi721_doorbell_init(priv);
+	if (err)
+		goto err_free_bdma;
+
+	err = tsi721_port_write_init(priv);
+	if (err)
+		goto err_free_consistent;
+
+	err = tsi721_messages_init(priv);
+	if (err)
+		goto err_free_consistent;
+
+	err = tsi721_setup_mport(priv);
+	if (err)
+		goto err_free_consistent;
+
+	pci_set_drvdata(pdev, priv);
+	tsi721_interrupts_init(priv);
+
+	return 0;
+
+err_free_consistent:
+	tsi721_port_write_free(priv);
+	tsi721_doorbell_free(priv);
+err_free_bdma:
+	tsi721_bdma_maint_free(priv);
+err_unmap_bars:
+	if (priv->regs)
+		iounmap(priv->regs);
+	if (priv->odb_base)
+		iounmap(priv->odb_base);
+err_free_res:
+	pci_release_regions(pdev);
+	pci_clear_master(pdev);
+err_disable_pdev:
+	pci_disable_device(pdev);
+err_clean:
+	kfree(priv);
+err_exit:
+	return err;
+}
+
+static void tsi721_remove(struct pci_dev *pdev)
+{
+	struct tsi721_device *priv = pci_get_drvdata(pdev);
+
+	tsi_debug(EXIT, &pdev->dev, "enter");
+
+	tsi721_disable_ints(priv);
+	tsi721_free_irq(priv);
+	flush_scheduled_work();
+	rio_unregister_mport(&priv->mport);
+
+	tsi721_unregister_dma(priv);
+	tsi721_bdma_maint_free(priv);
+	tsi721_doorbell_free(priv);
+	tsi721_port_write_free(priv);
+	tsi721_close_sr2pc_mapping(priv);
+
+	if (priv->regs)
+		iounmap(priv->regs);
+	if (priv->odb_base)
+		iounmap(priv->odb_base);
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX)
+		pci_disable_msix(priv->pdev);
+	else if (priv->flags & TSI721_USING_MSI)
+		pci_disable_msi(priv->pdev);
+#endif
+	pci_release_regions(pdev);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	kfree(priv);
+	tsi_debug(EXIT, &pdev->dev, "exit");
+}
+
+static void tsi721_shutdown(struct pci_dev *pdev)
+{
+	struct tsi721_device *priv = pci_get_drvdata(pdev);
+
+	tsi_debug(EXIT, &pdev->dev, "enter");
+
+	tsi721_disable_ints(priv);
+	tsi721_dma_stop_all(priv);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+}
+
+static const struct pci_device_id tsi721_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
+	{ 0, }	/* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
+
+static struct pci_driver tsi721_driver = {
+	.name		= "tsi721",
+	.id_table	= tsi721_pci_tbl,
+	.probe		= tsi721_probe,
+	.remove		= tsi721_remove,
+	.shutdown	= tsi721_shutdown,
+};
+
+module_pci_driver(tsi721_driver);
+
+MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
+MODULE_AUTHOR("Integrated Device Technology, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721.h b/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721.h
new file mode 100644
index 0000000..957eadc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721.h
@@ -0,0 +1,936 @@
+/*
+ * Tsi721 PCIExpress-to-SRIO bridge definitions
+ *
+ * Copyright 2011, Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef __TSI721_H
+#define __TSI721_H
+
+/* Debug output filtering masks */
+enum {
+	DBG_NONE	= 0,
+	DBG_INIT	= BIT(0), /* driver init */
+	DBG_EXIT	= BIT(1), /* driver exit */
+	DBG_MPORT	= BIT(2), /* mport add/remove */
+	DBG_MAINT	= BIT(3), /* maintenance ops messages */
+	DBG_DMA		= BIT(4), /* DMA transfer messages */
+	DBG_DMAV	= BIT(5), /* verbose DMA transfer messages */
+	DBG_IBW		= BIT(6), /* inbound window */
+	DBG_EVENT	= BIT(7), /* event handling messages */
+	DBG_OBW		= BIT(8), /* outbound window messages */
+	DBG_DBELL	= BIT(9), /* doorbell messages */
+	DBG_OMSG	= BIT(10), /* outbound messaging */
+	DBG_IMSG	= BIT(11), /* inbound messaging */
+	DBG_ALL		= ~0,
+};
+
+#ifdef DEBUG
+extern u32 tsi_dbg_level;
+
+#define tsi_debug(level, dev, fmt, arg...)				\
+	do {								\
+		if (DBG_##level & tsi_dbg_level)				\
+			dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg);	\
+	} while (0)
+#else
+#define tsi_debug(level, dev, fmt, arg...) \
+		no_printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##arg)
+#endif
+
+#define tsi_info(dev, fmt, arg...) \
+	dev_info(dev, "%s: " fmt "\n", __func__, ##arg)
+
+#define tsi_warn(dev, fmt, arg...) \
+	dev_warn(dev, "%s: WARNING " fmt "\n", __func__, ##arg)
+
+#define tsi_err(dev, fmt, arg...) \
+	dev_err(dev, "%s: ERROR " fmt "\n", __func__, ##arg)
+
+#define DRV_NAME	"tsi721"
+
+#define DEFAULT_HOPCOUNT	0xff
+#define DEFAULT_DESTID		0xff
+
+/* PCI device ID */
+#define PCI_DEVICE_ID_TSI721		0x80ab
+
+#define BAR_0	0
+#define BAR_1	1
+#define BAR_2	2
+#define BAR_4	4
+
+#define TSI721_PC2SR_BARS	2
+#define TSI721_PC2SR_WINS	8
+#define TSI721_PC2SR_ZONES	8
+#define TSI721_MAINT_WIN	0 /* Window for outbound maintenance requests */
+#define IDB_QUEUE		0 /* Inbound Doorbell Queue to use */
+#define IDB_QSIZE		512 /* Inbound Doorbell Queue size */
+
+/* Memory space sizes */
+#define TSI721_REG_SPACE_SIZE		(512 * 1024) /* 512K */
+#define TSI721_DB_WIN_SIZE		(16 * 1024 * 1024) /* 16MB */
+
+#define  RIO_TT_CODE_8		0x00000000
+#define  RIO_TT_CODE_16		0x00000001
+
+#define TSI721_DMA_MAXCH	8
+#define TSI721_DMA_MINSTSSZ	32
+#define TSI721_DMA_STSBLKSZ	8
+
+#define TSI721_SRIO_MAXCH	8
+
+#define DBELL_SID(buf)		(((u8)buf[2] << 8) | (u8)buf[3])
+#define DBELL_TID(buf)		(((u8)buf[4] << 8) | (u8)buf[5])
+#define DBELL_INF(buf)		(((u8)buf[0] << 8) | (u8)buf[1])
+
+#define TSI721_RIO_PW_MSG_SIZE	16  /* Tsi721 saves only 16 bytes of PW msg */
+
+/* Register definitions */
+
+/*
+ * Registers in PCIe configuration space
+ */
+
+#define TSI721_PCIECFG_MSIXTBL	0x0a4
+#define TSI721_MSIXTBL_OFFSET	0x2c000
+#define TSI721_PCIECFG_MSIXPBA	0x0a8
+#define TSI721_MSIXPBA_OFFSET	0x2a000
+#define TSI721_PCIECFG_EPCTL	0x400
+
+/*
+ * Event Management Registers
+ */
+
+#define TSI721_RIO_EM_INT_STAT		0x10910
+#define TSI721_RIO_EM_INT_STAT_PW_RX	0x00010000
+
+#define TSI721_RIO_EM_INT_ENABLE	0x10914
+#define TSI721_RIO_EM_INT_ENABLE_PW_RX	0x00010000
+
+#define TSI721_RIO_EM_DEV_INT_EN	0x10930
+#define TSI721_RIO_EM_DEV_INT_EN_INT	0x00000001
+
+/*
+ * Port-Write Block Registers
+ */
+
+#define TSI721_RIO_PW_CTL		0x10a04
+#define TSI721_RIO_PW_CTL_PW_TIMER	0xf0000000
+#define TSI721_RIO_PW_CTL_PWT_DIS	(0 << 28)
+#define TSI721_RIO_PW_CTL_PWT_103	(1 << 28)
+#define TSI721_RIO_PW_CTL_PWT_205	(1 << 29)
+#define TSI721_RIO_PW_CTL_PWT_410	(1 << 30)
+#define TSI721_RIO_PW_CTL_PWT_820	(1 << 31)
+#define TSI721_RIO_PW_CTL_PWC_MODE	0x01000000
+#define TSI721_RIO_PW_CTL_PWC_CONT	0x00000000
+#define TSI721_RIO_PW_CTL_PWC_REL	0x01000000
+
+#define TSI721_RIO_PW_RX_STAT		0x10a10
+#define TSI721_RIO_PW_RX_STAT_WR_SIZE	0x0000f000
+#define TSI_RIO_PW_RX_STAT_WDPTR	0x00000100
+#define TSI721_RIO_PW_RX_STAT_PW_SHORT	0x00000008
+#define TSI721_RIO_PW_RX_STAT_PW_TRUNC	0x00000004
+#define TSI721_RIO_PW_RX_STAT_PW_DISC	0x00000002
+#define TSI721_RIO_PW_RX_STAT_PW_VAL	0x00000001
+
+#define TSI721_RIO_PW_RX_CAPT(x)	(0x10a20 + (x)*4)
+
+/*
+ * Inbound Doorbells
+ */
+
+#define TSI721_IDB_ENTRY_SIZE	64
+
+#define TSI721_IDQ_CTL(x)	(0x20000 + (x) * 0x1000)
+#define TSI721_IDQ_SUSPEND	0x00000002
+#define TSI721_IDQ_INIT		0x00000001
+
+#define TSI721_IDQ_STS(x)	(0x20004 + (x) * 0x1000)
+#define TSI721_IDQ_RUN		0x00200000
+
+#define TSI721_IDQ_MASK(x)	(0x20008 + (x) * 0x1000)
+#define TSI721_IDQ_MASK_MASK	0xffff0000
+#define TSI721_IDQ_MASK_PATT	0x0000ffff
+
+#define TSI721_IDQ_RP(x)	(0x2000c + (x) * 0x1000)
+#define TSI721_IDQ_RP_PTR	0x0007ffff
+
+#define TSI721_IDQ_WP(x)	(0x20010 + (x) * 0x1000)
+#define TSI721_IDQ_WP_PTR	0x0007ffff
+
+#define TSI721_IDQ_BASEL(x)	(0x20014 + (x) * 0x1000)
+#define TSI721_IDQ_BASEL_ADDR	0xffffffc0
+#define TSI721_IDQ_BASEU(x)	(0x20018 + (x) * 0x1000)
+#define TSI721_IDQ_SIZE(x)	(0x2001c + (x) * 0x1000)
+#define TSI721_IDQ_SIZE_VAL(size)	(__fls(size) - 4)
+#define TSI721_IDQ_SIZE_MIN	512
+#define TSI721_IDQ_SIZE_MAX	(512 * 1024)
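+/* e.g. TSI721_IDQ_SIZE_VAL(512) = __fls(512) - 4 = 5 for the default IDB_QSIZE */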
+
+#define TSI721_SR_CHINT(x)	(0x20040 + (x) * 0x1000)
+#define TSI721_SR_CHINTE(x)	(0x20044 + (x) * 0x1000)
+#define TSI721_SR_CHINTSET(x)	(0x20048 + (x) * 0x1000)
+#define TSI721_SR_CHINT_ODBOK	0x00000020
+#define TSI721_SR_CHINT_IDBQRCV	0x00000010
+#define TSI721_SR_CHINT_SUSP	0x00000008
+#define TSI721_SR_CHINT_ODBTO	0x00000004
+#define TSI721_SR_CHINT_ODBRTRY	0x00000002
+#define TSI721_SR_CHINT_ODBERR	0x00000001
+#define TSI721_SR_CHINT_ALL	0x0000003f
+
+#define TSI721_IBWIN_NUM	8
+
+#define TSI721_IBWIN_LB(x)	(0x29000 + (x) * 0x20)
+#define TSI721_IBWIN_LB_BA	0xfffff000
+#define TSI721_IBWIN_LB_WEN	0x00000001
+
+#define TSI721_IBWIN_UB(x)	(0x29004 + (x) * 0x20)
+#define TSI721_IBWIN_SZ(x)	(0x29008 + (x) * 0x20)
+#define TSI721_IBWIN_SZ_SIZE	0x00001f00
+#define TSI721_IBWIN_SIZE(size)	(__fls(size) - 12)
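+/* e.g. TSI721_IBWIN_SIZE(0x100000) = __fls(0x100000) - 12 = 8 for a 1 MB window */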
+
+#define TSI721_IBWIN_TLA(x)	(0x2900c + (x) * 0x20)
+#define TSI721_IBWIN_TLA_ADD	0xfffff000
+#define TSI721_IBWIN_TUA(x)	(0x29010 + (x) * 0x20)
+
+#define TSI721_SR2PC_GEN_INTE	0x29800
+#define TSI721_SR2PC_PWE	0x29804
+#define TSI721_SR2PC_GEN_INT	0x29808
+
+#define TSI721_DEV_INTE		0x29840
+#define TSI721_DEV_INT		0x29844
+#define TSI721_DEV_INTSET	0x29848
+#define TSI721_DEV_INT_BDMA_CH	0x00002000
+#define TSI721_DEV_INT_BDMA_NCH	0x00001000
+#define TSI721_DEV_INT_SMSG_CH	0x00000800
+#define TSI721_DEV_INT_SMSG_NCH	0x00000400
+#define TSI721_DEV_INT_SR2PC_CH	0x00000200
+#define TSI721_DEV_INT_SRIO	0x00000020
+
+#define TSI721_DEV_CHAN_INTE	0x2984c
+#define TSI721_DEV_CHAN_INT	0x29850
+
+#define TSI721_INT_SR2PC_CHAN_M	0xff000000
+#define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x)))
+#define TSI721_INT_IMSG_CHAN_M	0x00ff0000
+#define TSI721_INT_IMSG_CHAN(x)	(1 << (16 + (x)))
+#define TSI721_INT_OMSG_CHAN_M	0x0000ff00
+#define TSI721_INT_OMSG_CHAN(x)	(1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M	0x000000ff
+#define TSI721_INT_BDMA_CHAN(x)	(1 << (x))
+
+/*
+ * PC2SR block registers
+ */
+#define TSI721_OBWIN_NUM	TSI721_PC2SR_WINS
+
+#define TSI721_OBWINLB(x)	(0x40000 + (x) * 0x20)
+#define TSI721_OBWINLB_BA	0xffff8000
+#define TSI721_OBWINLB_WEN	0x00000001
+
+#define TSI721_OBWINUB(x)	(0x40004 + (x) * 0x20)
+
+#define TSI721_OBWINSZ(x)	(0x40008 + (x) * 0x20)
+#define TSI721_OBWINSZ_SIZE	0x00001f00
+#define TSI721_OBWIN_SIZE(size)	(__fls(size) - 15)
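+/* e.g. TSI721_OBWIN_SIZE(0x8000) = __fls(0x8000) - 15 = 0 for a 32 KB window */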
+
+#define TSI721_ZONE_SEL		0x41300
+#define TSI721_ZONE_SEL_RD_WRB	0x00020000
+#define TSI721_ZONE_SEL_GO	0x00010000
+#define TSI721_ZONE_SEL_WIN	0x00000038
+#define TSI721_ZONE_SEL_ZONE	0x00000007
+
+#define TSI721_LUT_DATA0	0x41304
+#define TSI721_LUT_DATA0_ADD	0xfffff000
+#define TSI721_LUT_DATA0_RDTYPE	0x00000f00
+#define TSI721_LUT_DATA0_NREAD	0x00000100
+#define TSI721_LUT_DATA0_MNTRD	0x00000200
+#define TSI721_LUT_DATA0_RDCRF	0x00000020
+#define TSI721_LUT_DATA0_WRCRF	0x00000010
+#define TSI721_LUT_DATA0_WRTYPE	0x0000000f
+#define TSI721_LUT_DATA0_NWR	0x00000001
+#define TSI721_LUT_DATA0_MNTWR	0x00000002
+#define TSI721_LUT_DATA0_NWR_R	0x00000004
+
+#define TSI721_LUT_DATA1	0x41308
+
+#define TSI721_LUT_DATA2	0x4130c
+#define TSI721_LUT_DATA2_HC	0xff000000
+#define TSI721_LUT_DATA2_ADD65	0x000c0000
+#define TSI721_LUT_DATA2_TT	0x00030000
+#define TSI721_LUT_DATA2_DSTID	0x0000ffff
+
+#define TSI721_PC2SR_INTE	0x41310
+
+#define TSI721_DEVCTL		0x48004
+#define TSI721_DEVCTL_SRBOOT_CMPL	0x00000004
+
+#define TSI721_I2C_INT_ENABLE	0x49120
+
+/*
+ * Block DMA Engine Registers
+ *   x = 0..7
+ */
+
+#define TSI721_DMAC_BASE(x)	(0x51000 + (x) * 0x1000)
+
+#define TSI721_DMAC_DWRCNT	0x000
+#define TSI721_DMAC_DRDCNT	0x004
+
+#define TSI721_DMAC_CTL		0x008
+#define TSI721_DMAC_CTL_SUSP	0x00000002
+#define TSI721_DMAC_CTL_INIT	0x00000001
+
+#define TSI721_DMAC_INT		0x00c
+#define TSI721_DMAC_INT_STFULL	0x00000010
+#define TSI721_DMAC_INT_DONE	0x00000008
+#define TSI721_DMAC_INT_SUSP	0x00000004
+#define TSI721_DMAC_INT_ERR	0x00000002
+#define TSI721_DMAC_INT_IOFDONE	0x00000001
+#define TSI721_DMAC_INT_ALL	0x0000001f
+
+#define TSI721_DMAC_INTSET	0x010
+
+#define TSI721_DMAC_STS		0x014
+#define TSI721_DMAC_STS_ABORT	0x00400000
+#define TSI721_DMAC_STS_RUN	0x00200000
+#define TSI721_DMAC_STS_CS	0x001f0000
+
+#define TSI721_DMAC_INTE	0x018
+
+#define TSI721_DMAC_DPTRL	0x024
+#define TSI721_DMAC_DPTRL_MASK	0xffffffe0
+
+#define TSI721_DMAC_DPTRH	0x028
+
+#define TSI721_DMAC_DSBL	0x02c
+#define TSI721_DMAC_DSBL_MASK	0xffffffc0
+
+#define TSI721_DMAC_DSBH	0x030
+
+#define TSI721_DMAC_DSSZ	0x034
+#define TSI721_DMAC_DSSZ_SIZE_M	0x0000000f
+#define TSI721_DMAC_DSSZ_SIZE(size)	(__fls(size) - 4)
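+/* e.g. TSI721_DMAC_DSSZ_SIZE(32) = __fls(32) - 4 = 1 for the minimum FIFO size */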
+
+#define TSI721_DMAC_DSRP	0x038
+#define TSI721_DMAC_DSRP_MASK	0x0007ffff
+
+#define TSI721_DMAC_DSWP	0x03c
+#define TSI721_DMAC_DSWP_MASK	0x0007ffff
+
+#define TSI721_BDMA_INTE	0x5f000
+
+/*
+ * Messaging definitions
+ */
+#define TSI721_MSG_BUFFER_SIZE		RIO_MAX_MSG_SIZE
+#define TSI721_MSG_MAX_SIZE		RIO_MAX_MSG_SIZE
+#define TSI721_IMSG_MAXCH		8
+#define TSI721_IMSG_CHNUM		TSI721_IMSG_MAXCH
+#define TSI721_IMSGD_MIN_RING_SIZE	32
+#define TSI721_IMSGD_RING_SIZE		512
+
+#define TSI721_OMSG_CHNUM		4 /* One channel per MBOX */
+#define TSI721_OMSGD_MIN_RING_SIZE	32
+#define TSI721_OMSGD_RING_SIZE		512
+
+/*
+ * Outbound Messaging Engine Registers
+ *   x = 0..7
+ */
+
+#define TSI721_OBDMAC_DWRCNT(x)		(0x61000 + (x) * 0x1000)
+
+#define TSI721_OBDMAC_DRDCNT(x)		(0x61004 + (x) * 0x1000)
+
+#define TSI721_OBDMAC_CTL(x)		(0x61008 + (x) * 0x1000)
+#define TSI721_OBDMAC_CTL_MASK		0x00000007
+#define TSI721_OBDMAC_CTL_RETRY_THR	0x00000004
+#define TSI721_OBDMAC_CTL_SUSPEND	0x00000002
+#define TSI721_OBDMAC_CTL_INIT		0x00000001
+
+#define TSI721_OBDMAC_INT(x)		(0x6100c + (x) * 0x1000)
+#define TSI721_OBDMAC_INTSET(x)		(0x61010 + (x) * 0x1000)
+#define TSI721_OBDMAC_INTE(x)		(0x61018 + (x) * 0x1000)
+#define TSI721_OBDMAC_INT_MASK		0x0000001F
+#define TSI721_OBDMAC_INT_ST_FULL	0x00000010
+#define TSI721_OBDMAC_INT_DONE		0x00000008
+#define TSI721_OBDMAC_INT_SUSPENDED	0x00000004
+#define TSI721_OBDMAC_INT_ERROR		0x00000002
+#define TSI721_OBDMAC_INT_IOF_DONE	0x00000001
+#define TSI721_OBDMAC_INT_ALL		TSI721_OBDMAC_INT_MASK
+
+#define TSI721_OBDMAC_STS(x)		(0x61014 + (x) * 0x1000)
+#define TSI721_OBDMAC_STS_MASK		0x007f0000
+#define TSI721_OBDMAC_STS_ABORT		0x00400000
+#define TSI721_OBDMAC_STS_RUN		0x00200000
+#define TSI721_OBDMAC_STS_CS		0x001f0000
+
+#define TSI721_OBDMAC_PWE(x)		(0x6101c + (x) * 0x1000)
+#define TSI721_OBDMAC_PWE_MASK		0x00000002
+#define TSI721_OBDMAC_PWE_ERROR_EN	0x00000002
+
+#define TSI721_OBDMAC_DPTRL(x)		(0x61020 + (x) * 0x1000)
+#define TSI721_OBDMAC_DPTRL_MASK	0xfffffff0
+
+#define TSI721_OBDMAC_DPTRH(x)		(0x61024 + (x) * 0x1000)
+#define TSI721_OBDMAC_DPTRH_MASK	0xffffffff
+
+#define TSI721_OBDMAC_DSBL(x)		(0x61040 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSBL_MASK		0xffffffc0
+
+#define TSI721_OBDMAC_DSBH(x)		(0x61044 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSBH_MASK		0xffffffff
+
+#define TSI721_OBDMAC_DSSZ(x)		(0x61048 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSSZ_MASK		0x0000000f
+
+#define TSI721_OBDMAC_DSRP(x)		(0x6104c + (x) * 0x1000)
+#define TSI721_OBDMAC_DSRP_MASK		0x0007ffff
+
+#define TSI721_OBDMAC_DSWP(x)		(0x61050 + (x) * 0x1000)
+#define TSI721_OBDMAC_DSWP_MASK		0x0007ffff
+
+#define TSI721_RQRPTO			0x60010
+#define TSI721_RQRPTO_MASK		0x00ffffff
+#define TSI721_RQRPTO_VAL		400	/* Response timeout value */
+
+/*
+ * Inbound Messaging Engine Registers
+ *   x = 0..7
+ */
+
+#define TSI721_IB_DEVID_GLOBAL		0xffff
+#define TSI721_IBDMAC_FQBL(x)		(0x61200 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQBL_MASK		0xffffffc0
+
+#define TSI721_IBDMAC_FQBH(x)		(0x61204 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQBH_MASK		0xffffffff
+
+#define TSI721_IBDMAC_FQSZ_ENTRY_INX	TSI721_IMSGD_RING_SIZE
+#define TSI721_IBDMAC_FQSZ(x)		(0x61208 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQSZ_MASK		0x0000000f
+
+#define TSI721_IBDMAC_FQRP(x)		(0x6120c + (x) * 0x1000)
+#define TSI721_IBDMAC_FQRP_MASK		0x0007ffff
+
+#define TSI721_IBDMAC_FQWP(x)		(0x61210 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQWP_MASK		0x0007ffff
+
+#define TSI721_IBDMAC_FQTH(x)		(0x61214 + (x) * 0x1000)
+#define TSI721_IBDMAC_FQTH_MASK		0x0007ffff
+
+#define TSI721_IB_DEVID			0x60020
+#define TSI721_IB_DEVID_MASK		0x0000ffff
+
+#define TSI721_IBDMAC_CTL(x)		(0x61240 + (x) * 0x1000)
+#define TSI721_IBDMAC_CTL_MASK		0x00000003
+#define TSI721_IBDMAC_CTL_SUSPEND	0x00000002
+#define TSI721_IBDMAC_CTL_INIT		0x00000001
+
+#define TSI721_IBDMAC_STS(x)		(0x61244 + (x) * 0x1000)
+#define TSI721_IBDMAC_STS_MASK		0x007f0000
+#define TSI721_IBDMAC_STS_ABORT		0x00400000
+#define TSI721_IBDMAC_STS_RUN		0x00200000
+#define TSI721_IBDMAC_STS_CS		0x001f0000
+
+#define TSI721_IBDMAC_INT(x)		(0x61248 + (x) * 0x1000)
+#define TSI721_IBDMAC_INTSET(x)		(0x6124c + (x) * 0x1000)
+#define TSI721_IBDMAC_INTE(x)		(0x61250 + (x) * 0x1000)
+#define TSI721_IBDMAC_INT_MASK		0x0000100f
+#define TSI721_IBDMAC_INT_SRTO		0x00001000
+#define TSI721_IBDMAC_INT_SUSPENDED	0x00000008
+#define TSI721_IBDMAC_INT_PC_ERROR	0x00000004
+#define TSI721_IBDMAC_INT_FQ_LOW	0x00000002
+#define TSI721_IBDMAC_INT_DQ_RCV	0x00000001
+#define TSI721_IBDMAC_INT_ALL		TSI721_IBDMAC_INT_MASK
+
+#define TSI721_IBDMAC_PWE(x)		(0x61254 + (x) * 0x1000)
+#define TSI721_IBDMAC_PWE_MASK		0x00001700
+#define TSI721_IBDMAC_PWE_SRTO		0x00001000
+#define TSI721_IBDMAC_PWE_ILL_FMT	0x00000400
+#define TSI721_IBDMAC_PWE_ILL_DEC	0x00000200
+#define TSI721_IBDMAC_PWE_IMP_SP	0x00000100
+
+#define TSI721_IBDMAC_DQBL(x)		(0x61300 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQBL_MASK		0xffffffc0
+#define TSI721_IBDMAC_DQBL_ADDR		0xffffffc0
+
+#define TSI721_IBDMAC_DQBH(x)		(0x61304 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQBH_MASK		0xffffffff
+
+#define TSI721_IBDMAC_DQRP(x)		(0x61308 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQRP_MASK		0x0007ffff
+
+#define TSI721_IBDMAC_DQWR(x)		(0x6130c + (x) * 0x1000)
+#define TSI721_IBDMAC_DQWR_MASK		0x0007ffff
+
+#define TSI721_IBDMAC_DQSZ(x)		(0x61314 + (x) * 0x1000)
+#define TSI721_IBDMAC_DQSZ_MASK		0x0000000f
+
+/*
+ * Messaging Engine Interrupts
+ */
+
+#define TSI721_SMSG_PWE			0x6a004
+
+#define TSI721_SMSG_INTE		0x6a000
+#define TSI721_SMSG_INT			0x6a008
+#define TSI721_SMSG_INTSET		0x6a010
+#define TSI721_SMSG_INT_MASK		0x0086ffff
+#define TSI721_SMSG_INT_UNS_RSP		0x00800000
+#define TSI721_SMSG_INT_ECC_NCOR	0x00040000
+#define TSI721_SMSG_INT_ECC_COR		0x00020000
+#define TSI721_SMSG_INT_ECC_NCOR_CH	0x0000ff00
+#define TSI721_SMSG_INT_ECC_COR_CH	0x000000ff
+
+#define TSI721_SMSG_ECC_LOG		0x6a014
+#define TSI721_SMSG_ECC_LOG_MASK	0x00070007
+#define TSI721_SMSG_ECC_LOG_ECC_NCOR_M	0x00070000
+#define TSI721_SMSG_ECC_LOG_ECC_COR_M	0x00000007
+
+#define TSI721_RETRY_GEN_CNT		0x6a100
+#define TSI721_RETRY_GEN_CNT_MASK	0xffffffff
+
+#define TSI721_RETRY_RX_CNT		0x6a104
+#define TSI721_RETRY_RX_CNT_MASK	0xffffffff
+
+#define TSI721_SMSG_ECC_COR_LOG(x)	(0x6a300 + (x) * 4)
+#define TSI721_SMSG_ECC_COR_LOG_MASK	0x000000ff
+
+#define TSI721_SMSG_ECC_NCOR(x)		(0x6a340 + (x) * 4)
+#define TSI721_SMSG_ECC_NCOR_MASK	0x000000ff
+
+/*
+ * Block DMA Descriptors
+ */
+
+struct tsi721_dma_desc {
+	__le32 type_id;
+
+#define TSI721_DMAD_DEVID	0x0000ffff
+#define TSI721_DMAD_CRF		0x00010000
+#define TSI721_DMAD_PRIO	0x00060000
+#define TSI721_DMAD_RTYPE	0x00780000
+#define TSI721_DMAD_IOF		0x08000000
+#define TSI721_DMAD_DTYPE	0xe0000000
+
+	__le32 bcount;
+
+#define TSI721_DMAD_BCOUNT1	0x03ffffff /* if DTYPE == 1 */
+#define TSI721_DMAD_BCOUNT2	0x0000000f /* if DTYPE == 2 */
+#define TSI721_DMAD_TT		0x0c000000
+#define TSI721_DMAD_RADDR0	0xc0000000
+
+	union {
+		__le32 raddr_lo;	   /* if DTYPE == (1 || 2) */
+		__le32 next_lo;		   /* if DTYPE == 3 */
+	};
+
+#define TSI721_DMAD_CFGOFF	0x00ffffff
+#define TSI721_DMAD_HOPCNT	0xff000000
+
+	union {
+		__le32 raddr_hi;	   /* if DTYPE == (1 || 2) */
+		__le32 next_hi;		   /* if DTYPE == 3 */
+	};
+
+	union {
+		struct {		   /* if DTYPE == 1 */
+			__le32 bufptr_lo;
+			__le32 bufptr_hi;
+			__le32 s_dist;
+			__le32 s_size;
+		} t1;
+		__le32 data[4];		   /* if DTYPE == 2 */
+		u32    reserved[4];	   /* if DTYPE == 3 */
+	};
+} __aligned(32);
+
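+/*
+ * Illustrative packing of a DTYPE1 (data transfer) descriptor per the
+ * masks above; values are examples, mirroring the descriptor fill
+ * logic in tsi721_dma.c:
+ *
+ *	type_id = (DTYPE1 << 29) | (rtype << 19) | destid;
+ *	bcount  = (rio_addr_bits_1_0 << 30) | (sys_size << 26) | byte_count;
+ *
+ * TSI721_DMAD_BCOUNT1 caps one DTYPE1 descriptor at 0x03ffffff + 1
+ * bytes (64 MiB); larger transfers span multiple descriptors.
+ */
+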
+/*
+ * Inbound Messaging Descriptor
+ */
+struct tsi721_imsg_desc {
+	__le32 type_id;
+
+#define TSI721_IMD_DEVID	0x0000ffff
+#define TSI721_IMD_CRF		0x00010000
+#define TSI721_IMD_PRIO		0x00060000
+#define TSI721_IMD_TT		0x00180000
+#define TSI721_IMD_DTYPE	0xe0000000
+
+	__le32 msg_info;
+
+#define TSI721_IMD_BCOUNT	0x00000ff8
+#define TSI721_IMD_SSIZE	0x0000f000
+#define TSI721_IMD_LETER	0x00030000
+#define TSI721_IMD_XMBOX	0x003c0000
+#define TSI721_IMD_MBOX		0x00c00000
+#define TSI721_IMD_CS		0x78000000
+#define TSI721_IMD_HO		0x80000000
+
+	__le32 bufptr_lo;
+	__le32 bufptr_hi;
+	u32    reserved[12];
+} __aligned(64);
+
+/*
+ * Outbound Messaging Descriptor
+ */
+struct tsi721_omsg_desc {
+	__le32 type_id;
+
+#define TSI721_OMD_DEVID	0x0000ffff
+#define TSI721_OMD_CRF		0x00010000
+#define TSI721_OMD_PRIO		0x00060000
+#define TSI721_OMD_IOF		0x08000000
+#define TSI721_OMD_DTYPE	0xe0000000
+#define TSI721_OMD_RSRVD	0x17f80000
+
+	__le32 msg_info;
+
+#define TSI721_OMD_BCOUNT	0x00000ff8
+#define TSI721_OMD_SSIZE	0x0000f000
+#define TSI721_OMD_LETER	0x00030000
+#define TSI721_OMD_XMBOX	0x003c0000
+#define TSI721_OMD_MBOX		0x00c00000
+#define TSI721_OMD_TT		0x0c000000
+
+	union {
+		__le32 bufptr_lo;	/* if DTYPE == 4 */
+		__le32 next_lo;		/* if DTYPE == 5 */
+	};
+
+	union {
+		__le32 bufptr_hi;	/* if DTYPE == 4 */
+		__le32 next_hi;		/* if DTYPE == 5 */
+	};
+} __aligned(16);
+
+struct tsi721_dma_sts {
+	__le64	desc_sts[8];
+} __aligned(64);
+
+struct tsi721_desc_sts_fifo {
+	union {
+		__le64	da64;
+		struct {
+			__le32	lo;
+			__le32	hi;
+		} da32;
+	} stat[8];
+} __aligned(64);
+
+/* Descriptor types for BDMA and Messaging blocks */
+enum dma_dtype {
+	DTYPE1 = 1, /* Data Transfer DMA Descriptor */
+	DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */
+	DTYPE3 = 3, /* Block Pointer DMA Descriptor */
+	DTYPE4 = 4, /* Outbound Msg DMA Descriptor */
+	DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */
+	DTYPE6 = 6  /* Inbound Messaging Descriptor */
+};
+
+enum dma_rtype {
+	NREAD = 0,
+	LAST_NWRITE_R = 1,
+	ALL_NWRITE = 2,
+	ALL_NWRITE_R = 3,
+	MAINT_RD = 4,
+	MAINT_WR = 5
+};
+
+/*
+ * mport Driver Definitions
+ */
+#define TSI721_DMA_CHNUM	TSI721_DMA_MAXCH
+
+#define TSI721_DMACH_MAINT	7	/* DMA channel for maint requests */
+#define TSI721_DMACH_MAINT_NBD	32	/* Number of BDs for maint requests */
+
+#define TSI721_DMACH_DMA	1	/* DMA channel for data transfers */
+
+#define MSG_DMA_ENTRY_INX_TO_SIZE(x)	((0x10 << (x)) & 0xFFFF0)
+
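+/*
+ * Illustrative expansion of the macro above (the entry index selects a
+ * power-of-two buffer size):
+ *
+ *	MSG_DMA_ENTRY_INX_TO_SIZE(1) = (0x10 << 1) & 0xFFFF0 = 0x20   (32 B)
+ *	MSG_DMA_ENTRY_INX_TO_SIZE(8) = (0x10 << 8) & 0xFFFF0 = 0x1000 (4 KiB)
+ */
+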
+enum tsi721_smsg_int_flag {
+	SMSG_INT_NONE		= 0x00000000,
+	SMSG_INT_ECC_COR_CH	= 0x000000ff,
+	SMSG_INT_ECC_NCOR_CH	= 0x0000ff00,
+	SMSG_INT_ECC_COR	= 0x00020000,
+	SMSG_INT_ECC_NCOR	= 0x00040000,
+	SMSG_INT_UNS_RSP	= 0x00800000,
+	SMSG_INT_ALL		= 0x0006ffff
+};
+
+/* Structures */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+#define TSI721_BDMA_MAX_BCOUNT	(TSI721_DMAD_BCOUNT1 + 1)
+
+struct tsi721_tx_desc {
+	struct dma_async_tx_descriptor	txd;
+	u16				destid;
+	/* low 64-bits of 66-bit RIO address */
+	u64				rio_addr;
+	/* upper 2-bits of 66-bit RIO address */
+	u8				rio_addr_u;
+	enum dma_rtype			rtype;
+	struct list_head		desc_node;
+	struct scatterlist		*sg;
+	unsigned int			sg_len;
+	enum dma_status			status;
+};
+
+struct tsi721_bdma_chan {
+	int		id;
+	void __iomem	*regs;
+	int		bd_num;		/* number of HW buffer descriptors */
+	void		*bd_base;	/* start of DMA descriptors */
+	dma_addr_t	bd_phys;
+	void		*sts_base;	/* start of DMA BD status FIFO */
+	dma_addr_t	sts_phys;
+	int		sts_size;
+	u32		sts_rdptr;
+	u32		wr_count;
+	u32		wr_count_next;
+
+	struct dma_chan		dchan;
+	struct tsi721_tx_desc	*tx_desc;
+	spinlock_t		lock;
+	struct tsi721_tx_desc	*active_tx;
+	struct list_head	queue;
+	struct list_head	free_list;
+	struct tasklet_struct	tasklet;
+	bool			active;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+	int		ch_id;		/* BDMA channel number */
+	int		bd_num;		/* number of buffer descriptors */
+	void		*bd_base;	/* start of DMA descriptors */
+	dma_addr_t	bd_phys;
+	void		*sts_base;	/* start of DMA BD status FIFO */
+	dma_addr_t	sts_phys;
+	int		sts_size;
+};
+
+struct tsi721_imsg_ring {
+	u32		size;
+	/* VA/PA of data buffers for incoming messages */
+	void		*buf_base;
+	dma_addr_t	buf_phys;
+	/* VA/PA of circular free buffer list */
+	void		*imfq_base;
+	dma_addr_t	imfq_phys;
+	/* VA/PA of Inbound message descriptors */
+	void		*imd_base;
+	dma_addr_t	imd_phys;
+	/* Inbound Queue buffer pointers */
+	void		*imq_base[TSI721_IMSGD_RING_SIZE];
+
+	u32		rx_slot;
+	void		*dev_id;
+	u32		fq_wrptr;
+	u32		desc_rdptr;
+	spinlock_t	lock;
+};
+
+struct tsi721_omsg_ring {
+	u32		size;
+	/* VA/PA of OB Msg descriptors */
+	void		*omd_base;
+	dma_addr_t	omd_phys;
+	/* VA/PA of OB Msg data buffers */
+	void		*omq_base[TSI721_OMSGD_RING_SIZE];
+	dma_addr_t	omq_phys[TSI721_OMSGD_RING_SIZE];
+	/* VA/PA of OB Msg descriptor status FIFO */
+	void		*sts_base;
+	dma_addr_t	sts_phys;
+	u32		sts_size; /* # of allocated status entries */
+	u32		sts_rdptr;
+
+	u32		tx_slot;
+	void		*dev_id;
+	u32		wr_count;
+	spinlock_t	lock;
+};
+
+enum tsi721_flags {
+	TSI721_USING_MSI	= (1 << 0),
+	TSI721_USING_MSIX	= (1 << 1),
+	TSI721_IMSGID_SET	= (1 << 2),
+};
+
+#ifdef CONFIG_PCI_MSI
+/*
+ * MSI-X Table Entries (0 ... 69)
+ */
+#define TSI721_MSIX_DMACH_DONE(x)	(0 + (x))
+#define TSI721_MSIX_DMACH_INT(x)	(8 + (x))
+#define TSI721_MSIX_BDMA_INT		16
+#define TSI721_MSIX_OMSG_DONE(x)	(17 + (x))
+#define TSI721_MSIX_OMSG_INT(x)		(25 + (x))
+#define TSI721_MSIX_IMSG_DQ_RCV(x)	(33 + (x))
+#define TSI721_MSIX_IMSG_INT(x)		(41 + (x))
+#define TSI721_MSIX_MSG_INT		49
+#define TSI721_MSIX_SR2PC_IDBQ_RCV(x)	(50 + (x))
+#define TSI721_MSIX_SR2PC_CH_INT(x)	(58 + (x))
+#define TSI721_MSIX_SR2PC_INT		66
+#define TSI721_MSIX_PC2SR_INT		67
+#define TSI721_MSIX_SRIO_MAC_INT	68
+#define TSI721_MSIX_I2C_INT		69
+
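+/*
+ * Illustrative mapping derived from the entry formulas above: BDMA
+ * channel 2 signals completion on MSI-X table entry
+ * TSI721_MSIX_DMACH_DONE(2) = 2 and other events on
+ * TSI721_MSIX_DMACH_INT(2) = 10; inbound mailbox 0 receive uses entry
+ * TSI721_MSIX_IMSG_DQ_RCV(0) = 33.
+ */
+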
+/* MSI-X vector and init table entry indexes */
+enum tsi721_msix_vect {
+	TSI721_VECT_IDB,
+	TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */
+	TSI721_VECT_OMB0_DONE,
+	TSI721_VECT_OMB1_DONE,
+	TSI721_VECT_OMB2_DONE,
+	TSI721_VECT_OMB3_DONE,
+	TSI721_VECT_OMB0_INT,
+	TSI721_VECT_OMB1_INT,
+	TSI721_VECT_OMB2_INT,
+	TSI721_VECT_OMB3_INT,
+	TSI721_VECT_IMB0_RCV,
+	TSI721_VECT_IMB1_RCV,
+	TSI721_VECT_IMB2_RCV,
+	TSI721_VECT_IMB3_RCV,
+	TSI721_VECT_IMB0_INT,
+	TSI721_VECT_IMB1_INT,
+	TSI721_VECT_IMB2_INT,
+	TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	TSI721_VECT_DMA0_DONE,
+	TSI721_VECT_DMA1_DONE,
+	TSI721_VECT_DMA2_DONE,
+	TSI721_VECT_DMA3_DONE,
+	TSI721_VECT_DMA4_DONE,
+	TSI721_VECT_DMA5_DONE,
+	TSI721_VECT_DMA6_DONE,
+	TSI721_VECT_DMA7_DONE,
+	TSI721_VECT_DMA0_INT,
+	TSI721_VECT_DMA1_INT,
+	TSI721_VECT_DMA2_INT,
+	TSI721_VECT_DMA3_INT,
+	TSI721_VECT_DMA4_INT,
+	TSI721_VECT_DMA5_INT,
+	TSI721_VECT_DMA6_INT,
+	TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+	TSI721_VECT_MAX
+};
+
+#define IRQ_DEVICE_NAME_MAX	64
+
+struct msix_irq {
+	u16	vector;
+	char	irq_name[IRQ_DEVICE_NAME_MAX];
+};
+#endif /* CONFIG_PCI_MSI */
+
+struct tsi721_ib_win_mapping {
+	struct list_head node;
+	dma_addr_t	lstart;
+};
+
+struct tsi721_ib_win {
+	u64		rstart;
+	u32		size;
+	dma_addr_t	lstart;
+	bool		active;
+	bool		xlat;
+	struct list_head mappings;
+};
+
+struct tsi721_obw_bar {
+	u64		base;
+	u64		size;
+	u64		free;
+};
+
+struct tsi721_ob_win {
+	u64		base;
+	u32		size;
+	u16		destid;
+	u64		rstart;
+	bool		active;
+	struct tsi721_obw_bar *pbar;
+};
+
+struct tsi721_device {
+	struct pci_dev	*pdev;
+	struct rio_mport mport;
+	u32		flags;
+	void __iomem	*regs;
+#ifdef CONFIG_PCI_MSI
+	struct msix_irq	msix[TSI721_VECT_MAX];
+#endif
+	/* Doorbells */
+	void __iomem	*odb_base;
+	void		*idb_base;
+	dma_addr_t	idb_dma;
+	struct work_struct idb_work;
+	u32		db_discard_count;
+
+	/* Inbound Port-Write */
+	struct work_struct pw_work;
+	struct kfifo	pw_fifo;
+	spinlock_t	pw_fifo_lock;
+	u32		pw_discard_count;
+
+	/* BDMA Engine */
+	struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
+
+	/* Inbound Messaging */
+	int		imsg_init[TSI721_IMSG_CHNUM];
+	struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM];
+
+	/* Outbound Messaging */
+	int		omsg_init[TSI721_OMSG_CHNUM];
+	struct tsi721_omsg_ring	omsg_ring[TSI721_OMSG_CHNUM];
+
+	/* Inbound Mapping Windows */
+	struct tsi721_ib_win ib_win[TSI721_IBWIN_NUM];
+	int		ibwin_cnt;
+
+	/* Outbound Mapping Windows */
+	struct tsi721_obw_bar p2r_bar[2];
+	struct tsi721_ob_win  ob_win[TSI721_OBWIN_NUM];
+	int		obwin_cnt;
+};
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int tsi721_register_dma(struct tsi721_device *priv);
+extern void tsi721_unregister_dma(struct tsi721_device *priv);
+extern void tsi721_dma_stop_all(struct tsi721_device *priv);
+#else
+#define tsi721_dma_stop_all(priv) do {} while (0)
+#define tsi721_unregister_dma(priv) do {} while (0)
+#endif
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721_dma.c b/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 0000000..e2a4185
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,1059 @@
+/*
+ * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include "../../dma/dmaengine.h"
+
+#include "tsi721.h"
+
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
+#endif
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
+
+static unsigned int dma_desc_per_channel = 128;
+module_param(dma_desc_per_channel, uint, S_IRUGO);
+MODULE_PARM_DESC(dma_desc_per_channel,
+		 "Number of DMA descriptors per channel (default: 128)");
+
+static unsigned int dma_txqueue_sz = 16;
+module_param(dma_txqueue_sz, uint, S_IRUGO);
+MODULE_PARM_DESC(dma_txqueue_sz,
+		 "DMA Transactions Queue Size (default: 16)");
+
+static u8 dma_sel = 0x7f;
+module_param(dma_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(dma_sel,
+		 "DMA Channel Selection Mask (default: 0x7f = all)");
+
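+/*
+ * Illustrative usage sketch (module name assumed to be tsi721_mport;
+ * adjust to the actual build): restrict the driver to BDMA channels
+ * 0-3 and allocate 256 hardware descriptors per channel:
+ *
+ *	modprobe tsi721_mport dma_sel=0x0f dma_desc_per_channel=256
+ */
+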
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+	return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
+{
+	struct tsi721_dma_desc *bd_ptr;
+	struct device *dev = bdma_chan->dchan.device->dev;
+	u64		*sts_ptr;
+	dma_addr_t	bd_phys;
+	dma_addr_t	sts_phys;
+	int		sts_size;
+#ifdef CONFIG_PCI_MSI
+	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
+
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);
+
+	/*
+	 * Allocate space for DMA descriptors
+	 * (add an extra element for link descriptor)
+	 */
+	bd_ptr = dma_zalloc_coherent(dev,
+				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
+				&bd_phys, GFP_ATOMIC);
+	if (!bd_ptr)
+		return -ENOMEM;
+
+	bdma_chan->bd_num = bd_num;
+	bdma_chan->bd_phys = bd_phys;
+	bdma_chan->bd_base = bd_ptr;
+
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+		  "DMAC%d descriptors @ %p (phys = %pad)",
+		  bdma_chan->id, bd_ptr, &bd_phys);
+
+	/* Allocate space for descriptor status FIFO */
+	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
+					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
+	sts_size = roundup_pow_of_two(sts_size);
+	sts_ptr = dma_zalloc_coherent(dev,
+				     sts_size * sizeof(struct tsi721_dma_sts),
+				     &sts_phys, GFP_ATOMIC);
+	if (!sts_ptr) {
+		/* Free space allocated for DMA descriptors */
+		dma_free_coherent(dev,
+				  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+				  bd_ptr, bd_phys);
+		bdma_chan->bd_base = NULL;
+		return -ENOMEM;
+	}
+
+	bdma_chan->sts_phys = sts_phys;
+	bdma_chan->sts_base = sts_ptr;
+	bdma_chan->sts_size = sts_size;
+
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+		"DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
+		bdma_chan->id, sts_ptr, &sts_phys, sts_size);
+
+	/* Initialize DMA descriptors ring using added link descriptor */
+	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
+	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
+						 TSI721_DMAC_DPTRL_MASK);
+	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+	/* Setup DMA descriptor pointers */
+	iowrite32(((u64)bd_phys >> 32),
+		bdma_chan->regs + TSI721_DMAC_DPTRH);
+	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+		bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+	/* Setup descriptor status FIFO */
+	iowrite32(((u64)sts_phys >> 32),
+		bdma_chan->regs + TSI721_DMAC_DSBH);
+	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+		bdma_chan->regs + TSI721_DMAC_DSBL);
+	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+		bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+	/* Clear interrupt bits */
+	iowrite32(TSI721_DMAC_INT_ALL,
+		bdma_chan->regs + TSI721_DMAC_INT);
+
+	ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+#ifdef CONFIG_PCI_MSI
+	/* Request interrupt service if we are in MSI-X mode */
+	if (priv->flags & TSI721_USING_MSIX) {
+		int rc, idx;
+
+		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
+
+		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
+				 priv->msix[idx].irq_name, (void *)bdma_chan);
+
+		if (rc) {
+			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+				  "Unable to get MSI-X for DMAC%d-DONE",
+				  bdma_chan->id);
+			goto err_out;
+		}
+
+		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
+
+		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
+				priv->msix[idx].irq_name, (void *)bdma_chan);
+
+		if (rc) {
+			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+				  "Unable to get MSI-X for DMAC%d-INT",
+				  bdma_chan->id);
+			free_irq(
+				priv->msix[TSI721_VECT_DMA0_DONE +
+					    bdma_chan->id].vector,
+				(void *)bdma_chan);
+		}
+
+err_out:
+		if (rc) {
+			/* Free space allocated for DMA descriptors */
+			dma_free_coherent(dev,
+				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
+				bd_ptr, bd_phys);
+			bdma_chan->bd_base = NULL;
+
+			/* Free space allocated for status descriptors */
+			dma_free_coherent(dev,
+				sts_size * sizeof(struct tsi721_dma_sts),
+				sts_ptr, sts_phys);
+			bdma_chan->sts_base = NULL;
+
+			return -EIO;
+		}
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	/* Toggle DMA channel initialization */
+	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);
+	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+	bdma_chan->sts_rdptr = 0;
+	udelay(10);
+
+	return 0;
+}
+
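+/*
+ * Illustrative sizing with the default dma_desc_per_channel = 128: the
+ * ring above holds 128 data descriptors plus one DTYPE3 link
+ * descriptor, and the status FIFO becomes roundup_pow_of_two(129) =
+ * 256 entries (assuming 129 >= TSI721_DMA_MINSTSSZ).
+ */
+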
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+	u32 ch_stat;
+#ifdef CONFIG_PCI_MSI
+	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
+
+	if (bdma_chan->bd_base == NULL)
+		return 0;
+
+	/* Check if DMA channel still running */
+	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+	if (ch_stat & TSI721_DMAC_STS_RUN)
+		return -EFAULT;
+
+	/* Put DMA channel into init state */
+	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+				    bdma_chan->id].vector, (void *)bdma_chan);
+		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+				    bdma_chan->id].vector, (void *)bdma_chan);
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	/* Free space allocated for DMA descriptors */
+	dma_free_coherent(bdma_chan->dchan.device->dev,
+		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
+		bdma_chan->bd_base, bdma_chan->bd_phys);
+	bdma_chan->bd_base = NULL;
+
+	/* Free space allocated for status FIFO */
+	dma_free_coherent(bdma_chan->dchan.device->dev,
+		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+		bdma_chan->sts_base, bdma_chan->sts_phys);
+	bdma_chan->sts_base = NULL;
+	return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+	if (enable) {
+		/* Clear pending BDMA channel interrupts */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INT);
+		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+		/* Enable BDMA channel interrupts */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INTE);
+	} else {
+		/* Disable BDMA channel interrupts */
+		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+		/* Clear pending BDMA channel interrupts */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INT);
+	}
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+	u32 sts;
+
+	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+	return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+	/* Disable BDMA channel interrupts */
+	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+	if (bdma_chan->active)
+		tasklet_hi_schedule(&bdma_chan->tasklet);
+}
+
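+/*
+ * Note on the interrupt flow: tsi721_bdma_handler() masks the
+ * channel's interrupts and defers all processing to the tasklet;
+ * tsi721_dma_tasklet() re-enables them (TSI721_DMAC_INTE) on exit.
+ */
+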
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+	struct tsi721_bdma_chan *bdma_chan = ptr;
+
+	if (bdma_chan->active)
+		tasklet_hi_schedule(&bdma_chan->tasklet);
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		tsi_err(&bdma_chan->dchan.dev->device,
+			"DMAC%d Attempt to start non-idle channel",
+			bdma_chan->id);
+		return;
+	}
+
+	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+		tsi_err(&bdma_chan->dchan.dev->device,
+			"DMAC%d Attempt to start DMA with no BDs ready %d",
+			bdma_chan->id, task_pid_nr(current));
+		return;
+	}
+
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
+		  bdma_chan->id, bdma_chan->wr_count_next,
+		  task_pid_nr(current));
+
+	iowrite32(bdma_chan->wr_count_next,
+		bdma_chan->regs + TSI721_DMAC_DWRCNT);
+	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+	bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static int
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
+		      struct tsi721_dma_desc *bd_ptr,
+		      struct scatterlist *sg, u32 sys_size)
+{
+	u64 rio_addr;
+
+	if (bd_ptr == NULL)
+		return -EINVAL;
+
+	/* Initialize DMA descriptor */
+	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+				      (desc->rtype << 19) | desc->destid);
+	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+				     (sys_size << 26));
+	rio_addr = (desc->rio_addr >> 2) |
+				((u64)(desc->rio_addr_u & 0x3) << 62);
+	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+	bd_ptr->t1.bufptr_lo = cpu_to_le32(
+					(u64)sg_dma_address(sg) & 0xffffffff);
+	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+	bd_ptr->t1.s_dist = 0;
+	bd_ptr->t1.s_size = 0;
+
+	return 0;
+}
+
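+/*
+ * Illustrative note on the address packing above: the two low bits of
+ * the 66-bit RapidIO address land in bcount bits 31:30
+ * (TSI721_DMAD_RADDR0) and the remaining bits, shifted right by two,
+ * fill raddr_hi:raddr_lo. For a hypothetical rio_addr = 0x100000003,
+ * rio_addr_u = 0: RADDR0 = 0x3 and raddr_hi:raddr_lo = 0x40000000.
+ */
+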
+static int
+tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
+{
+	if (bd_ptr == NULL)
+		return -EINVAL;
+
+	/* Update DMA descriptor */
+	if (interrupt)
+		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);
+
+	return 0;
+}
+
+static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
+			      struct tsi721_tx_desc *desc)
+{
+	struct dma_async_tx_descriptor *txd = &desc->txd;
+	dma_async_tx_callback callback = txd->callback;
+	void *param = txd->callback_param;
+
+	list_move(&desc->desc_node, &bdma_chan->free_list);
+
+	if (callback)
+		callback(param);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+	u32 srd_ptr;
+	u64 *sts_ptr;
+	int i, j;
+
+	/* Check and clear descriptor status FIFO entries */
+	srd_ptr = bdma_chan->sts_rdptr;
+	sts_ptr = bdma_chan->sts_base;
+	j = srd_ptr * 8;
+	while (sts_ptr[j]) {
+		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+			sts_ptr[j] = 0;
+
+		++srd_ptr;
+		srd_ptr %= bdma_chan->sts_size;
+		j = srd_ptr * 8;
+	}
+
+	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+	bdma_chan->sts_rdptr = srd_ptr;
+}
+
+/* Must be called with the channel spinlock held */
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
+{
+	struct dma_chan *dchan = desc->txd.chan;
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	u32 sys_size;
+	u64 rio_addr;
+	dma_addr_t next_addr;
+	u32 bcount;
+	struct scatterlist *sg;
+	unsigned int i;
+	int err = 0;
+	struct tsi721_dma_desc *bd_ptr = NULL;
+	u32 idx, rd_idx;
+	u32 add_count = 0;
+	struct device *ch_dev = &dchan->dev->device;
+
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
+			bdma_chan->id);
+		return -EIO;
+	}
+
+	/*
+	 * Fill DMA channel's hardware buffer descriptors.
+	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
+	 */
+	rio_addr = desc->rio_addr;
+	next_addr = -1;
+	bcount = 0;
+	sys_size = dma_to_mport(dchan->device)->sys_size;
+
+	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
+	rd_idx %= (bdma_chan->bd_num + 1);
+
+	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
+	if (idx == bdma_chan->bd_num) {
+		/* wrap around link descriptor */
+		idx = 0;
+		add_count++;
+	}
+
+	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
+		  bdma_chan->id, rd_idx, idx);
+
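+	/*
+	 * Illustrative example of the merge below (addresses are
+	 * hypothetical): two 4 KiB sg entries at bus addresses 0x1000
+	 * and 0x2000 are contiguous and collapse into a single 8 KiB
+	 * hardware descriptor instead of two.
+	 */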
+	for_each_sg(desc->sg, sg, desc->sg_len, i) {
+		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
+			bdma_chan->id, i, desc->sg_len,
+			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));
+
+		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
+			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
+				bdma_chan->id, i);
+			err = -EINVAL;
+			break;
+		}
+
+		/*
+		 * If this sg entry forms contiguous block with previous one,
+		 * try to merge it into existing DMA descriptor
+		 */
+		if (next_addr == sg_dma_address(sg) &&
+		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
+			/* Adjust byte count of the descriptor */
+			bcount += sg_dma_len(sg);
+			goto entry_done;
+		} else if (next_addr != -1) {
+			/* Finalize descriptor using total byte count value */
+			tsi721_desc_fill_end(bd_ptr, bcount, 0);
+			tsi_debug(DMAV, ch_dev,	"DMAC%d prev desc final len: %d",
+				  bdma_chan->id, bcount);
+		}
+
+		desc->rio_addr = rio_addr;
+
+		if (i && idx == rd_idx) {
+			tsi_debug(DMAV, ch_dev,
+				  "DMAC%d HW descriptor ring is full @ %d",
+				  bdma_chan->id, i);
+			desc->sg = sg;
+			desc->sg_len -= i;
+			break;
+		}
+
+		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
+		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
+		if (err) {
+			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
+			break;
+		}
+
+		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
+			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);
+
+		next_addr = sg_dma_address(sg);
+		bcount = sg_dma_len(sg);
+
+		add_count++;
+		if (++idx == bdma_chan->bd_num) {
+			/* wrap around link descriptor */
+			idx = 0;
+			add_count++;
+		}
+
+entry_done:
+		if (sg_is_last(sg)) {
+			tsi721_desc_fill_end(bd_ptr, bcount, 0);
+			tsi_debug(DMAV, ch_dev,
+				  "DMAC%d last desc final len: %d",
+				  bdma_chan->id, bcount);
+			desc->sg_len = 0;
+		} else {
+			rio_addr += sg_dma_len(sg);
+			next_addr += sg_dma_len(sg);
+		}
+	}
+
+	if (!err)
+		bdma_chan->wr_count_next += add_count;
+
+	return err;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
+				struct tsi721_tx_desc *desc)
+{
+	int err;
+
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);
+
+	if (!tsi721_dma_is_idle(bdma_chan))
+		return;
+
+	/*
+	 * If there is no data transfer in progress, fetch new descriptor from
+	 * the pending queue.
+	 */
+
+	if (desc == NULL && bdma_chan->active_tx == NULL &&
+					!list_empty(&bdma_chan->queue)) {
+		desc = list_first_entry(&bdma_chan->queue,
+					struct tsi721_tx_desc, desc_node);
+		list_del_init(&desc->desc_node);
+		bdma_chan->active_tx = desc;
+	}
+
+	if (desc) {
+		err = tsi721_submit_sg(desc);
+		if (!err) {
+			tsi721_start_dma(bdma_chan);
+		} else {
+			tsi721_dma_tx_err(bdma_chan, desc);
+			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
+				bdma_chan->id, err);
+		}
+	}
+
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
+		  bdma_chan->id);
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+	u32 dmac_int, dmac_sts;
+
+	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
+		  bdma_chan->id, dmac_int);
+	/* Clear channel interrupts */
+	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+	if (dmac_int & TSI721_DMAC_INT_ERR) {
+		int i = 10000;
+		struct tsi721_tx_desc *desc;
+
+		desc = bdma_chan->active_tx;
+		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+		tsi_err(&bdma_chan->dchan.dev->device,
+			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
+			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);
+
+		/* Re-initialize DMA channel if possible */
+
+		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
+			goto err_out;
+
+		tsi721_clr_stat(bdma_chan);
+
+		spin_lock(&bdma_chan->lock);
+
+		/* Put DMA channel into init state */
+		iowrite32(TSI721_DMAC_CTL_INIT,
+			  bdma_chan->regs + TSI721_DMAC_CTL);
+		do {
+			udelay(1);
+			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+			i--;
+		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);
+
+		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
+			tsi_err(&bdma_chan->dchan.dev->device,
+				"Failed to re-initiate DMAC%d",	bdma_chan->id);
+			spin_unlock(&bdma_chan->lock);
+			goto err_out;
+		}
+
+		/* Setup DMA descriptor pointers */
+		iowrite32(((u64)bdma_chan->bd_phys >> 32),
+			bdma_chan->regs + TSI721_DMAC_DPTRH);
+		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
+			bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+		/* Setup descriptor status FIFO */
+		iowrite32(((u64)bdma_chan->sts_phys >> 32),
+			bdma_chan->regs + TSI721_DMAC_DSBH);
+		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
+			bdma_chan->regs + TSI721_DMAC_DSBL);
+		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
+			bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+		/* Clear interrupt bits */
+		iowrite32(TSI721_DMAC_INT_ALL,
+			bdma_chan->regs + TSI721_DMAC_INT);
+
+		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+		bdma_chan->sts_rdptr = 0;
+		udelay(10);
+
+		desc = bdma_chan->active_tx;
+		desc->status = DMA_ERROR;
+		dma_cookie_complete(&desc->txd);
+		list_add(&desc->desc_node, &bdma_chan->free_list);
+		bdma_chan->active_tx = NULL;
+		if (bdma_chan->active)
+			tsi721_advance_work(bdma_chan, NULL);
+		spin_unlock(&bdma_chan->lock);
+	}
+
+	if (dmac_int & TSI721_DMAC_INT_STFULL) {
+		tsi_err(&bdma_chan->dchan.dev->device,
+			"DMAC%d descriptor status FIFO is full",
+			bdma_chan->id);
+	}
+
+	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+		struct tsi721_tx_desc *desc;
+
+		tsi721_clr_stat(bdma_chan);
+		spin_lock(&bdma_chan->lock);
+		desc = bdma_chan->active_tx;
+
+		if (desc->sg_len == 0) {
+			dma_async_tx_callback callback = NULL;
+			void *param = NULL;
+
+			desc->status = DMA_COMPLETE;
+			dma_cookie_complete(&desc->txd);
+			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
+				callback = desc->txd.callback;
+				param = desc->txd.callback_param;
+			}
+			list_add(&desc->desc_node, &bdma_chan->free_list);
+			bdma_chan->active_tx = NULL;
+			if (bdma_chan->active)
+				tsi721_advance_work(bdma_chan, NULL);
+			spin_unlock(&bdma_chan->lock);
+			if (callback)
+				callback(param);
+		} else {
+			if (bdma_chan->active)
+				tsi721_advance_work(bdma_chan,
+						    bdma_chan->active_tx);
+			spin_unlock(&bdma_chan->lock);
+		}
+	}
+err_out:
+	/* Re-Enable BDMA channel interrupts */
+	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+	dma_cookie_t cookie;
+
+	/* Check if the descriptor is detached from any lists */
+	if (!list_empty(&desc->desc_node)) {
+		tsi_err(&bdma_chan->dchan.dev->device,
+			"DMAC%d wrong state of descriptor %p",
+			bdma_chan->id, txd);
+		return -EIO;
+	}
+
+	spin_lock_bh(&bdma_chan->lock);
+
+	if (!bdma_chan->active) {
+		spin_unlock_bh(&bdma_chan->lock);
+		return -ENODEV;
+	}
+
+	cookie = dma_cookie_assign(txd);
+	desc->status = DMA_IN_PROGRESS;
+	list_add_tail(&desc->desc_node, &bdma_chan->queue);
+	tsi721_advance_work(bdma_chan, NULL);
+
+	spin_unlock_bh(&bdma_chan->lock);
+	return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	struct tsi721_tx_desc *desc = NULL;
+	int i;
+
+	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
+
+	if (bdma_chan->bd_base)
+		return dma_txqueue_sz;
+
+	/* Initialize BDMA channel */
+	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
+		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
+			bdma_chan->id);
+		return -ENODEV;
+	}
+
+	/* Allocate queue of transaction descriptors */
+	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
+			GFP_ATOMIC);
+	if (!desc) {
+		tsi_err(&dchan->dev->device,
+			"DMAC%d Failed to allocate logical descriptors",
+			bdma_chan->id);
+		tsi721_bdma_ch_free(bdma_chan);
+		return -ENOMEM;
+	}
+
+	bdma_chan->tx_desc = desc;
+
+	for (i = 0; i < dma_txqueue_sz; i++) {
+		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+		desc[i].txd.tx_submit = tsi721_tx_submit;
+		desc[i].txd.flags = DMA_CTRL_ACK;
+		list_add(&desc[i].desc_node, &bdma_chan->free_list);
+	}
+
+	dma_cookie_init(dchan);
+
+	bdma_chan->active = true;
+	tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+	return dma_txqueue_sz;
+}
+
+static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
+{
+	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+					   bdma_chan->id].vector);
+		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+					   bdma_chan->id].vector);
+	} else
+#endif
+	synchronize_irq(priv->pdev->irq);
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
+
+	if (bdma_chan->bd_base == NULL)
+		return;
+
+	tsi721_bdma_interrupt_enable(bdma_chan, 0);
+	bdma_chan->active = false;
+	tsi721_sync_dma_irq(bdma_chan);
+	tasklet_kill(&bdma_chan->tasklet);
+	INIT_LIST_HEAD(&bdma_chan->free_list);
+	kfree(bdma_chan->tx_desc);
+	tsi721_bdma_ch_free(bdma_chan);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+				 struct dma_tx_state *txstate)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	enum dma_status	status;
+
+	spin_lock_bh(&bdma_chan->lock);
+	status = dma_cookie_status(dchan, cookie, txstate);
+	spin_unlock_bh(&bdma_chan->lock);
+	return status;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
+
+	spin_lock_bh(&bdma_chan->lock);
+	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active)
+		tsi721_advance_work(bdma_chan, NULL);
+	spin_unlock_bh(&bdma_chan->lock);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+			struct scatterlist *sgl, unsigned int sg_len,
+			enum dma_transfer_direction dir, unsigned long flags,
+			void *tinfo)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	struct tsi721_tx_desc *desc;
+	struct rio_dma_ext *rext = tinfo;
+	enum dma_rtype rtype;
+	struct dma_async_tx_descriptor *txd = NULL;
+
+	if (!sgl || !sg_len) {
+		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
+			bdma_chan->id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
+		  (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");
+
+	if (dir == DMA_DEV_TO_MEM)
+		rtype = NREAD;
+	else if (dir == DMA_MEM_TO_DEV) {
+		switch (rext->wr_type) {
+		case RDW_ALL_NWRITE:
+			rtype = ALL_NWRITE;
+			break;
+		case RDW_ALL_NWRITE_R:
+			rtype = ALL_NWRITE_R;
+			break;
+		case RDW_LAST_NWRITE_R:
+		default:
+			rtype = LAST_NWRITE_R;
+			break;
+		}
+	} else {
+		tsi_err(&dchan->dev->device,
+			"DMAC%d Unsupported DMA direction option",
+			bdma_chan->id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_bh(&bdma_chan->lock);
+
+	if (!list_empty(&bdma_chan->free_list)) {
+		desc = list_first_entry(&bdma_chan->free_list,
+				struct tsi721_tx_desc, desc_node);
+		list_del_init(&desc->desc_node);
+		desc->destid = rext->destid;
+		desc->rio_addr = rext->rio_addr;
+		desc->rio_addr_u = 0;
+		desc->rtype = rtype;
+		desc->sg_len	= sg_len;
+		desc->sg	= sgl;
+		txd		= &desc->txd;
+		txd->flags	= flags;
+	}
+
+	spin_unlock_bh(&bdma_chan->lock);
+
+	if (!txd) {
+		tsi_debug(DMA, &dchan->dev->device,
+			  "DMAC%d free TXD is not available", bdma_chan->id);
+		return ERR_PTR(-EBUSY);
+	}
+
+	return txd;
+}
+
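+/*
+ * Illustrative client-side sketch (not part of this file): a RapidIO
+ * client reaches tsi721_prep_rio_sg() through the generic dmaengine
+ * slave_sg hook, passing the RapidIO context as the extra argument:
+ *
+ *	struct rio_dma_ext rio_ext = {
+ *		.destid   = destid,
+ *		.rio_addr = raddr,
+ *		.wr_type  = RDW_LAST_NWRITE_R,
+ *	};
+ *	txd = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
+ *						   DMA_MEM_TO_DEV, flags,
+ *						   &rio_ext);
+ */
+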
+static int tsi721_terminate_all(struct dma_chan *dchan)
+{
+	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+	struct tsi721_tx_desc *desc, *_d;
+	LIST_HEAD(list);
+
+	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
+
+	spin_lock_bh(&bdma_chan->lock);
+
+	bdma_chan->active = false;
+
+	while (!tsi721_dma_is_idle(bdma_chan)) {
+		udelay(5);
+#if 0
+		/* make sure to stop the transfer */
+		iowrite32(TSI721_DMAC_CTL_SUSP,
+			  bdma_chan->regs + TSI721_DMAC_CTL);
+
+		/* Wait until DMA channel stops */
+		do {
+			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
+#endif
+	}
+
+	if (bdma_chan->active_tx)
+		list_add(&bdma_chan->active_tx->desc_node, &list);
+	list_splice_init(&bdma_chan->queue, &list);
+
+	list_for_each_entry_safe(desc, _d, &list, desc_node)
+		tsi721_dma_tx_err(bdma_chan, desc);
+
+	spin_unlock_bh(&bdma_chan->lock);
+
+	return 0;
+}
+
+static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
+{
+	if (!bdma_chan->active)
+		return;
+	spin_lock_bh(&bdma_chan->lock);
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		int timeout = 100000;
+
+		/* stop the transfer in progress */
+		iowrite32(TSI721_DMAC_CTL_SUSP,
+			  bdma_chan->regs + TSI721_DMAC_CTL);
+
+		/* Wait until DMA channel stops */
+		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
+			udelay(1);
+	}
+
+	spin_unlock_bh(&bdma_chan->lock);
+}
+
+void tsi721_dma_stop_all(struct tsi721_device *priv)
+{
+	int i;
+
+	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
+		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
+			tsi721_dma_stop(&priv->bdma[i]);
+	}
+}
+
+int tsi721_register_dma(struct tsi721_device *priv)
+{
+	int i;
+	int nr_channels = 0;
+	int err;
+	struct rio_mport *mport = &priv->mport;
+
+	INIT_LIST_HEAD(&mport->dma.channels);
+
+	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
+		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
+			continue;
+
+		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+		bdma_chan->dchan.device = &mport->dma;
+		bdma_chan->dchan.cookie = 1;
+		bdma_chan->dchan.chan_id = i;
+		bdma_chan->id = i;
+		bdma_chan->active = false;
+
+		spin_lock_init(&bdma_chan->lock);
+
+		bdma_chan->active_tx = NULL;
+		INIT_LIST_HEAD(&bdma_chan->queue);
+		INIT_LIST_HEAD(&bdma_chan->free_list);
+
+		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+			     (unsigned long)bdma_chan);
+		list_add_tail(&bdma_chan->dchan.device_node,
+			      &mport->dma.channels);
+		nr_channels++;
+	}
+
+	mport->dma.chancnt = nr_channels;
+	dma_cap_zero(mport->dma.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+	mport->dma.dev = &priv->pdev->dev;
+	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+	mport->dma.device_tx_status = tsi721_tx_status;
+	mport->dma.device_issue_pending = tsi721_issue_pending;
+	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+	mport->dma.device_terminate_all = tsi721_terminate_all;
+
+	err = dma_async_device_register(&mport->dma);
+	if (err)
+		tsi_err(&priv->pdev->dev, "Failed to register DMA device");
+
+	return err;
+}
+
+void tsi721_unregister_dma(struct tsi721_device *priv)
+{
+	struct rio_mport *mport = &priv->mport;
+	struct dma_chan *chan, *_c;
+	struct tsi721_bdma_chan *bdma_chan;
+
+	tsi721_dma_stop_all(priv);
+	dma_async_device_unregister(&mport->dma);
+
+	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
+					device_node) {
+		bdma_chan = to_tsi721_chan(chan);
+		if (bdma_chan->active) {
+			tsi721_bdma_interrupt_enable(bdma_chan, 0);
+			bdma_chan->active = false;
+			tsi721_sync_dma_irq(bdma_chan);
+			tasklet_kill(&bdma_chan->tasklet);
+			INIT_LIST_HEAD(&bdma_chan->free_list);
+			kfree(bdma_chan->tx_desc);
+			tsi721_bdma_ch_free(bdma_chan);
+		}
+
+		list_del(&chan->device_node);
+	}
+}