[Feature]add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/ntb/Kconfig b/src/kernel/linux/v4.14/drivers/ntb/Kconfig
new file mode 100644
index 0000000..95944e5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/Kconfig
@@ -0,0 +1,28 @@
+menuconfig NTB
+	tristate "Non-Transparent Bridge support"
+	depends on PCI
+	help
+	 The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+	 connecting 2 systems.  When configured, writes to the device's PCI
+	 mapped memory will be mirrored to a buffer on the remote system.  The
+	 ntb Linux driver uses this point-to-point communication as a method to
+	 transfer data from one system to the other.
+
+	 If unsure, say N.
+
+if NTB
+
+source "drivers/ntb/hw/Kconfig"
+
+source "drivers/ntb/test/Kconfig"
+
+config NTB_TRANSPORT
+	tristate "NTB Transport Client"
+	help
+	 This is a transport driver that enables connected systems to exchange
+	 messages over the ntb hardware.  The transport exposes a queue pair api
+	 to client drivers.
+
+	 If unsure, say N.
+
+endif # NTB
diff --git a/src/kernel/linux/v4.14/drivers/ntb/Makefile b/src/kernel/linux/v4.14/drivers/ntb/Makefile
new file mode 100644
index 0000000..1921dec
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_NTB) += ntb.o hw/ test/
+obj-$(CONFIG_NTB_TRANSPORT) += ntb_transport.o
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/Kconfig b/src/kernel/linux/v4.14/drivers/ntb/hw/Kconfig
new file mode 100644
index 0000000..a89243c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/Kconfig
@@ -0,0 +1,3 @@
+source "drivers/ntb/hw/amd/Kconfig"
+source "drivers/ntb/hw/idt/Kconfig"
+source "drivers/ntb/hw/intel/Kconfig"
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/Makefile b/src/kernel/linux/v4.14/drivers/ntb/hw/Makefile
new file mode 100644
index 0000000..87332c3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NTB_AMD)	+= amd/
+obj-$(CONFIG_NTB_IDT)	+= idt/
+obj-$(CONFIG_NTB_INTEL)	+= intel/
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/amd/Kconfig b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/Kconfig
new file mode 100644
index 0000000..cfe903c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/Kconfig
@@ -0,0 +1,7 @@
+config NTB_AMD
+	tristate "AMD Non-Transparent Bridge support"
+	depends on X86_64
+	help
+	 This driver supports AMD NTB on capable Zeppelin hardware.
+
+	 If unsure, say N.
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/amd/Makefile b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/Makefile
new file mode 100644
index 0000000..ad54da9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_AMD) += ntb_hw_amd.o
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/amd/ntb_hw_amd.c b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/ntb_hw_amd.c
new file mode 100644
index 0000000..f0788aa
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -0,0 +1,1151 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of AMD Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * AMD PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Xiangliang Yu <Xiangliang.Yu@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_amd.h"
+
+#define NTB_NAME	"ntb_hw_amd"
+#define NTB_DESC	"AMD(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER		"1.0"
+
+MODULE_DESCRIPTION(NTB_DESC);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("AMD Inc.");
+
+static const struct file_operations amd_ntb_debugfs_info;
+static struct dentry *debugfs_dir;
+
+/*
+ * ndev_mw_to_bar() - translate a memory-window index into its PCI BAR number.
+ *
+ * Windows map to BARs as 1 << idx (BAR1, BAR2, BAR4).  Returns a negative
+ * errno for an out-of-range index.
+ */
+static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
+{
+	/* Valid indices are 0 .. mw_count-1; idx == mw_count must be rejected
+	 * too, otherwise 1 << mw_count names a BAR that is not a window.
+	 */
+	if (idx < 0 || idx >= ndev->mw_count)
+		return -EINVAL;
+
+	return 1 << idx;
+}
+
+/* Report the number of inbound memory windows; only the default peer index
+ * is supported on this point-to-point hardware.
+ */
+static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	return ntb_ndev(ntb)->mw_count;
+}
+
+/*
+ * Report the constraints of inbound memory window @idx: translated address
+ * must be 4K-aligned, the size has byte granularity, and the maximum size is
+ * the length of the BAR backing the window.  Any output pointer may be NULL.
+ */
+static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+				resource_size_t *addr_align,
+				resource_size_t *size_align,
+				resource_size_t *size_max)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	int bar;
+
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	if (addr_align)
+		*addr_align = SZ_4K;
+
+	if (size_align)
+		*size_align = 1;
+
+	if (size_max)
+		*size_max = pci_resource_len(ndev->ntb.pdev, bar);
+
+	return 0;
+}
+
+/*
+ * Program the inbound translation of memory window @idx so that peer writes
+ * into the window land at local DMA address @addr, limited to @size bytes.
+ *
+ * The translation register lives on the peer side of the bridge while the
+ * limit register is local.  Both writes are read back and verified; on a
+ * verification failure the registers are rolled back and -EIO is returned.
+ */
+static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+				dma_addr_t addr, resource_size_t size)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	unsigned long xlat_reg, limit_reg = 0;
+	resource_size_t mw_size;
+	void __iomem *mmio, *peer_mmio;
+	u64 base_addr, limit, reg_val;
+	int bar;
+
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	mw_size = pci_resource_len(ntb->pdev, bar);
+
+	/* make sure the range fits in the usable mw size */
+	if (size > mw_size)
+		return -EINVAL;
+
+	mmio = ndev->self_mmio;
+	peer_mmio = ndev->peer_mmio;
+
+	base_addr = pci_resource_start(ntb->pdev, bar);
+
+	if (bar != 1) {
+		/* BAR2/BAR4 windows: 64-bit XLAT and 64-bit limit registers,
+		 * spaced 4 bytes apart per BAR pair.
+		 */
+		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
+		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);
+
+		/* Set the limit if supported */
+		limit = size;
+
+		/* set and verify setting the translation address */
+		write64(addr, peer_mmio + xlat_reg);
+		reg_val = read64(peer_mmio + xlat_reg);
+		if (reg_val != addr) {
+			write64(0, peer_mmio + xlat_reg);
+			return -EIO;
+		}
+
+		/* set and verify setting the limit */
+		write64(limit, mmio + limit_reg);
+		reg_val = read64(mmio + limit_reg);
+		if (reg_val != limit) {
+			/* roll back: restore the BAR base and clear the xlat */
+			write64(base_addr, mmio + limit_reg);
+			write64(0, peer_mmio + xlat_reg);
+			return -EIO;
+		}
+	} else {
+		/* BAR1 window: 64-bit XLAT but only a 32-bit limit register. */
+		xlat_reg = AMD_BAR1XLAT_OFFSET;
+		limit_reg = AMD_BAR1LMT_OFFSET;
+
+		/* Set the limit if supported */
+		limit = size;
+
+		/* set and verify setting the translation address */
+		write64(addr, peer_mmio + xlat_reg);
+		reg_val = read64(peer_mmio + xlat_reg);
+		if (reg_val != addr) {
+			write64(0, peer_mmio + xlat_reg);
+			return -EIO;
+		}
+
+		/* set and verify setting the limit */
+		writel(limit, mmio + limit_reg);
+		reg_val = readl(mmio + limit_reg);
+		if (reg_val != limit) {
+			writel(base_addr, mmio + limit_reg);
+			writel(0, peer_mmio + xlat_reg);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Evaluate the cached link state.  Returns nonzero when the link is up.
+ *
+ * peer_sta accumulates SMU event bits delivered by the ISR; when it is clear
+ * the hardware control/status word (cntl_sta) is authoritative.  A pending
+ * LINK_UP event is consumed here and reported as link-up.
+ */
+static int amd_link_is_up(struct amd_ntb_dev *ndev)
+{
+	if (!ndev->peer_sta)
+		return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+
+	if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
+		ndev->peer_sta = 0;
+		return 1;
+	}
+
+	/* If peer_sta is reset or D0 event, the ISR has
+	 * started a timer to check link status of hardware.
+	 * So here just clear status bit. And if peer_sta is
+	 * D3 or PME_TO, D0/reset event will be happened when
+	 * system wakeup/poweron, so do nothing here.
+	 */
+	if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
+		ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
+	else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
+		ndev->peer_sta = 0;
+
+	return 0;
+}
+
+/*
+ * NTB API link query.  Returns 1 when the link is up (filling in the
+ * negotiated speed/width from the cached lnk_sta), otherwise 0 with
+ * NTB_SPEED_NONE/NTB_WIDTH_NONE.  Output pointers may be NULL.
+ */
+static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
+			      enum ntb_speed *speed,
+			      enum ntb_width *width)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	int ret = 0;
+
+	if (amd_link_is_up(ndev)) {
+		if (speed)
+			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
+		if (width)
+			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
+
+		dev_dbg(&ntb->pdev->dev, "link is up.\n");
+
+		ret = 1;
+	} else {
+		if (speed)
+			*speed = NTB_SPEED_NONE;
+		if (width)
+			*width = NTB_WIDTH_NONE;
+
+		dev_dbg(&ntb->pdev->dev, "link is down.\n");
+	}
+
+	return ret;
+}
+
+/*
+ * Enable the NTB link: unmask SMU event interrupts and, on the primary side
+ * only, set the PMM/SMM control bits.  The secondary side cannot drive the
+ * link and gets -EINVAL (after the event interrupts have been unmasked).
+ * The requested max speed/width are not used by this hardware.
+ */
+static int amd_ntb_link_enable(struct ntb_dev *ntb,
+			       enum ntb_speed max_speed,
+			       enum ntb_width max_width)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ntb_ctl;
+
+	/* Enable event interrupt */
+	ndev->int_mask &= ~AMD_EVENT_INTMASK;
+	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
+
+	if (ndev->ntb.topo == NTB_TOPO_SEC)
+		return -EINVAL;
+	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
+
+	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
+	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+
+	return 0;
+}
+
+/*
+ * Disable the NTB link: mask SMU event interrupts and, on the primary side
+ * only, clear the PMM/SMM control bits.  The secondary side cannot drive the
+ * link and gets -EINVAL (after the event interrupts have been masked).
+ */
+static int amd_ntb_link_disable(struct ntb_dev *ntb)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ntb_ctl;
+
+	/* Disable event interrupt */
+	ndev->int_mask |= AMD_EVENT_INTMASK;
+	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
+
+	if (ndev->ntb.topo == NTB_TOPO_SEC)
+		return -EINVAL;
+	/* Fixed copy-paste: this is the disable path, not enable. */
+	dev_dbg(&ntb->pdev->dev, "Disabling Link.\n");
+
+	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
+	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+
+	return 0;
+}
+
+/* Outbound (peer) memory-window count equals the inbound count. */
+static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+	/* The same as for inbound MWs */
+	return ntb_ndev(ntb)->mw_count;
+}
+
+/* Report the local physical address and size of the BAR backing outbound
+ * memory window @idx.  Output pointers may be NULL.
+ */
+static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+				    phys_addr_t *base, resource_size_t *size)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	int bar;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	if (base)
+		*base = pci_resource_start(ndev->ntb.pdev, bar);
+
+	if (size)
+		*size = pci_resource_len(ndev->ntb.pdev, bar);
+
+	return 0;
+}
+
+/* Bitmask of usable doorbell bits. */
+static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->db_valid_mask;
+}
+
+/* Number of doorbell interrupt vectors. */
+static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->db_count;
+}
+
+/* Doorbell bits serviced by interrupt vector @db_vector (one bit each);
+ * out-of-range vectors yield 0.
+ */
+static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+
+	if (db_vector < 0 || db_vector > ndev->db_count)
+		return 0;
+
+	/* masking with db_valid_mask also zeroes the db_vector == db_count
+	 * boundary case above
+	 */
+	return ntb_ndev(ntb)->db_valid_mask & (1 << db_vector);
+}
+
+/* Read the pending doorbell bits (16-bit status register). */
+static u64 amd_ntb_db_read(struct ntb_dev *ntb)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+
+	return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
+}
+
+/* Acknowledge doorbell bits: the status register is write-one-to-clear. */
+static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+
+	writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET);
+
+	return 0;
+}
+
+/* Mask (disable) the given doorbell interrupt bits.  The cached db_mask and
+ * the hardware register are updated together under db_mask_lock.
+ */
+static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	unsigned long flags;
+
+	if (db_bits & ~ndev->db_valid_mask)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ndev->db_mask_lock, flags);
+	ndev->db_mask |= db_bits;
+	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
+	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);
+
+	return 0;
+}
+
+/* Unmask (enable) the given doorbell interrupt bits. */
+static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	unsigned long flags;
+
+	if (db_bits & ~ndev->db_valid_mask)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ndev->db_mask_lock, flags);
+	ndev->db_mask &= ~db_bits;
+	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
+	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);
+
+	return 0;
+}
+
+/* Ring the peer's doorbell by writing to the local request register. */
+static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+
+	writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);
+
+	return 0;
+}
+
+/* Number of scratchpad registers owned by this side. */
+static int amd_ntb_spad_count(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->spad_count;
+}
+
+/* Read local scratchpad @idx; out-of-range indices read as 0. */
+static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	u32 offset;
+
+	if (idx < 0 || idx >= ndev->spad_count)
+		return 0;
+
+	offset = ndev->self_spad + (idx << 2);
+	return readl(mmio + AMD_SPAD_OFFSET + offset);
+}
+
+/* Write @val to local scratchpad @idx. */
+static int amd_ntb_spad_write(struct ntb_dev *ntb,
+			      int idx, u32 val)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	u32 offset;
+
+	if (idx < 0 || idx >= ndev->spad_count)
+		return -EINVAL;
+
+	offset = ndev->self_spad + (idx << 2);
+	writel(val, mmio + AMD_SPAD_OFFSET + offset);
+
+	return 0;
+}
+
+/* Read the peer's scratchpad @sidx.  NOTE(review): the u32 return type makes
+ * the -EINVAL error indistinguishable from a legitimate 0xffffffea value.
+ */
+static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	u32 offset;
+
+	if (sidx < 0 || sidx >= ndev->spad_count)
+		return -EINVAL;
+
+	offset = ndev->peer_spad + (sidx << 2);
+	return readl(mmio + AMD_SPAD_OFFSET + offset);
+}
+
+/* Write @val to the peer's scratchpad @sidx. */
+static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+				   int sidx, u32 val)
+{
+	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *mmio = ndev->self_mmio;
+	u32 offset;
+
+	if (sidx < 0 || sidx >= ndev->spad_count)
+		return -EINVAL;
+
+	offset = ndev->peer_spad + (sidx << 2);
+	writel(val, mmio + AMD_SPAD_OFFSET + offset);
+
+	return 0;
+}
+
+/* NTB core callback table wiring this driver into the generic NTB API. */
+static const struct ntb_dev_ops amd_ntb_ops = {
+	.mw_count		= amd_ntb_mw_count,
+	.mw_get_align		= amd_ntb_mw_get_align,
+	.mw_set_trans		= amd_ntb_mw_set_trans,
+	.peer_mw_count		= amd_ntb_peer_mw_count,
+	.peer_mw_get_addr	= amd_ntb_peer_mw_get_addr,
+	.link_is_up		= amd_ntb_link_is_up,
+	.link_enable		= amd_ntb_link_enable,
+	.link_disable		= amd_ntb_link_disable,
+	.db_valid_mask		= amd_ntb_db_valid_mask,
+	.db_vector_count	= amd_ntb_db_vector_count,
+	.db_vector_mask		= amd_ntb_db_vector_mask,
+	.db_read		= amd_ntb_db_read,
+	.db_clear		= amd_ntb_db_clear,
+	.db_set_mask		= amd_ntb_db_set_mask,
+	.db_clear_mask		= amd_ntb_db_clear_mask,
+	.peer_db_set		= amd_ntb_peer_db_set,
+	.spad_count		= amd_ntb_spad_count,
+	.spad_read		= amd_ntb_spad_read,
+	.spad_write		= amd_ntb_spad_write,
+	.peer_spad_read		= amd_ntb_peer_spad_read,
+	.peer_spad_write	= amd_ntb_peer_spad_write,
+};
+
+/* Acknowledge an SMU event by setting @bit in the ack register, and record
+ * it in peer_sta for amd_link_is_up() to evaluate later.
+ */
+static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	int reg;
+
+	reg = readl(mmio + AMD_SMUACK_OFFSET);
+	reg |= bit;
+	writel(reg, mmio + AMD_SMUACK_OFFSET);
+
+	ndev->peer_sta |= bit;
+}
+
+/*
+ * Handle SMU side-band events (reset, power transitions, link changes)
+ * signalled through the interrupt status register.  Events are acknowledged
+ * via amd_ack_smu() and propagated to NTB clients with ntb_link_event();
+ * reset/D0 additionally start the heartbeat timer to re-poll the link.
+ */
+static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	struct device *dev = &ndev->ntb.pdev->dev;
+	u32 status;
+
+	status = readl(mmio + AMD_INTSTAT_OFFSET);
+	if (!(status & AMD_EVENT_INTMASK))
+		return;
+
+	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);
+
+	status &= AMD_EVENT_INTMASK;
+	switch (status) {
+	case AMD_PEER_FLUSH_EVENT:
+		dev_info(dev, "Flush is done.\n");
+		break;
+	case AMD_PEER_RESET_EVENT:
+		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
+
+		/* link down first */
+		ntb_link_event(&ndev->ntb);
+		/* polling peer status */
+		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+
+		break;
+	case AMD_PEER_D3_EVENT:
+	case AMD_PEER_PMETO_EVENT:
+	case AMD_LINK_UP_EVENT:
+	case AMD_LINK_DOWN_EVENT:
+		amd_ack_smu(ndev, status);
+
+		/* link down */
+		ntb_link_event(&ndev->ntb);
+
+		break;
+	case AMD_PEER_D0_EVENT:
+		mmio = ndev->peer_mmio;
+		status = readl(mmio + AMD_PMESTAT_OFFSET);
+		/* check if this is WAKEUP event */
+		if (status & 0x1)
+			dev_info(dev, "Wakeup is done.\n");
+
+		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
+
+		/* start a timer to poll link status */
+		schedule_delayed_work(&ndev->hb_timer,
+				      AMD_LINK_HB_TIMEOUT);
+		break;
+	default:
+		dev_info(dev, "event status = 0x%x.\n", status);
+		break;
+	}
+}
+
+/*
+ * Common interrupt dispatch: vectors beyond the doorbell range (or the single
+ * shared vector when not using multi-vector MSI-X) handle SMU events; vectors
+ * within the doorbell range are forwarded to NTB clients as doorbell events.
+ */
+static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
+{
+	dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);
+
+	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
+		amd_handle_event(ndev, vec);
+
+	if (vec < AMD_DB_CNT)
+		ntb_db_event(&ndev->ntb, vec);
+
+	return IRQ_HANDLED;
+}
+
+/* Per-vector MSI-X handler: dev is the amd_ntb_vec carrying the vector no. */
+static irqreturn_t ndev_vec_isr(int irq, void *dev)
+{
+	struct amd_ntb_vec *nvec = dev;
+
+	return ndev_interrupt(nvec->ndev, nvec->num);
+}
+
+/* Single-IRQ handler (MSI or INTx); derives the vector from the irq number. */
+static irqreturn_t ndev_irq_isr(int irq, void *dev)
+{
+	struct amd_ntb_dev *ndev = dev;
+
+	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
+}
+
+/*
+ * Set up interrupts, trying progressively simpler mechanisms:
+ * MSI-X (msix_min..msix_max vectors) -> single MSI -> shared INTx.
+ * Each failure path falls through (via the error labels) into the next
+ * mechanism's setup, so the goto targets double as the fallback entry points.
+ * On success db_count/msix_vec_count record the chosen configuration.
+ */
+static int ndev_init_isr(struct amd_ntb_dev *ndev,
+			 int msix_min, int msix_max)
+{
+	struct pci_dev *pdev;
+	int rc, i, msix_count, node;
+
+	pdev = ndev->ntb.pdev;
+
+	node = dev_to_node(&pdev->dev);
+
+	/* start with all doorbells masked */
+	ndev->db_mask = ndev->db_valid_mask;
+
+	/* Try to set up msix irq */
+	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
+				 GFP_KERNEL, node);
+	if (!ndev->vec)
+		goto err_msix_vec_alloc;
+
+	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
+				  GFP_KERNEL, node);
+	if (!ndev->msix)
+		goto err_msix_alloc;
+
+	for (i = 0; i < msix_max; ++i)
+		ndev->msix[i].entry = i;
+
+	msix_count = pci_enable_msix_range(pdev, ndev->msix,
+					   msix_min, msix_max);
+	if (msix_count < 0)
+		goto err_msix_enable;
+
+	/* NOTE: Disable MSIX if msix count is less than 16 because of
+	 * hardware limitation.
+	 */
+	if (msix_count < msix_min) {
+		pci_disable_msix(pdev);
+		goto err_msix_enable;
+	}
+
+	for (i = 0; i < msix_count; ++i) {
+		ndev->vec[i].ndev = ndev;
+		ndev->vec[i].num = i;
+		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
+				 "ndev_vec_isr", &ndev->vec[i]);
+		if (rc)
+			goto err_msix_request;
+	}
+
+	dev_dbg(&pdev->dev, "Using msix interrupts\n");
+	ndev->db_count = msix_min;
+	ndev->msix_vec_count = msix_max;
+	return 0;
+
+err_msix_request:
+	while (i-- > 0)
+		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+	pci_disable_msix(pdev);
+err_msix_enable:
+	kfree(ndev->msix);
+err_msix_alloc:
+	kfree(ndev->vec);
+err_msix_vec_alloc:
+	ndev->msix = NULL;
+	ndev->vec = NULL;
+
+	/* Try to set up msi irq */
+	rc = pci_enable_msi(pdev);
+	if (rc)
+		goto err_msi_enable;
+
+	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
+			 "ndev_irq_isr", ndev);
+	if (rc)
+		goto err_msi_request;
+
+	dev_dbg(&pdev->dev, "Using msi interrupts\n");
+	ndev->db_count = 1;
+	ndev->msix_vec_count = 1;
+	return 0;
+
+err_msi_request:
+	pci_disable_msi(pdev);
+err_msi_enable:
+
+	/* Try to set up intx irq */
+	pci_intx(pdev, 1);
+
+	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
+			 "ndev_irq_isr", ndev);
+	if (rc)
+		goto err_intx_request;
+
+	dev_dbg(&pdev->dev, "Using intx interrupts\n");
+	ndev->db_count = 1;
+	ndev->msix_vec_count = 1;
+	return 0;
+
+err_intx_request:
+	return rc;
+}
+
+/*
+ * Tear down whichever interrupt mechanism ndev_init_isr() ended up using.
+ * Doorbell interrupts are masked first so no new events fire during teardown.
+ */
+static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
+{
+	struct pci_dev *pdev;
+	void __iomem *mmio = ndev->self_mmio;
+	int i;
+
+	pdev = ndev->ntb.pdev;
+
+	/* Mask all doorbell interrupts */
+	ndev->db_mask = ndev->db_valid_mask;
+	writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
+
+	if (ndev->msix) {
+		i = ndev->msix_vec_count;
+		while (i--)
+			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+		pci_disable_msix(pdev);
+		kfree(ndev->msix);
+		kfree(ndev->vec);
+	} else {
+		free_irq(pdev->irq, ndev);
+		if (pci_dev_msi_enabled(pdev))
+			pci_disable_msi(pdev);
+		else
+			pci_intx(pdev, 0);
+	}
+}
+
+/*
+ * debugfs "info" read handler: format a snapshot of device state (topology,
+ * link, counts, doorbell registers, XLAT/LMT registers) into a bounded
+ * kernel buffer and copy it to userspace with simple_read_from_buffer().
+ */
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+				 size_t count, loff_t *offp)
+{
+	struct amd_ntb_dev *ndev;
+	void __iomem *mmio;
+	char *buf;
+	size_t buf_size;
+	ssize_t ret, off;
+	union { u64 v64; u32 v32; u16 v16; } u;
+
+	ndev = filp->private_data;
+	mmio = ndev->self_mmio;
+
+	/* cap the scratch buffer at 2K regardless of the requested count */
+	buf_size = min(count, 0x800ul);
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	off = 0;
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "NTB Device Information:\n");
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Connection Topology -\t%s\n",
+			 ntb_topo_string(ndev->ntb.topo));
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+	if (!amd_link_is_up(ndev)) {
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Status -\t\tDown\n");
+	} else {
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Status -\t\tUp\n");
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Speed -\t\tPCI-E Gen %u\n",
+				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Width -\t\tx%u\n",
+				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+	}
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Memory Window Count -\t%u\n", ndev->mw_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Scratchpad Count -\t%u\n", ndev->spad_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Count -\t%u\n", ndev->db_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "MSIX Vector Count -\t%u\n", ndev->msix_vec_count);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+
+	u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Mask -\t\t\t%#06x\n", u.v32);
+
+	u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Bell -\t\t\t%#06x\n", u.v32);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "\nNTB Incoming XLAT:\n");
+
+	u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "XLAT1 -\t\t%#018llx\n", u.v64);
+
+	u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "XLAT23 -\t\t%#018llx\n", u.v64);
+
+	u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "XLAT45 -\t\t%#018llx\n", u.v64);
+
+	u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "LMT1 -\t\t\t%#06x\n", u.v32);
+
+	u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "LMT23 -\t\t\t%#018llx\n", u.v64);
+
+	u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "LMT45 -\t\t\t%#018llx\n", u.v64);
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+	kfree(buf);
+	return ret;
+}
+
+/* Create the per-device debugfs directory and its "info" file under the
+ * module-wide debugfs_dir; degrade gracefully (NULL pointers) when debugfs
+ * is unavailable.
+ */
+static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
+{
+	if (!debugfs_dir) {
+		ndev->debugfs_dir = NULL;
+		ndev->debugfs_info = NULL;
+	} else {
+		ndev->debugfs_dir =
+			debugfs_create_dir(pci_name(ndev->ntb.pdev),
+					   debugfs_dir);
+		if (!ndev->debugfs_dir)
+			ndev->debugfs_info = NULL;
+		else
+			ndev->debugfs_info =
+				debugfs_create_file("info", S_IRUSR,
+						    ndev->debugfs_dir, ndev,
+						    &amd_ntb_debugfs_info);
+	}
+}
+
+/* Remove the per-device debugfs tree (safe on NULL). */
+static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
+{
+	debugfs_remove_recursive(ndev->debugfs_dir);
+}
+
+/* Initialize the software state of a freshly allocated amd_ntb_dev: wire up
+ * the pci_dev and ops table, default to no topology, mask all SMU events.
+ */
+static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
+				    struct pci_dev *pdev)
+{
+	ndev->ntb.pdev = pdev;
+	ndev->ntb.topo = NTB_TOPO_NONE;
+	ndev->ntb.ops = &amd_ntb_ops;
+	ndev->int_mask = AMD_EVENT_INTMASK;
+	spin_lock_init(&ndev->db_mask_lock);
+}
+
+/*
+ * Sample the link-active bit from the peer's side-info register and, when it
+ * changed since the last poll, refresh the cached PCIe link status word from
+ * config space.  Returns 1 if the link state changed, 0 otherwise (including
+ * when the config-space read fails).
+ */
+static int amd_poll_link(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->peer_mmio;
+	u32 reg, stat;
+	int rc;
+
+	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
+	reg &= NTB_LIN_STA_ACTIVE_BIT;
+
+	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
+
+	if (reg == ndev->cntl_sta)
+		return 0;
+
+	ndev->cntl_sta = reg;
+
+	rc = pci_read_config_dword(ndev->ntb.pdev,
+				   AMD_LINK_STATUS_OFFSET, &stat);
+	if (rc)
+		return 0;
+	ndev->lnk_sta = stat;
+
+	return 1;
+}
+
+/* Heartbeat worker: poll the link, notify clients on a change, and keep
+ * rescheduling itself while the link remains down.
+ */
+static void amd_link_hb(struct work_struct *work)
+{
+	struct amd_ntb_dev *ndev = hb_ndev(work);
+
+	if (amd_poll_link(ndev))
+		ntb_link_event(&ndev->ntb);
+
+	if (!amd_link_is_up(ndev))
+		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+}
+
+/* Set up interrupts with the hardware's doorbell/vector counts. */
+static int amd_init_isr(struct amd_ntb_dev *ndev)
+{
+	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
+}
+
+/* Advertise readiness to the peer by setting the SIDE_READY bit in the
+ * side-info register (idempotent).
+ */
+static void amd_init_side_info(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	unsigned int reg;
+
+	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
+	if (!(reg & AMD_SIDE_READY)) {
+		reg |= AMD_SIDE_READY;
+		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
+	}
+}
+
+/* Withdraw the readiness advertisement; the trailing read flushes the
+ * posted write before teardown continues.
+ */
+static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	unsigned int reg;
+
+	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
+	if (reg & AMD_SIDE_READY) {
+		reg &= ~AMD_SIDE_READY;
+		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
+		readl(mmio + AMD_SIDEINFO_OFFSET);
+	}
+}
+
+/*
+ * Configure topology-dependent resources: split the scratchpad space between
+ * the two sides (primary uses the lower half, secondary the upper half),
+ * start the link heartbeat, and mask SMU event interrupts until link_enable.
+ * Only PRI/SEC topologies are supported; B2B is rejected.
+ */
+static int amd_init_ntb(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+
+	ndev->mw_count = AMD_MW_CNT;
+	ndev->spad_count = AMD_SPADS_CNT;
+	ndev->db_count = AMD_DB_CNT;
+
+	switch (ndev->ntb.topo) {
+	case NTB_TOPO_PRI:
+	case NTB_TOPO_SEC:
+		/* each side owns half of the scratchpads */
+		ndev->spad_count >>= 1;
+		if (ndev->ntb.topo == NTB_TOPO_PRI) {
+			ndev->self_spad = 0;
+			ndev->peer_spad = 0x20;
+		} else {
+			ndev->self_spad = 0x20;
+			ndev->peer_spad = 0;
+		}
+
+		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
+		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+
+		break;
+	default:
+		dev_err(&ndev->ntb.pdev->dev,
+			"AMD NTB does not support B2B mode.\n");
+		return -EINVAL;
+	}
+
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	/* Mask event interrupts */
+	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
+
+	return 0;
+}
+
+/* Determine which side of the bridge we are on from the side-info register:
+ * the SIDE bit set means secondary, clear means primary.
+ */
+static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	u32 info;
+
+	info = readl(mmio + AMD_SIDEINFO_OFFSET);
+	if (info & AMD_SIDE_MASK)
+		return NTB_TOPO_SEC;
+	else
+		return NTB_TOPO_PRI;
+}
+
+/* Device-level init: detect topology, configure NTB resources, set up
+ * interrupts, and derive the doorbell valid mask from the final db_count
+ * (which amd_init_isr may have reduced to 1 on the MSI/INTx fallback).
+ */
+static int amd_init_dev(struct amd_ntb_dev *ndev)
+{
+	struct pci_dev *pdev;
+	int rc = 0;
+
+	pdev = ndev->ntb.pdev;
+
+	ndev->ntb.topo = amd_get_topo(ndev);
+	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
+		ntb_topo_string(ndev->ntb.topo));
+
+	rc = amd_init_ntb(ndev);
+	if (rc)
+		return rc;
+
+	rc = amd_init_isr(ndev);
+	if (rc) {
+		dev_err(&pdev->dev, "fail to init isr.\n");
+		return rc;
+	}
+
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	return 0;
+}
+
+/* Reverse amd_init_dev(): stop the heartbeat, then release interrupts. */
+static void amd_deinit_dev(struct amd_ntb_dev *ndev)
+{
+	cancel_delayed_work_sync(&ndev->hb_timer);
+
+	ndev_deinit_isr(ndev);
+}
+
+/*
+ * PCI-level init: enable the device, claim its regions, set bus mastering,
+ * negotiate DMA masks (64-bit preferred, 32-bit fallback), and map BAR0.
+ * The peer's register window lives at a fixed offset inside the same BAR.
+ * Errors unwind via the goto chain in reverse acquisition order.
+ */
+static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
+			    struct pci_dev *pdev)
+{
+	int rc;
+
+	pci_set_drvdata(pdev, ndev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_pci_enable;
+
+	rc = pci_request_regions(pdev, NTB_NAME);
+	if (rc)
+		goto err_pci_regions;
+
+	pci_set_master(pdev);
+
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc)
+			goto err_dma_mask;
+		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+	}
+
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc)
+			goto err_dma_mask;
+		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
+	}
+
+	ndev->self_mmio = pci_iomap(pdev, 0, 0);
+	if (!ndev->self_mmio) {
+		rc = -EIO;
+		goto err_dma_mask;
+	}
+	/* peer registers are a fixed offset into the same BAR0 mapping */
+	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;
+
+	return 0;
+
+err_dma_mask:
+	pci_clear_master(pdev);
+err_pci_regions:
+	pci_disable_device(pdev);
+err_pci_enable:
+	pci_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+/* Reverse amd_ntb_init_pci(): unmap, drop mastering, release, disable. */
+static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+
+	pci_iounmap(pdev, ndev->self_mmio);
+
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * PCI probe: allocate device state NUMA-local to the adapter, bring up the
+ * PCI and NTB layers, advertise readiness to the peer, take an initial link
+ * sample, expose debugfs, and register with the NTB core.  The error chain
+ * unwinds each completed stage in reverse order.
+ */
+static int amd_ntb_pci_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *id)
+{
+	struct amd_ntb_dev *ndev;
+	int rc, node;
+
+	node = dev_to_node(&pdev->dev);
+
+	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+	if (!ndev) {
+		rc = -ENOMEM;
+		goto err_ndev;
+	}
+
+	ndev_init_struct(ndev, pdev);
+
+	rc = amd_ntb_init_pci(ndev, pdev);
+	if (rc)
+		goto err_init_pci;
+
+	rc = amd_init_dev(ndev);
+	if (rc)
+		goto err_init_dev;
+
+	/* write side info */
+	amd_init_side_info(ndev);
+
+	amd_poll_link(ndev);
+
+	ndev_init_debugfs(ndev);
+
+	rc = ntb_register_device(&ndev->ntb);
+	if (rc)
+		goto err_register;
+
+	dev_info(&pdev->dev, "NTB device registered.\n");
+
+	return 0;
+
+err_register:
+	ndev_deinit_debugfs(ndev);
+	amd_deinit_dev(ndev);
+err_init_dev:
+	amd_ntb_deinit_pci(ndev);
+err_init_pci:
+	kfree(ndev);
+err_ndev:
+	return rc;
+}
+
+/* PCI remove: unregister from the NTB core, then tear down in reverse
+ * order of probe (debugfs, side info, device, PCI).
+ */
+static void amd_ntb_pci_remove(struct pci_dev *pdev)
+{
+	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	ntb_unregister_device(&ndev->ntb);
+	ndev_deinit_debugfs(ndev);
+	amd_deinit_side_info(ndev);
+	amd_deinit_dev(ndev);
+	amd_ntb_deinit_pci(ndev);
+	kfree(ndev);
+}
+
+/* File operations for the debugfs "info" file (read-only snapshot). */
+static const struct file_operations amd_ntb_debugfs_info = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ndev_debugfs_read,
+};
+
+/* PCI IDs this driver binds to. */
+static const struct pci_device_id amd_ntb_pci_tbl[] = {
+	{PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
+
+static struct pci_driver amd_ntb_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= amd_ntb_pci_tbl,
+	.probe		= amd_ntb_pci_probe,
+	.remove		= amd_ntb_pci_remove,
+};
+
+/* Module init: create the shared debugfs root (when debugfs is available)
+ * and register the PCI driver.
+ */
+static int __init amd_ntb_pci_driver_init(void)
+{
+	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+	if (debugfs_initialized())
+		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	return pci_register_driver(&amd_ntb_pci_driver);
+}
+module_init(amd_ntb_pci_driver_init);
+
+/* Module exit: unregister the driver, then remove the debugfs root. */
+static void __exit amd_ntb_pci_driver_exit(void)
+{
+	pci_unregister_driver(&amd_ntb_pci_driver);
+	debugfs_remove_recursive(debugfs_dir);
+}
+module_exit(amd_ntb_pci_driver_exit);
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/amd/ntb_hw_amd.h b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/ntb_hw_amd.h
new file mode 100644
index 0000000..8f3617a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -0,0 +1,217 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of AMD Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * AMD PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Xiangliang Yu <Xiangliang.Yu@amd.com>
+ */
+
+#ifndef NTB_HW_AMD_H
+#define NTB_HW_AMD_H
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_AMD_NTB	0x145B
+/* 1000 ms, expressed in jiffies */
+#define AMD_LINK_HB_TIMEOUT	msecs_to_jiffies(1000)
+#define AMD_LINK_STATUS_OFFSET	0x68
+/* Fields extracted from the same link status word by the macros below */
+#define NTB_LIN_STA_ACTIVE_BIT	0x00000002
+#define NTB_LNK_STA_SPEED_MASK	0x000F0000
+#define NTB_LNK_STA_WIDTH_MASK	0x03F00000
+#define NTB_LNK_STA_ACTIVE(x)	(!!((x) & NTB_LIN_STA_ACTIVE_BIT))
+#define NTB_LNK_STA_SPEED(x)	(((x) & NTB_LNK_STA_SPEED_MASK) >> 16)
+#define NTB_LNK_STA_WIDTH(x)	(((x) & NTB_LNK_STA_WIDTH_MASK) >> 20)
+
+/*
+ * 64-bit MMIO accessors: use the platform's native readq/writeq when
+ * available, otherwise fall back to two 32-bit accesses (low word
+ * first).  Note the split fallback access is not atomic.
+ */
+#ifndef read64
+#ifdef readq
+#define read64 readq
+#else
+#define read64 _read64
+static inline u64 _read64(void __iomem *mmio)
+{
+	u64 low, high;
+
+	low = readl(mmio);
+	high = readl(mmio + sizeof(u32));
+	return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef write64
+#ifdef writeq
+#define write64 writeq
+#else
+#define write64 _write64
+static inline void _write64(u64 val, void __iomem *mmio)
+{
+	writel(val, mmio);
+	writel(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
+
+enum {
+	/* AMD NTB Capability */
+	AMD_MW_CNT		= 3,
+	AMD_DB_CNT		= 16,
+	AMD_MSIX_VECTOR_CNT	= 24,
+	AMD_SPADS_CNT		= 16,
+
+	/* AMD NTB register offsets */
+	AMD_CNTL_OFFSET		= 0x200,
+
+	/* NTB control register bits */
+	PMM_REG_CTL		= BIT(21),
+	SMM_REG_CTL		= BIT(20),
+	SMM_REG_ACC_PATH	= BIT(18),
+	PMM_REG_ACC_PATH	= BIT(17),
+	NTB_CLK_EN		= BIT(16),
+
+	AMD_STA_OFFSET		= 0x204,
+	AMD_PGSLV_OFFSET	= 0x208,
+	AMD_SPAD_MUX_OFFSET	= 0x20C,
+	AMD_SPAD_OFFSET		= 0x210,
+	AMD_RSMU_HCID		= 0x250,
+	AMD_RSMU_SIID		= 0x254,
+	AMD_PSION_OFFSET	= 0x300,
+	AMD_SSION_OFFSET	= 0x330,
+	AMD_MMINDEX_OFFSET	= 0x400,
+	AMD_MMDATA_OFFSET	= 0x404,
+	AMD_SIDEINFO_OFFSET	= 0x408,
+
+	/* side info bits — NOTE(review): appear to belong to the
+	 * AMD_SIDEINFO register above; confirm against HW spec
+	 */
+	AMD_SIDE_MASK		= BIT(0),
+	AMD_SIDE_READY		= BIT(1),
+
+	/* limit register */
+	AMD_ROMBARLMT_OFFSET	= 0x410,
+	AMD_BAR1LMT_OFFSET	= 0x414,
+	AMD_BAR23LMT_OFFSET	= 0x418,
+	AMD_BAR45LMT_OFFSET	= 0x420,
+	/* xlat address */
+	AMD_POMBARXLAT_OFFSET	= 0x428,
+	AMD_BAR1XLAT_OFFSET	= 0x430,
+	AMD_BAR23XLAT_OFFSET	= 0x438,
+	AMD_BAR45XLAT_OFFSET	= 0x440,
+	/* doorbell and interrupt */
+	AMD_DBFM_OFFSET		= 0x450,
+	AMD_DBREQ_OFFSET	= 0x454,
+	AMD_MIRRDBSTAT_OFFSET	= 0x458,
+	AMD_DBMASK_OFFSET	= 0x45C,
+	AMD_DBSTAT_OFFSET	= 0x460,
+	AMD_INTMASK_OFFSET	= 0x470,
+	AMD_INTSTAT_OFFSET	= 0x474,
+
+	/* event type */
+	AMD_PEER_FLUSH_EVENT	= BIT(0),
+	AMD_PEER_RESET_EVENT	= BIT(1),
+	AMD_PEER_D3_EVENT	= BIT(2),
+	AMD_PEER_PMETO_EVENT	= BIT(3),
+	AMD_PEER_D0_EVENT	= BIT(4),
+	AMD_LINK_UP_EVENT	= BIT(5),
+	AMD_LINK_DOWN_EVENT	= BIT(6),
+	AMD_EVENT_INTMASK	= (AMD_PEER_FLUSH_EVENT |
+				AMD_PEER_RESET_EVENT | AMD_PEER_D3_EVENT |
+				AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT |
+				AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT),
+
+	AMD_PMESTAT_OFFSET	= 0x480,
+	AMD_PMSGTRIG_OFFSET	= 0x490,
+	AMD_LTRLATENCY_OFFSET	= 0x494,
+	AMD_FLUSHTRIG_OFFSET	= 0x498,
+
+	/* SMU registers */
+	AMD_SMUACK_OFFSET	= 0x4A0,
+	AMD_SINRST_OFFSET	= 0x4A4,
+	AMD_RSPNUM_OFFSET	= 0x4A8,
+	AMD_SMU_SPADMUTEX	= 0x4B0,
+	AMD_SMU_SPADOFFSET	= 0x4B4,
+
+	/* offset between local and peer register windows */
+	AMD_PEER_OFFSET		= 0x400,
+};
+
+struct amd_ntb_dev;
+
+/* Per-vector context handed to the MSI-X interrupt handlers */
+struct amd_ntb_vec {
+	struct amd_ntb_dev	*ndev;	/* owning device */
+	int			num;	/* vector index */
+};
+
+/* Driver-private state of a single AMD NTB PCI function */
+struct amd_ntb_dev {
+	struct ntb_dev ntb;	/* embedded NTB core device */
+
+	u32 ntb_side;	/* which side of the bridge — TODO confirm vs AMD_SIDE_MASK */
+	u32 lnk_sta;
+	u32 cntl_sta;
+	u32 peer_sta;
+
+	/* advertised capability counts (see AMD_*_CNT above) */
+	unsigned char mw_count;
+	unsigned char spad_count;
+	unsigned char db_count;
+	unsigned char msix_vec_count;
+
+	u64 db_valid_mask;
+	u64 db_mask;
+	u32 int_mask;
+
+	struct msix_entry *msix;
+	struct amd_ntb_vec *vec;
+
+	/* synchronize rmw access of db_mask and hw reg */
+	spinlock_t db_mask_lock;
+
+	void __iomem *self_mmio;	/* mapped local register space */
+	void __iomem *peer_mmio;	/* mapped peer register space */
+	unsigned int self_spad;		/* local scratchpad base */
+	unsigned int peer_spad;		/* peer scratchpad base */
+
+	struct delayed_work hb_timer;	/* delayed work, recovered via hb_ndev() */
+
+	struct dentry *debugfs_dir;	/* per-device debugfs directory */
+	struct dentry *debugfs_info;	/* "info" node within it */
+};
+
+/* Recover the amd_ntb_dev from its embedded ntb_dev / heartbeat work */
+#define ntb_ndev(__ntb) container_of(__ntb, struct amd_ntb_dev, ntb)
+#define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work)
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/idt/Kconfig b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/Kconfig
new file mode 100644
index 0000000..b360e56
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/Kconfig
@@ -0,0 +1,31 @@
+config NTB_IDT
+	tristate "IDT PCIe-switch Non-Transparent Bridge support"
+	depends on PCI
+	help
+	 This driver supports the NTB function of capable IDT PCIe-switches.
+
+	 Some of the pre-initializations must be made before the IDT PCIe-switch
+	 exposes its NT-functions correctly. It should be done either by proper
+	 initialisation of the EEPROM connected to the master SMBus of the
+	 switch, or by BIOS using the slave-SMBus interface to change the
+	 corresponding register values. Evidently it must be done before PCI
+	 bus enumeration is finished in the Linux kernel.
+
+	 First of all partitions must be activated and properly assigned to all
+	 the ports with NT-functions intended to be activated (see SWPARTxCTL
+	 and SWPORTxCTL registers). Then all NT-function BARs must be enabled
+	 with chosen valid aperture. For memory windows related BARs the
+	 aperture settings shall determine the maximum size of memory windows
+	 accepted by a BAR. Note that BAR0 must map PCI configuration space
+	 registers.
+
+	 It is worth noting that, since a part of this driver relies on the
+	 BAR settings of peer NT-functions, the BAR setups can't be done over
+	 kernel PCI fixups. That's why alternative pre-initialization
+	 techniques like BIOS using the SMBus interface or EEPROM should be
+	 utilized. Additionally, if one needs to have temperature sensor
+	 information printed to the system log, the corresponding registers
+	 must be initialized within BIOS/EEPROM as well.
+
+	 If unsure, say N.
+
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/idt/Makefile b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/Makefile
new file mode 100644
index 0000000..a102cf1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_IDT) += ntb_hw_idt.o
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/idt/ntb_hw_idt.c b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/ntb_hw_idt.c
new file mode 100644
index 0000000..b68e2ca
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -0,0 +1,2712 @@
+/*
+ *   This file is provided under a GPLv2 license.  When using or
+ *   redistributing this file, you may do so under that license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2016 T-Platforms All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify it
+ *   under the terms and conditions of the GNU General Public License,
+ *   version 2, as published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ *   Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License along
+ *   with this program; if not, one can be found http://www.gnu.org/licenses/.
+ *
+ *   The full GNU General Public License is included in this distribution in
+ *   the file called "COPYING".
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
+ */
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_idt.h"
+
+#define NTB_NAME	"ntb_hw_idt"
+#define NTB_DESC	"IDT PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER		"2.0"
+#define NTB_IRQNAME	"ntb_irq_idt"
+
+MODULE_DESCRIPTION(NTB_DESC);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("T-platforms");
+
+/*
+ * NT Endpoint registers table simplifying a loop access to the functionally
+ * related registers: per-BAR setup/limit/translation-base registers and
+ * per-message in/out/source registers of the local NT-function.
+ */
+static const struct idt_ntb_regs ntdata_tbl = {
+	{ {IDT_NT_BARSETUP0,	IDT_NT_BARLIMIT0,
+	   IDT_NT_BARLTBASE0,	IDT_NT_BARUTBASE0},
+	  {IDT_NT_BARSETUP1,	IDT_NT_BARLIMIT1,
+	   IDT_NT_BARLTBASE1,	IDT_NT_BARUTBASE1},
+	  {IDT_NT_BARSETUP2,	IDT_NT_BARLIMIT2,
+	   IDT_NT_BARLTBASE2,	IDT_NT_BARUTBASE2},
+	  {IDT_NT_BARSETUP3,	IDT_NT_BARLIMIT3,
+	   IDT_NT_BARLTBASE3,	IDT_NT_BARUTBASE3},
+	  {IDT_NT_BARSETUP4,	IDT_NT_BARLIMIT4,
+	   IDT_NT_BARLTBASE4,	IDT_NT_BARUTBASE4},
+	  {IDT_NT_BARSETUP5,	IDT_NT_BARLIMIT5,
+	   IDT_NT_BARLTBASE5,	IDT_NT_BARUTBASE5} },
+	{ {IDT_NT_INMSG0,	IDT_NT_OUTMSG0,	IDT_NT_INMSGSRC0},
+	  {IDT_NT_INMSG1,	IDT_NT_OUTMSG1,	IDT_NT_INMSGSRC1},
+	  {IDT_NT_INMSG2,	IDT_NT_OUTMSG2,	IDT_NT_INMSGSRC2},
+	  {IDT_NT_INMSG3,	IDT_NT_OUTMSG3,	IDT_NT_INMSGSRC3} }
+};
+
+/*
+ * NT Endpoint ports data table with the corresponding PCIe command, link
+ * status, control and BAR-related registers. Zero-filled rows ({0}) are
+ * placeholders for ports that have no NT-function register set.
+ */
+static const struct idt_ntb_port portdata_tbl[IDT_MAX_NR_PORTS] = {
+/*0*/	{ IDT_SW_NTP0_PCIECMDSTS,	IDT_SW_NTP0_PCIELCTLSTS,
+	  IDT_SW_NTP0_NTCTL,
+	  IDT_SW_SWPORT0CTL,		IDT_SW_SWPORT0STS,
+	  { {IDT_SW_NTP0_BARSETUP0,	IDT_SW_NTP0_BARLIMIT0,
+	     IDT_SW_NTP0_BARLTBASE0,	IDT_SW_NTP0_BARUTBASE0},
+	    {IDT_SW_NTP0_BARSETUP1,	IDT_SW_NTP0_BARLIMIT1,
+	     IDT_SW_NTP0_BARLTBASE1,	IDT_SW_NTP0_BARUTBASE1},
+	    {IDT_SW_NTP0_BARSETUP2,	IDT_SW_NTP0_BARLIMIT2,
+	     IDT_SW_NTP0_BARLTBASE2,	IDT_SW_NTP0_BARUTBASE2},
+	    {IDT_SW_NTP0_BARSETUP3,	IDT_SW_NTP0_BARLIMIT3,
+	     IDT_SW_NTP0_BARLTBASE3,	IDT_SW_NTP0_BARUTBASE3},
+	    {IDT_SW_NTP0_BARSETUP4,	IDT_SW_NTP0_BARLIMIT4,
+	     IDT_SW_NTP0_BARLTBASE4,	IDT_SW_NTP0_BARUTBASE4},
+	    {IDT_SW_NTP0_BARSETUP5,	IDT_SW_NTP0_BARLIMIT5,
+	     IDT_SW_NTP0_BARLTBASE5,	IDT_SW_NTP0_BARUTBASE5} } },
+/*1*/	{0},
+/*2*/	{ IDT_SW_NTP2_PCIECMDSTS,	IDT_SW_NTP2_PCIELCTLSTS,
+	  IDT_SW_NTP2_NTCTL,
+	  IDT_SW_SWPORT2CTL,		IDT_SW_SWPORT2STS,
+	  { {IDT_SW_NTP2_BARSETUP0,	IDT_SW_NTP2_BARLIMIT0,
+	     IDT_SW_NTP2_BARLTBASE0,	IDT_SW_NTP2_BARUTBASE0},
+	    {IDT_SW_NTP2_BARSETUP1,	IDT_SW_NTP2_BARLIMIT1,
+	     IDT_SW_NTP2_BARLTBASE1,	IDT_SW_NTP2_BARUTBASE1},
+	    {IDT_SW_NTP2_BARSETUP2,	IDT_SW_NTP2_BARLIMIT2,
+	     IDT_SW_NTP2_BARLTBASE2,	IDT_SW_NTP2_BARUTBASE2},
+	    {IDT_SW_NTP2_BARSETUP3,	IDT_SW_NTP2_BARLIMIT3,
+	     IDT_SW_NTP2_BARLTBASE3,	IDT_SW_NTP2_BARUTBASE3},
+	    {IDT_SW_NTP2_BARSETUP4,	IDT_SW_NTP2_BARLIMIT4,
+	     IDT_SW_NTP2_BARLTBASE4,	IDT_SW_NTP2_BARUTBASE4},
+	    {IDT_SW_NTP2_BARSETUP5,	IDT_SW_NTP2_BARLIMIT5,
+	     IDT_SW_NTP2_BARLTBASE5,	IDT_SW_NTP2_BARUTBASE5} } },
+/*3*/	{0},
+/*4*/	{ IDT_SW_NTP4_PCIECMDSTS,	IDT_SW_NTP4_PCIELCTLSTS,
+	  IDT_SW_NTP4_NTCTL,
+	  IDT_SW_SWPORT4CTL,		IDT_SW_SWPORT4STS,
+	  { {IDT_SW_NTP4_BARSETUP0,	IDT_SW_NTP4_BARLIMIT0,
+	     IDT_SW_NTP4_BARLTBASE0,	IDT_SW_NTP4_BARUTBASE0},
+	    {IDT_SW_NTP4_BARSETUP1,	IDT_SW_NTP4_BARLIMIT1,
+	     IDT_SW_NTP4_BARLTBASE1,	IDT_SW_NTP4_BARUTBASE1},
+	    {IDT_SW_NTP4_BARSETUP2,	IDT_SW_NTP4_BARLIMIT2,
+	     IDT_SW_NTP4_BARLTBASE2,	IDT_SW_NTP4_BARUTBASE2},
+	    {IDT_SW_NTP4_BARSETUP3,	IDT_SW_NTP4_BARLIMIT3,
+	     IDT_SW_NTP4_BARLTBASE3,	IDT_SW_NTP4_BARUTBASE3},
+	    {IDT_SW_NTP4_BARSETUP4,	IDT_SW_NTP4_BARLIMIT4,
+	     IDT_SW_NTP4_BARLTBASE4,	IDT_SW_NTP4_BARUTBASE4},
+	    {IDT_SW_NTP4_BARSETUP5,	IDT_SW_NTP4_BARLIMIT5,
+	     IDT_SW_NTP4_BARLTBASE5,	IDT_SW_NTP4_BARUTBASE5} } },
+/*5*/	{0},
+/*6*/	{ IDT_SW_NTP6_PCIECMDSTS,	IDT_SW_NTP6_PCIELCTLSTS,
+	  IDT_SW_NTP6_NTCTL,
+	  IDT_SW_SWPORT6CTL,		IDT_SW_SWPORT6STS,
+	  { {IDT_SW_NTP6_BARSETUP0,	IDT_SW_NTP6_BARLIMIT0,
+	     IDT_SW_NTP6_BARLTBASE0,	IDT_SW_NTP6_BARUTBASE0},
+	    {IDT_SW_NTP6_BARSETUP1,	IDT_SW_NTP6_BARLIMIT1,
+	     IDT_SW_NTP6_BARLTBASE1,	IDT_SW_NTP6_BARUTBASE1},
+	    {IDT_SW_NTP6_BARSETUP2,	IDT_SW_NTP6_BARLIMIT2,
+	     IDT_SW_NTP6_BARLTBASE2,	IDT_SW_NTP6_BARUTBASE2},
+	    {IDT_SW_NTP6_BARSETUP3,	IDT_SW_NTP6_BARLIMIT3,
+	     IDT_SW_NTP6_BARLTBASE3,	IDT_SW_NTP6_BARUTBASE3},
+	    {IDT_SW_NTP6_BARSETUP4,	IDT_SW_NTP6_BARLIMIT4,
+	     IDT_SW_NTP6_BARLTBASE4,	IDT_SW_NTP6_BARUTBASE4},
+	    {IDT_SW_NTP6_BARSETUP5,	IDT_SW_NTP6_BARLIMIT5,
+	     IDT_SW_NTP6_BARLTBASE5,	IDT_SW_NTP6_BARUTBASE5} } },
+/*7*/	{0},
+/*8*/	{ IDT_SW_NTP8_PCIECMDSTS,	IDT_SW_NTP8_PCIELCTLSTS,
+	  IDT_SW_NTP8_NTCTL,
+	  IDT_SW_SWPORT8CTL,		IDT_SW_SWPORT8STS,
+	  { {IDT_SW_NTP8_BARSETUP0,	IDT_SW_NTP8_BARLIMIT0,
+	     IDT_SW_NTP8_BARLTBASE0,	IDT_SW_NTP8_BARUTBASE0},
+	    {IDT_SW_NTP8_BARSETUP1,	IDT_SW_NTP8_BARLIMIT1,
+	     IDT_SW_NTP8_BARLTBASE1,	IDT_SW_NTP8_BARUTBASE1},
+	    {IDT_SW_NTP8_BARSETUP2,	IDT_SW_NTP8_BARLIMIT2,
+	     IDT_SW_NTP8_BARLTBASE2,	IDT_SW_NTP8_BARUTBASE2},
+	    {IDT_SW_NTP8_BARSETUP3,	IDT_SW_NTP8_BARLIMIT3,
+	     IDT_SW_NTP8_BARLTBASE3,	IDT_SW_NTP8_BARUTBASE3},
+	    {IDT_SW_NTP8_BARSETUP4,	IDT_SW_NTP8_BARLIMIT4,
+	     IDT_SW_NTP8_BARLTBASE4,	IDT_SW_NTP8_BARUTBASE4},
+	    {IDT_SW_NTP8_BARSETUP5,	IDT_SW_NTP8_BARLIMIT5,
+	     IDT_SW_NTP8_BARLTBASE5,	IDT_SW_NTP8_BARUTBASE5} } },
+/*9*/	{0},
+/*10*/	{0},
+/*11*/	{0},
+/*12*/	{ IDT_SW_NTP12_PCIECMDSTS,	IDT_SW_NTP12_PCIELCTLSTS,
+	  IDT_SW_NTP12_NTCTL,
+	  IDT_SW_SWPORT12CTL,		IDT_SW_SWPORT12STS,
+	  { {IDT_SW_NTP12_BARSETUP0,	IDT_SW_NTP12_BARLIMIT0,
+	     IDT_SW_NTP12_BARLTBASE0,	IDT_SW_NTP12_BARUTBASE0},
+	    {IDT_SW_NTP12_BARSETUP1,	IDT_SW_NTP12_BARLIMIT1,
+	     IDT_SW_NTP12_BARLTBASE1,	IDT_SW_NTP12_BARUTBASE1},
+	    {IDT_SW_NTP12_BARSETUP2,	IDT_SW_NTP12_BARLIMIT2,
+	     IDT_SW_NTP12_BARLTBASE2,	IDT_SW_NTP12_BARUTBASE2},
+	    {IDT_SW_NTP12_BARSETUP3,	IDT_SW_NTP12_BARLIMIT3,
+	     IDT_SW_NTP12_BARLTBASE3,	IDT_SW_NTP12_BARUTBASE3},
+	    {IDT_SW_NTP12_BARSETUP4,	IDT_SW_NTP12_BARLIMIT4,
+	     IDT_SW_NTP12_BARLTBASE4,	IDT_SW_NTP12_BARUTBASE4},
+	    {IDT_SW_NTP12_BARSETUP5,	IDT_SW_NTP12_BARLIMIT5,
+	     IDT_SW_NTP12_BARLTBASE5,	IDT_SW_NTP12_BARUTBASE5} } },
+/*13*/	{0},
+/*14*/	{0},
+/*15*/	{0},
+/*16*/	{ IDT_SW_NTP16_PCIECMDSTS,	IDT_SW_NTP16_PCIELCTLSTS,
+	  IDT_SW_NTP16_NTCTL,
+	  IDT_SW_SWPORT16CTL,		IDT_SW_SWPORT16STS,
+	  { {IDT_SW_NTP16_BARSETUP0,	IDT_SW_NTP16_BARLIMIT0,
+	     IDT_SW_NTP16_BARLTBASE0,	IDT_SW_NTP16_BARUTBASE0},
+	    {IDT_SW_NTP16_BARSETUP1,	IDT_SW_NTP16_BARLIMIT1,
+	     IDT_SW_NTP16_BARLTBASE1,	IDT_SW_NTP16_BARUTBASE1},
+	    {IDT_SW_NTP16_BARSETUP2,	IDT_SW_NTP16_BARLIMIT2,
+	     IDT_SW_NTP16_BARLTBASE2,	IDT_SW_NTP16_BARUTBASE2},
+	    {IDT_SW_NTP16_BARSETUP3,	IDT_SW_NTP16_BARLIMIT3,
+	     IDT_SW_NTP16_BARLTBASE3,	IDT_SW_NTP16_BARUTBASE3},
+	    {IDT_SW_NTP16_BARSETUP4,	IDT_SW_NTP16_BARLIMIT4,
+	     IDT_SW_NTP16_BARLTBASE4,	IDT_SW_NTP16_BARUTBASE4},
+	    {IDT_SW_NTP16_BARSETUP5,	IDT_SW_NTP16_BARLIMIT5,
+	     IDT_SW_NTP16_BARLTBASE5,	IDT_SW_NTP16_BARUTBASE5} } },
+/*17*/	{0},
+/*18*/	{0},
+/*19*/	{0},
+/*20*/	{ IDT_SW_NTP20_PCIECMDSTS,	IDT_SW_NTP20_PCIELCTLSTS,
+	  IDT_SW_NTP20_NTCTL,
+	  IDT_SW_SWPORT20CTL,		IDT_SW_SWPORT20STS,
+	  { {IDT_SW_NTP20_BARSETUP0,	IDT_SW_NTP20_BARLIMIT0,
+	     IDT_SW_NTP20_BARLTBASE0,	IDT_SW_NTP20_BARUTBASE0},
+	    {IDT_SW_NTP20_BARSETUP1,	IDT_SW_NTP20_BARLIMIT1,
+	     IDT_SW_NTP20_BARLTBASE1,	IDT_SW_NTP20_BARUTBASE1},
+	    {IDT_SW_NTP20_BARSETUP2,	IDT_SW_NTP20_BARLIMIT2,
+	     IDT_SW_NTP20_BARLTBASE2,	IDT_SW_NTP20_BARUTBASE2},
+	    {IDT_SW_NTP20_BARSETUP3,	IDT_SW_NTP20_BARLIMIT3,
+	     IDT_SW_NTP20_BARLTBASE3,	IDT_SW_NTP20_BARUTBASE3},
+	    {IDT_SW_NTP20_BARSETUP4,	IDT_SW_NTP20_BARLIMIT4,
+	     IDT_SW_NTP20_BARLTBASE4,	IDT_SW_NTP20_BARUTBASE4},
+	    {IDT_SW_NTP20_BARSETUP5,	IDT_SW_NTP20_BARLIMIT5,
+	     IDT_SW_NTP20_BARLTBASE5,	IDT_SW_NTP20_BARUTBASE5} } },
+/*21*/	{0},
+/*22*/	{0},
+/*23*/	{0}
+};
+
+/*
+ * IDT PCIe-switch partitions table with the corresponding control, status
+ * and messages control registers. All IDT_MAX_NR_PARTS partitions carry a
+ * full register set, so this table has no placeholder rows.
+ */
+static const struct idt_ntb_part partdata_tbl[IDT_MAX_NR_PARTS] = {
+/*0*/	{ IDT_SW_SWPART0CTL,	IDT_SW_SWPART0STS,
+	  {IDT_SW_SWP0MSGCTL0,	IDT_SW_SWP0MSGCTL1,
+	   IDT_SW_SWP0MSGCTL2,	IDT_SW_SWP0MSGCTL3} },
+/*1*/	{ IDT_SW_SWPART1CTL,	IDT_SW_SWPART1STS,
+	  {IDT_SW_SWP1MSGCTL0,	IDT_SW_SWP1MSGCTL1,
+	   IDT_SW_SWP1MSGCTL2,	IDT_SW_SWP1MSGCTL3} },
+/*2*/	{ IDT_SW_SWPART2CTL,	IDT_SW_SWPART2STS,
+	  {IDT_SW_SWP2MSGCTL0,	IDT_SW_SWP2MSGCTL1,
+	   IDT_SW_SWP2MSGCTL2,	IDT_SW_SWP2MSGCTL3} },
+/*3*/	{ IDT_SW_SWPART3CTL,	IDT_SW_SWPART3STS,
+	  {IDT_SW_SWP3MSGCTL0,	IDT_SW_SWP3MSGCTL1,
+	   IDT_SW_SWP3MSGCTL2,	IDT_SW_SWP3MSGCTL3} },
+/*4*/	{ IDT_SW_SWPART4CTL,	IDT_SW_SWPART4STS,
+	  {IDT_SW_SWP4MSGCTL0,	IDT_SW_SWP4MSGCTL1,
+	   IDT_SW_SWP4MSGCTL2,	IDT_SW_SWP4MSGCTL3} },
+/*5*/	{ IDT_SW_SWPART5CTL,	IDT_SW_SWPART5STS,
+	  {IDT_SW_SWP5MSGCTL0,	IDT_SW_SWP5MSGCTL1,
+	   IDT_SW_SWP5MSGCTL2,	IDT_SW_SWP5MSGCTL3} },
+/*6*/	{ IDT_SW_SWPART6CTL,	IDT_SW_SWPART6STS,
+	  {IDT_SW_SWP6MSGCTL0,	IDT_SW_SWP6MSGCTL1,
+	   IDT_SW_SWP6MSGCTL2,	IDT_SW_SWP6MSGCTL3} },
+/*7*/	{ IDT_SW_SWPART7CTL,	IDT_SW_SWPART7STS,
+	  {IDT_SW_SWP7MSGCTL0,	IDT_SW_SWP7MSGCTL1,
+	   IDT_SW_SWP7MSGCTL2,	IDT_SW_SWP7MSGCTL3} }
+};
+
+/*
+ * DebugFS directory to place the driver debug file
+ */
+static struct dentry *dbgfs_topdir;
+
+/*=============================================================================
+ *                1. IDT PCIe-switch registers IO-functions
+ *
+ *    Beside ordinary configuration space registers IDT PCIe-switch expose
+ * global configuration registers, which are used to determine state of other
+ * device ports as well as being notified of some switch-related events.
+ * Additionally all the configuration space registers of all the IDT
+ * PCIe-switch functions are mapped to the Global Address space, so each
+ * function can determine a configuration of any other PCI-function.
+ *    Functions declared in this chapter are created to encapsulate access
+ * to configuration and global registers, so the driver code just need to
+ * provide IDT NTB hardware descriptor and a register address.
+ *=============================================================================
+ */
+
+/*
+ * idt_nt_write() - PCI configuration space registers write method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to write data to (byte offset within the mapped
+ *		NT-function configuration space)
+ * @data:	Value to write to the register
+ *
+ * IDT PCIe-switch registers are all Little endian.
+ */
+static void idt_nt_write(struct idt_ntb_dev *ndev,
+			 const unsigned int reg, const u32 data)
+{
+	/*
+	 * It's obvious bug to request a register exceeding the maximum possible
+	 * value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return;
+
+	/* Just write the value to the specified register */
+	iowrite32(data, ndev->cfgspc + (ptrdiff_t)reg);
+}
+
+/*
+ * idt_nt_read() - PCI configuration space registers read method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to read data from (byte offset within the mapped
+ *		NT-function configuration space)
+ *
+ * IDT PCIe-switch Global configuration registers are all Little endian.
+ *
+ * Return: register value, or ~0 on an invalid register argument
+ */
+static u32 idt_nt_read(struct idt_ntb_dev *ndev, const unsigned int reg)
+{
+	/*
+	 * It's obvious bug to request a register exceeding the maximum possible
+	 * value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return ~0;
+
+	/* Just read the value from the specified register */
+	return ioread32(ndev->cfgspc + (ptrdiff_t)reg);
+}
+
+/*
+ * idt_sw_write() - Global registers write method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to write data to
+ * @data:	Value to write to the register
+ *
+ * IDT PCIe-switch Global configuration registers are all Little endian.
+ * Access goes through the shared GASA address/data register pair, so it
+ * is serialized by the gasa_lock spinlock.
+ */
+static void idt_sw_write(struct idt_ntb_dev *ndev,
+			 const unsigned int reg, const u32 data)
+{
+	unsigned long irqflags;
+
+	/*
+	 * It's obvious bug to request a register exceeding the maximum possible
+	 * value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return;
+
+	/* Lock GASA registers operations */
+	spin_lock_irqsave(&ndev->gasa_lock, irqflags);
+	/* Set the global register address */
+	iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+	/* Put the new value of the register */
+	iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+	/* Make sure the PCIe transactions are executed */
+	mmiowb();
+	/* Unlock GASA registers operations */
+	spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
+}
+
+/*
+ * idt_sw_read() - Global registers read method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to read data from
+ *
+ * IDT PCIe-switch Global configuration registers are all Little endian.
+ * Access goes through the shared GASA address/data register pair, so it
+ * is serialized by the gasa_lock spinlock.
+ *
+ * Return: register value, or ~0 on an invalid register argument
+ */
+static u32 idt_sw_read(struct idt_ntb_dev *ndev, const unsigned int reg)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	/*
+	 * It's obvious bug to request a register exceeding the maximum possible
+	 * value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return ~0;
+
+	/* Lock GASA registers operations */
+	spin_lock_irqsave(&ndev->gasa_lock, irqflags);
+	/* Set the global register address */
+	iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+	/* Get the data of the register (read ops acts as MMIO barrier) */
+	data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+	/* Unlock GASA registers operations */
+	spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
+
+	return data;
+}
+
+/*
+ * idt_reg_set_bits() - set bits of a passed register
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to change bits of
+ * @reg_lock:	Register access spin lock
+ * @valid_mask:	Mask of valid bits
+ * @set_bits:	Bitmask to set
+ *
+ * Helper method to check whether a passed bitfield is valid and set
+ * corresponding bits of a register.
+ *
+ * WARNING! Make sure the passed register isn't accessed over plain
+ * idt_nt_write() method (read method is ok to be used concurrently).
+ *
+ * Return: zero on success, negative error on invalid bitmask.
+ */
+static inline int idt_reg_set_bits(struct idt_ntb_dev *ndev, unsigned int reg,
+				   spinlock_t *reg_lock,
+				   u64 valid_mask, u64 set_bits)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	if (set_bits & ~(u64)valid_mask)
+		return -EINVAL;
+
+	/* Lock access to the register unless the change is written back */
+	spin_lock_irqsave(reg_lock, irqflags);
+	data = idt_nt_read(ndev, reg) | (u32)set_bits;
+	idt_nt_write(ndev, reg, data);
+	/* Unlock the register */
+	spin_unlock_irqrestore(reg_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * idt_reg_clear_bits() - clear bits of a passed register
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to change bits of
+ * @reg_lock:	Register access spin lock
+ * @clear_bits:	Bitmask to clear
+ *
+ * Helper method to check whether a passed bitfield is valid and clear
+ * corresponding bits of a register.
+ *
+ * NOTE! Invalid bits are always considered cleared so it's not an error
+ * to clear them over.
+ *
+ * WARNING! Make sure the passed register isn't accessed over plain
+ * idt_nt_write() method (read method is ok to use concurrently).
+ */
+static inline void idt_reg_clear_bits(struct idt_ntb_dev *ndev,
+				     unsigned int reg, spinlock_t *reg_lock,
+				     u64 clear_bits)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	/* Lock access to the register unless the change is written back */
+	spin_lock_irqsave(reg_lock, irqflags);
+	data = idt_nt_read(ndev, reg) & ~(u32)clear_bits;
+	idt_nt_write(ndev, reg, data);
+	/* Unlock the register */
+	spin_unlock_irqrestore(reg_lock, irqflags);
+}
+
+/*===========================================================================
+ *                           2. Ports operations
+ *
+ *    IDT PCIe-switches can have from 3 up to 8 ports with possible
+ * NT-functions enabled. So all the possible ports need to be scanned looking
+ * for NTB activated. NTB API will have enumerated only the ports with NTB.
+ *===========================================================================
+ */
+
+/*
+ * idt_scan_ports() - scan IDT PCIe-switch ports collecting info in the tables
+ * @ndev:	Pointer to the PCI device descriptor
+ *
+ * Determines the local port/partition and records every other port with an
+ * active NT-function into ndev->peers, filling the reverse lookup maps
+ * port_idx_map/part_idx_map along the way.
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_scan_ports(struct idt_ntb_dev *ndev)
+{
+	unsigned char pidx, port, part;
+	u32 data, portsts, partsts;
+
+	/* Retrieve the local port number */
+	data = idt_nt_read(ndev, IDT_NT_PCIELCAP);
+	ndev->port = GET_FIELD(PCIELCAP_PORTNUM, data);
+
+	/* Retrieve the local partition number */
+	portsts = idt_sw_read(ndev, portdata_tbl[ndev->port].sts);
+	ndev->part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+
+	/* Initialize port/partition -> index tables with invalid values */
+	memset(ndev->port_idx_map, -EINVAL, sizeof(ndev->port_idx_map));
+	memset(ndev->part_idx_map, -EINVAL, sizeof(ndev->part_idx_map));
+
+	/*
+	 * Walk over all the possible ports checking whether any of them has
+	 * NT-function activated
+	 */
+	ndev->peer_cnt = 0;
+	for (pidx = 0; pidx < ndev->swcfg->port_cnt; pidx++) {
+		port = ndev->swcfg->ports[pidx];
+		/* Skip local port */
+		if (port == ndev->port)
+			continue;
+
+		/* Read the port status register to get its partition */
+		portsts = idt_sw_read(ndev, portdata_tbl[port].sts);
+		part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+
+		/* Retrieve the partition status */
+		partsts = idt_sw_read(ndev, partdata_tbl[part].sts);
+		/* Check if partition state is active and port has NTB */
+		if (IS_FLD_SET(SWPARTxSTS_STATE, partsts, ACT) &&
+		    (IS_FLD_SET(SWPORTxSTS_MODE, portsts, NT) ||
+		     IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNT) ||
+		     IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNTDMA) ||
+		     IS_FLD_SET(SWPORTxSTS_MODE, portsts, NTDMA))) {
+			/* Save the port and partition numbers */
+			ndev->peers[ndev->peer_cnt].port = port;
+			ndev->peers[ndev->peer_cnt].part = part;
+			/* Fill in the port/partition -> index tables */
+			ndev->port_idx_map[port] = ndev->peer_cnt;
+			ndev->part_idx_map[part] = ndev->peer_cnt;
+			ndev->peer_cnt++;
+		}
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "Local port: %hhu, num of peers: %hhu\n",
+		ndev->port, ndev->peer_cnt);
+
+	/* It's useless to have this driver loaded if there is no peer */
+	if (ndev->peer_cnt == 0) {
+		dev_warn(&ndev->ntb.pdev->dev, "No active peer found\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * idt_ntb_port_number() - get the local port number
+ * @ntb:	NTB device context.
+ *
+ * Report the local port number detected by idt_scan_ports().
+ *
+ * Return: the local port number
+ */
+static int idt_ntb_port_number(struct ntb_dev *ntb)
+{
+	return to_ndev_ntb(ntb)->port;
+}
+
+/*
+ * idt_ntb_peer_port_count() - get the number of peer ports
+ * @ntb:	NTB device context.
+ *
+ * Report how many peer NT-functions were discovered by the port scan.
+ *
+ * Return: number of peer ports
+ */
+static int idt_ntb_peer_port_count(struct ntb_dev *ntb)
+{
+	return to_ndev_ntb(ntb)->peer_cnt;
+}
+
+/*
+ * idt_ntb_peer_port_number() - get peer port by given index
+ * @ntb:	NTB device context.
+ * @pidx:	Peer port index.
+ *
+ * Translate a peer index into the port number recorded by the port scan.
+ *
+ * Return: peer port or negative error
+ */
+static int idt_ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* Reject indexes outside the range of detected NT-functions */
+	if (pidx >= 0 && pidx < ndev->peer_cnt)
+		return ndev->peers[pidx].port;
+
+	return -EINVAL;
+}
+
+/*
+ * idt_ntb_peer_port_idx() - get peer port index by given port number
+ * @ntb:	NTB device context.
+ * @port:	Peer port number.
+ *
+ * The internal port -> index map is pre-initialized with -EINVAL entries,
+ * so for an in-range port its map entry can be returned directly.
+ *
+ * Return: peer NT-function port index or negative error
+ */
+static int idt_ntb_peer_port_idx(struct ntb_dev *ntb, int port)
+{
+	if (port < 0 || port >= IDT_MAX_NR_PORTS)
+		return -EINVAL;
+
+	return to_ndev_ntb(ntb)->port_idx_map[port];
+}
+
+/*===========================================================================
+ *                         3. Link status operations
+ *    There is no ready-to-use method to have peer ports notified when the
+ * NTB link is set up or goes down. Instead, a global signal is used. When
+ * any one of the ports changes its local NTB link state, it sends the
+ * global signal and clears the corresponding global state bit. All the
+ * ports then receive a notification of that, which makes the client
+ * driver aware of a possible NTB link change.
+ *    Additionally each of the active NT-functions is subscribed to
+ * PCIe-link state changes of the peer ports.
+ *===========================================================================
+ */
+
+/* Forward declaration — defined later in this file */
+static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev);
+
+/*
+ * idt_init_link() - Initialize NTB link state notification subsystem
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Function performs the basic initialization of some global registers
+ * needed to enable IRQ-based notifications of PCIe Link Up/Down and
+ * Global Signal events.
+ * NOTE Since it's not possible to determine when all the NTB peer drivers are
+ * unloaded as well as have those registers accessed concurrently, we must
+ * preinitialize them with the same value and leave it uncleared on local
+ * driver unload.
+ */
+static void idt_init_link(struct idt_ntb_dev *ndev)
+{
+	u32 part_mask, port_mask, se_mask;
+	unsigned char pidx;
+
+	/* Initialize spin locker of Mapping Table access registers */
+	spin_lock_init(&ndev->mtbl_lock);
+
+	/* Walk over all detected peers collecting port and partition masks.
+	 * A cleared mask bit means the corresponding event is enabled.
+	 */
+	port_mask = ~BIT(ndev->port);
+	part_mask = ~BIT(ndev->part);
+	for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+		port_mask &= ~BIT(ndev->peers[pidx].port);
+		part_mask &= ~BIT(ndev->peers[pidx].part);
+	}
+
+	/* Clean the Link Up/Down and Global Signal status registers */
+	idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
+	idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
+	idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
+
+	/* Unmask NT-activated partitions to receive Global Switch events */
+	idt_sw_write(ndev, IDT_SW_SEPMSK, part_mask);
+
+	/* Enable PCIe Link Up events of NT-activated ports */
+	idt_sw_write(ndev, IDT_SW_SELINKUPMSK, port_mask);
+
+	/* Enable PCIe Link Down events of NT-activated ports */
+	idt_sw_write(ndev, IDT_SW_SELINKDNMSK, port_mask);
+
+	/* Unmask NT-activated partitions to receive Global Signal events */
+	idt_sw_write(ndev, IDT_SW_SEGSIGMSK, part_mask);
+
+	/* Unmask Link Up/Down and Global Switch Events */
+	se_mask = ~(IDT_SEMSK_LINKUP | IDT_SEMSK_LINKDN | IDT_SEMSK_GSIGNAL);
+	idt_sw_write(ndev, IDT_SW_SEMSK, se_mask);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events initialized");
+}
+
+/*
+ * idt_deinit_link() - deinitialize link subsystem
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Just disable the local NTB link. The global event registers are
+ * intentionally left as initialized (see the idt_init_link() NOTE).
+ */
+static void idt_deinit_link(struct idt_ntb_dev *ndev)
+{
+	/* Disable the link */
+	idt_ntb_local_link_disable(ndev);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events deinitialized");
+}
+
+/*
+ * idt_se_isr() - switch events ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * This driver doesn't support IDT PCIe-switch dynamic reconfigurations,
+ * Failover capability, etc, so switch events are utilized to notify of
+ * PCIe and NTB link events.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_se_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	u32 sests;
+
+	/* Read Switch Events status (only reported in the debug message) */
+	sests = idt_sw_read(ndev, IDT_SW_SESTS);
+
+	/* Clean the Link Up/Down and Global Signal status registers */
+	idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
+	idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
+	idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
+
+	/* Clean the corresponding interrupt bit */
+	idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_SEVENT);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "SE IRQ detected %#08x (SESTS %#08x)",
+			  ntint_sts, sests);
+
+	/* Notify the client driver of possible link state change */
+	ntb_link_event(&ndev->ntb);
+}
+
+/*
+ * idt_ntb_local_link_enable() - enable the local NTB link.
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * In order to enable the NTB link we need:
+ * - enable Completion TLPs translation
+ * - initialize mapping table to enable the Request ID translation
+ * - notify peers of NTB link state change
+ */
+static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev)
+{
+	u32 reqid, mtbldata = 0;
+	unsigned long irqflags;
+
+	/* Enable the ID protection and Completion TLPs translation */
+	idt_nt_write(ndev, IDT_NT_NTCTL, IDT_NTCTL_CPEN);
+
+	/* Retrieve the current Requester ID (Bus:Device:Function) */
+	reqid = idt_nt_read(ndev, IDT_NT_REQIDCAP);
+
+	/*
+	 * Set the corresponding NT Mapping table entry of port partition index
+	 * with the data to perform the Request ID translation
+	 */
+	mtbldata = SET_FIELD(NTMTBLDATA_REQID, 0, reqid) |
+		   SET_FIELD(NTMTBLDATA_PART, 0, ndev->part) |
+		   IDT_NTMTBLDATA_VALID;
+	/* NTMTBLADDR/NTMTBLDATA form an indexed pair, hence the locking */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+	idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
+	/* Order the MMIO writes with respect to the lock release */
+	mmiowb();
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	/* Notify the peers by setting and clearing the global signal bit */
+	idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
+	idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
+}
+
+/*
+ * idt_ntb_local_link_disable() - disable the local NTB link.
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * In order to disable the NTB link we need:
+ * - disable Completion TLPs translation
+ * - clear corresponding mapping table entry
+ * - notify peers of NTB link state change
+ */
+static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
+{
+	unsigned long irqflags;
+
+	/* Disable Completion TLPs translation */
+	idt_nt_write(ndev, IDT_NT_NTCTL, 0);
+
+	/* Clear the corresponding NT Mapping table entry */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+	idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
+	/* Order the MMIO writes with respect to the lock release */
+	mmiowb();
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	/* Notify the peers by setting and clearing the global signal bit */
+	idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
+	idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
+}
+
+/*
+ * idt_ntb_local_link_is_up() - test whether local NTB link is up
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Local link is up under the following conditions:
+ * - Bus mastering is enabled
+ * - NTCTL has Completion TLPs translation enabled
+ * - Mapping table permits Request TLPs translation
+ * NOTE: We don't need to check PCIe link state since it's obviously
+ * up while we are able to communicate with IDT PCIe-switch
+ *
+ * Return: true if link is up, otherwise false
+ */
+static bool idt_ntb_local_link_is_up(struct idt_ntb_dev *ndev)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	/* Read the local Bus Master Enable status */
+	data = idt_nt_read(ndev, IDT_NT_PCICMDSTS);
+	if (!(data & IDT_PCICMDSTS_BME))
+		return false;
+
+	/* Read the local Completion TLPs translation enable status */
+	data = idt_nt_read(ndev, IDT_NT_NTCTL);
+	if (!(data & IDT_NTCTL_CPEN))
+		return false;
+
+	/* Read Mapping table entry corresponding to the local partition */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+	data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	return !!(data & IDT_NTMTBLDATA_VALID);
+}
+
+/*
+ * idt_ntb_peer_link_is_up() - test whether peer NTB link is up
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @pidx:	Peer port index (must be valid; not range-checked here)
+ *
+ * Peer link is up under the following conditions:
+ * - PCIe link is up
+ * - Bus mastering is enabled
+ * - NTCTL has Completion TLPs translation enabled
+ * - Mapping table permits Request TLPs translation
+ *
+ * Return: true if link is up, otherwise false
+ */
+static bool idt_ntb_peer_link_is_up(struct idt_ntb_dev *ndev, int pidx)
+{
+	unsigned long irqflags;
+	unsigned char port;
+	u32 data;
+
+	/* Retrieve the device port number */
+	port = ndev->peers[pidx].port;
+
+	/* Check whether PCIe link is up */
+	data = idt_sw_read(ndev, portdata_tbl[port].sts);
+	if (!(data & IDT_SWPORTxSTS_LINKUP))
+		return false;
+
+	/* Check whether bus mastering is enabled on the peer port */
+	data = idt_sw_read(ndev, portdata_tbl[port].pcicmdsts);
+	if (!(data & IDT_PCICMDSTS_BME))
+		return false;
+
+	/* Check if Completion TLPs translation is enabled on the peer port */
+	data = idt_sw_read(ndev, portdata_tbl[port].ntctl);
+	if (!(data & IDT_NTCTL_CPEN))
+		return false;
+
+	/* Read Mapping table entry corresponding to the peer partition */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->peers[pidx].part);
+	data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	return !!(data & IDT_NTMTBLDATA_VALID);
+}
+
+/*
+ * idt_ntb_link_is_up() - get the current ntb link state (NTB API callback)
+ * @ntb:	NTB device context.
+ * @speed:	OUT - The link speed expressed as PCIe generation number.
+ * @width:	OUT - The link width expressed as the number of PCIe lanes.
+ *
+ * Collect the NTB link states of all the peer ports into one bitfield.
+ *
+ * Return: bitfield of indexed ports link state: bit is set/cleared if the
+ *         link is up/down respectively.
+ */
+static u64 idt_ntb_link_is_up(struct ntb_dev *ntb,
+			      enum ntb_speed *speed, enum ntb_width *width)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	unsigned char pidx;
+	u64 link_sts = 0;
+	u32 lctlsts;
+
+	/* Report the local PCIe link speed/width if the caller asked for it */
+	if (speed || width) {
+		lctlsts = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS);
+		if (speed)
+			*speed = GET_FIELD(PCIELCTLSTS_CLS, lctlsts);
+		if (width)
+			*width = GET_FIELD(PCIELCTLSTS_NLW, lctlsts);
+	}
+
+	/* All the peer links are considered down if the local one is down */
+	if (!idt_ntb_local_link_is_up(ndev))
+		return 0;
+
+	/* Accumulate the per-peer link states into the resulting bitfield */
+	for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+		if (idt_ntb_peer_link_is_up(ndev, pidx))
+			link_sts |= ((u64)1 << pidx);
+	}
+
+	return link_sts;
+}
+
+/*
+ * idt_ntb_link_enable() - enable local port ntb link (NTB API callback)
+ * @ntb:	NTB device context.
+ * @speed:	The maximum link speed expressed as PCIe generation number.
+ * @width:	The maximum link width expressed as the number of PCIe lanes.
+ *
+ * Enable just local NTB link. PCIe link parameters are ignored.
+ *
+ * Return: always zero.
+ */
+static int idt_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed speed,
+			       enum ntb_width width)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* Just enable the local NTB link */
+	idt_ntb_local_link_enable(ndev);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link enabled");
+
+	return 0;
+}
+
+/*
+ * idt_ntb_link_disable() - disable local port ntb link (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * Only the local end of the NTB link is turned off.
+ *
+ * Return: always zero.
+ */
+static int idt_ntb_link_disable(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* The NTB API gives no way to disable peer links selectively */
+	idt_ntb_local_link_disable(ndev);
+	dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link disabled");
+
+	return 0;
+}
+
+/*=============================================================================
+ *                         4. Memory Window operations
+ *
+ *    IDT PCIe-switches have two types of memory windows: MWs with direct
+ * address translation and MWs with LUT based translation. The first type of
+ * MWs is a simple map of the corresponding BAR address space to a memory
+ * space of the specified target port, so it implements just one-to-one
+ * mapping. A lookup table in its turn can map one BAR address space to up
+ * to 24 different memory spaces of different ports.
+ *    NT-functions BARs can be turned on to implement either direct or lookup
+ * table based address translations, so:
+ * BAR0 - NT configuration registers space/direct address translation
+ * BAR1 - direct address translation/upper address of BAR0x64
+ * BAR2 - direct address translation/Lookup table with either 12 or 24 entries
+ * BAR3 - direct address translation/upper address of BAR2x64
+ * BAR4 - direct address translation/Lookup table with either 12 or 24 entries
+ * BAR5 - direct address translation/upper address of BAR4x64
+ *    Additionally BAR2 and BAR4 can't have 24-entries LUT enabled at the same
+ * time. Since the BARs setup can be rather complicated this driver implements
+ * a scanning algorithm to have all the possible memory windows configuration
+ * covered.
+ *
+ * NOTE 1 BAR setup must be done before Linux kernel enumerated NT-function
+ * of any port, so this driver would have memory windows configurations fixed.
+ * In this way all initializations must be performed either by platform BIOS
+ * or using EEPROM connected to IDT PCIe-switch master SMBus.
+ *
+ * NOTE 2 This driver expects BAR0 mapping NT-function configuration space.
+ * Easy calculation can give us an upper boundary of 29 possible memory windows
+ * per each NT-function if all the BARs are of 32bit type.
+ *=============================================================================
+ */
+
+/*
+ * idt_get_mw_count() - get memory window count
+ * @mw_type:	Memory window type
+ *
+ * Return: number of memory windows with respect to the BAR type
+ */
+static inline unsigned char idt_get_mw_count(enum idt_mw_type mw_type)
+{
+	switch (mw_type) {
+	case IDT_MW_DIR:
+		return 1;
+	case IDT_MW_LUT12:
+		return 12;
+	case IDT_MW_LUT24:
+		return 24;
+	default:
+		/* Unknown translation type exposes no memory windows */
+		return 0;
+	}
+}
+
+/*
+ * idt_get_mw_name() - get memory window name
+ * @mw_type:	Memory window type
+ *
+ * Return: pointer to a string with name
+ */
+static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
+{
+	switch (mw_type) {
+	case IDT_MW_DIR:
+		return "DIR  ";
+	case IDT_MW_LUT12:
+		return "LUT12";
+	case IDT_MW_LUT24:
+		return "LUT24";
+	default:
+		/* Fall through to the unrecognized-type name */
+		return "unknown";
+	}
+}
+
+/*
+ * idt_scan_mws() - scan memory windows of the port
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @port:	Port to get number of memory windows for
+ * @mw_cnt:	Out - number of memory windows
+ *
+ * It walks over BAR setup registers of the specified port and determines
+ * the memory windows parameters if any activated.
+ *
+ * Return: array of memory windows
+ */
+static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
+				       unsigned char *mw_cnt)
+{
+	struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
+	const struct idt_ntb_bar *bars;
+	enum idt_mw_type mw_type;
+	unsigned char widx, bidx, en_cnt;
+	bool bar_64bit = false;
+	/* Must be u64: BARSETUP_SIZE encodes a power of two that can exceed
+	 * the range of int (apertures of 2GB and above)
+	 */
+	u64 aprt_size;
+	u32 data;
+
+	/* Retrieve the array of the BARs registers */
+	bars = portdata_tbl[port].bars;
+
+	/* Scan all the BARs belonging to the port */
+	*mw_cnt = 0;
+	for (bidx = 0; bidx < IDT_BAR_CNT; bidx += 1 + bar_64bit) {
+		/* Read BARSETUP register value */
+		data = idt_sw_read(ndev, bars[bidx].setup);
+
+		/* Skip disabled BARs */
+		if (!(data & IDT_BARSETUP_EN)) {
+			bar_64bit = false;
+			continue;
+		}
+
+		/* Skip next BARSETUP if current one has 64bit addressing */
+		bar_64bit = IS_FLD_SET(BARSETUP_TYPE, data, 64);
+
+		/* Skip configuration space mapping BARs */
+		if (data & IDT_BARSETUP_MODE_CFG)
+			continue;
+
+		/* Retrieve MW type/entries count and aperture size */
+		mw_type = GET_FIELD(BARSETUP_ATRAN, data);
+		en_cnt = idt_get_mw_count(mw_type);
+		aprt_size = (u64)1 << GET_FIELD(BARSETUP_SIZE, data);
+
+		/* Save configurations of all available memory windows */
+		for (widx = 0; widx < en_cnt; widx++, (*mw_cnt)++) {
+			/*
+			 * IDT can expose a limited number of MWs, so it's bug
+			 * to have more than the driver expects
+			 */
+			if (*mw_cnt >= IDT_MAX_NR_MWS)
+				return ERR_PTR(-EINVAL);
+
+			/* Save basic MW info */
+			mws[*mw_cnt].type = mw_type;
+			mws[*mw_cnt].bar = bidx;
+			mws[*mw_cnt].idx = widx;
+			/* It's always DWORD aligned */
+			mws[*mw_cnt].addr_align = IDT_TRANS_ALIGN;
+			/* DIR and LUT approaches differently configure MWs */
+			if (mw_type == IDT_MW_DIR)
+				mws[*mw_cnt].size_max = aprt_size;
+			else if (mw_type == IDT_MW_LUT12)
+				mws[*mw_cnt].size_max = aprt_size / 16;
+			else
+				mws[*mw_cnt].size_max = aprt_size / 32;
+			mws[*mw_cnt].size_align = (mw_type == IDT_MW_DIR) ?
+				IDT_DIR_SIZE_ALIGN : mws[*mw_cnt].size_max;
+		}
+	}
+
+	/* Allocate memory for memory window descriptors.
+	 * NOTE(review): with *mw_cnt == 0 devm_kcalloc() returns a
+	 * zero-size-allocation pointer — confirm callers tolerate an
+	 * empty MW array.
+	 */
+	ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
+			       GFP_KERNEL);
+	if (!ret_mws)
+		return ERR_PTR(-ENOMEM);
+
+	/* Copy the info of detected memory windows */
+	memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
+
+	return ret_mws;
+}
+
+/*
+ * idt_init_mws() - initialize memory windows subsystem
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Scan BAR setup registers of local and peer ports to determine the
+ * outbound and inbound memory windows parameters
+ *
+ * Return: zero on success, otherwise a negative error number
+ */
+static int idt_init_mws(struct idt_ntb_dev *ndev)
+{
+	struct idt_ntb_peer *peer;
+	unsigned char pidx;
+
+	/* Scan memory windows of the local port */
+	ndev->mws = idt_scan_mws(ndev, ndev->port, &ndev->mw_cnt);
+	if (IS_ERR(ndev->mws)) {
+		dev_err(&ndev->ntb.pdev->dev,
+			"Failed to scan mws of local port %hhu", ndev->port);
+		return PTR_ERR(ndev->mws);
+	}
+
+	/* Scan memory windows of the peer ports */
+	for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+		peer = &ndev->peers[pidx];
+		peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt);
+		if (IS_ERR(peer->mws)) {
+			dev_err(&ndev->ntb.pdev->dev,
+				"Failed to scan mws of port %hhu", peer->port);
+			return PTR_ERR(peer->mws);
+		}
+	}
+
+	/* Initialize spin locker of the LUT registers */
+	spin_lock_init(&ndev->lut_lock);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "Outbound and inbound MWs initialized");
+
+	return 0;
+}
+
+/*
+ * idt_ntb_mw_count() - number of inbound memory windows (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of peer device.
+ *
+ * The MWs count was determined at the peer ports scanning time, so it may
+ * differ from one peer port to another depending on the IDT PCIe-switch
+ * pre-initialization.
+ *
+ * Return: the number of memory windows or negative error.
+ */
+static int idt_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (pidx < 0 || pidx >= ndev->peer_cnt)
+		return -EINVAL;
+
+	return ndev->peers[pidx].mw_cnt;
+}
+
+/*
+ * idt_ntb_mw_get_align() - inbound memory window parameters (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of peer device.
+ * @widx:	Memory window index.
+ * @addr_align:	OUT - the base alignment for translating the memory window
+ * @size_align:	OUT - the size alignment for translating the memory window
+ * @size_max:	OUT - the maximum size of the memory window
+ *
+ * The peer memory window parameters were determined at the scan time and
+ * don't change within a session, so the cached values are reported.
+ * Any of the output pointers may be NULL if the caller isn't interested.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static int idt_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
+				resource_size_t *addr_align,
+				resource_size_t *size_align,
+				resource_size_t *size_max)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	const struct idt_mw_cfg *mw;
+
+	if (pidx < 0 || pidx >= ndev->peer_cnt)
+		return -EINVAL;
+
+	if (widx < 0 || widx >= ndev->peers[pidx].mw_cnt)
+		return -EINVAL;
+
+	mw = &ndev->peers[pidx].mws[widx];
+
+	if (addr_align)
+		*addr_align = mw->addr_align;
+	if (size_align)
+		*size_align = mw->size_align;
+	if (size_max)
+		*size_max = mw->size_max;
+
+	return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_count() - number of outbound memory windows
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * The outbound MWs were counted at the BAR setup registers scan time and
+ * stay constant within a session.
+ *
+ * Return: the number of memory windows.
+ */
+static int idt_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+	return to_ndev_ntb(ntb)->mw_cnt;
+}
+
+/*
+ * idt_ntb_peer_mw_get_addr() - get map address of an outbound memory window
+ *				(NTB API callback)
+ * @ntb:	NTB device context.
+ * @widx:	Memory window index (within ntb_peer_mw_count() return value).
+ * @base:	OUT - the base address of mapping region.
+ * @size:	OUT - the size of mapping region.
+ *
+ * Report the BAR resource mapping parameters. The size reflects just the
+ * size of the resource. Either output pointer may be NULL.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static int idt_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
+				    phys_addr_t *base, resource_size_t *size)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	const struct idt_mw_cfg *mw;
+
+	if (widx < 0 || widx >= ndev->mw_cnt)
+		return -EINVAL;
+
+	mw = &ndev->mws[widx];
+
+	/* The base is the BAR resource start shifted by the window offset */
+	if (base)
+		*base = pci_resource_start(ntb->pdev, mw->bar) +
+			mw->idx * mw->size_max;
+
+	/* The size was calculated at the MWs scanning time */
+	if (size)
+		*size = mw->size_max;
+
+	return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_set_trans() - set a translation address of a memory window
+ *				 (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of peer device the translation address received from.
+ * @widx:	Memory window index.
+ * @addr:	The dma address of the shared memory to access.
+ * @size:	The size of the shared memory to access.
+ *
+ * The Direct address translation and LUT base translation is initialized a
+ * bit different. Although the parameters restriction are now determined by
+ * the same code.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+				     u64 addr, resource_size_t size)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	struct idt_mw_cfg *mw_cfg;
+	u32 data = 0, lutoff = 0;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	if (widx < 0 || ndev->mw_cnt <= widx)
+		return -EINVAL;
+
+	/*
+	 * Retrieve the memory window config to make sure the passed arguments
+	 * fit its restrictions
+	 */
+	mw_cfg = &ndev->mws[widx];
+	if (!IS_ALIGNED(addr, mw_cfg->addr_align))
+		return -EINVAL;
+	if (!IS_ALIGNED(size, mw_cfg->size_align) || size > mw_cfg->size_max)
+		return -EINVAL;
+
+	/* DIR and LUT based translations are initialized differently */
+	if (mw_cfg->type == IDT_MW_DIR) {
+		const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
+		u64 limit;
+		/* Set destination partition of translation */
+		data = idt_nt_read(ndev, bar->setup);
+		data = SET_FIELD(BARSETUP_TPART, data, ndev->peers[pidx].part);
+		idt_nt_write(ndev, bar->setup, data);
+		/* Set translation base address (low then high DWORD) */
+		idt_nt_write(ndev, bar->ltbase, (u32)addr);
+		idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32));
+		/* Set the custom BAR aperture limit */
+		limit = pci_resource_start(ntb->pdev, mw_cfg->bar) + size;
+		idt_nt_write(ndev, bar->limit, (u32)limit);
+		/* A 64bit BAR occupies the next BAR slot for the upper half */
+		if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
+			idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32));
+	} else {
+		unsigned long irqflags;
+		/* Initialize corresponding LUT entry */
+		lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
+			 SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
+		data = SET_FIELD(LUTUDATA_PART, 0, ndev->peers[pidx].part) |
+			IDT_LUTUDATA_VALID;
+		/* LUTOFFSET/LUT*DATA form an indexed pair, hence the locking */
+		spin_lock_irqsave(&ndev->lut_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
+		idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
+		idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
+		idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
+		/* Order the MMIO writes with respect to the lock release */
+		mmiowb();
+		spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
+		/* Limit address isn't specified since size is fixed for LUT */
+	}
+
+	return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_clear_trans() - clear the outbound MW translation address
+ *				   (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of peer device.
+ * @widx:	Memory window index.
+ *
+ * It effectively disables the translation over the specified outbound MW.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
+					int widx)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	struct idt_mw_cfg *mw_cfg;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	if (widx < 0 || ndev->mw_cnt <= widx)
+		return -EINVAL;
+
+	mw_cfg = &ndev->mws[widx];
+
+	/* DIR and LUT based translations are initialized differently */
+	if (mw_cfg->type == IDT_MW_DIR) {
+		const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
+		u32 data;
+		/* Read BARSETUP to check BAR type */
+		data = idt_nt_read(ndev, bar->setup);
+		/* Disable translation by specifying zero BAR limit */
+		idt_nt_write(ndev, bar->limit, 0);
+		/* A 64bit BAR occupies the next BAR slot for the upper half */
+		if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
+			idt_nt_write(ndev, (bar + 1)->limit, 0);
+	} else {
+		unsigned long irqflags;
+		u32 lutoff;
+		/* Clear the corresponding LUT entry up */
+		lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
+			 SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
+		/* LUTOFFSET/LUT*DATA form an indexed pair, hence the locking */
+		spin_lock_irqsave(&ndev->lut_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
+		idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
+		idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
+		idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
+		/* Order the MMIO writes with respect to the lock release */
+		mmiowb();
+		spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
+	}
+
+	return 0;
+}
+
+/*=============================================================================
+ *                          5. Doorbell operations
+ *
+ *    Doorbell functionality of IDT PCIe-switches is pretty unusual. First of
+ * all there is a global doorbell register whose state can be changed by any
+ * NT-function of the IDT device in accordance with global permissions. These
+ * permission configs are not supported by the NTB API, so they must be set
+ * by either BIOS or EEPROM settings. In the same way the state of the global
+ * doorbell is reflected to the NT-functions' local inbound doorbell registers.
+ * It can lead to situations when a client driver sets some peer doorbell bits
+ * and gets them bounced back to the local inbound doorbell if permissions are
+ * granted.
+ *    Secondly there is just one IRQ vector for Doorbell, Message, Temperature
+ * and Switch events, so if client driver left any of Doorbell bits set and
+ * some other event occurred, the driver will be notified of Doorbell event
+ * again.
+ *=============================================================================
+ */
+
+/*
+ * idt_db_isr() - doorbell event ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * Doorbell event happens when DBELL bit of NTINTSTS switches from 0 to 1.
+ * It happens only when unmasked doorbell bits are set to ones on completely
+ * zeroed doorbell register.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_db_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	/*
+	 * Doorbell IRQ status will be cleaned only when client
+	 * driver unsets all the doorbell bits.
+	 */
+	dev_dbg(&ndev->ntb.pdev->dev, "DB IRQ detected %#08x", ntint_sts);
+
+	/* Notify the client driver of possible doorbell state change */
+	ntb_db_event(&ndev->ntb, 0);
+}
+
+/*
+ * idt_ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * The only doorbell register of IDT PCIe-switches is of DWORD size, so the
+ * valid mask is a constant.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+static u64 idt_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+	return IDT_DBELL_MASK;
+}
+
+/*
+ * idt_ntb_db_read() - read the local doorbell register (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * Each NT-function has just one inbound doorbell register, so this method
+ * returns its value.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+static u64 idt_ntb_db_read(struct ntb_dev *ntb)
+{
+	return idt_nt_read(to_ndev_ntb(ntb), IDT_NT_INDBELLSTS);
+}
+
+/*
+ * idt_ntb_db_clear() - clear bits in the local doorbell register
+ *			(NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell bits to clear.
+ *
+ * Writing ones to the inbound doorbell register clears the selected bits.
+ *
+ * NOTE! Invalid bits are always considered cleared so it's not an error
+ * to clear them over.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+	idt_nt_write(to_ndev_ntb(ntb), IDT_NT_INDBELLSTS, (u32)db_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_db_read_mask() - read the local doorbell mask (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * A set bit in the inbound doorbell mask suppresses the IRQ of the
+ * corresponding doorbell bit, so this method just reports the current
+ * mask register value.
+ *
+ * Return: The bits currently set in the local doorbell mask register.
+ */
+static u64 idt_ntb_db_read_mask(struct ntb_dev *ntb)
+{
+	return idt_nt_read(to_ndev_ntb(ntb), IDT_NT_INDBELLMSK);
+}
+
+/*
+ * idt_ntb_db_set_mask() - set bits in the local doorbell mask
+ *			   (NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell mask bits to set.
+ *
+ * The read-modify-write of the inbound doorbell mask register is done
+ * under the dedicated lock by the idt_reg_set_bits() helper.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_reg_set_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
+				IDT_DBELL_MASK, db_bits);
+}
+
+/*
+ * idt_ntb_db_clear_mask() - clear bits in the local doorbell mask
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell bits to clear.
+ *
+ * Unset the requested bits of the inbound doorbell mask. IDT PCIe-switch
+ * shall generate an interrupt if there hasn't been any unmasked bit set
+ * before current unmasking. Otherwise IRQ won't be generated since there
+ * is only one IRQ vector for all doorbells.
+ *
+ * Return: always zero as success
+ */
+static int idt_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* The helper performs the locked read-modify-write for us */
+	idt_reg_clear_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
+			   db_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_peer_db_set() - set bits in the peer doorbell register
+ *			   (NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell bits to set.
+ *
+ * Writing the local outbound doorbell register changes the peer inbound
+ * doorbell register state.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* Reject bits outside of the supported doorbell range */
+	if (db_bits & ~(u64)IDT_DBELL_MASK)
+		return -EINVAL;
+
+	idt_nt_write(ndev, IDT_NT_OUTDBELLSET, (u32)db_bits);
+
+	return 0;
+}
+
+/*=============================================================================
+ *                          6. Messaging operations
+ *
+ *    Each NT-function of an IDT PCIe-switch has four inbound and four outbound
+ * message registers. Each outbound message register can be connected to one or
+ * even more than one peer inbound message registers by setting global
+ * configurations. Since the NTB API permits only one-on-one message register
+ * mapping, the driver acts in accordance with that restriction.
+ *=============================================================================
+ */
+
+/*
+ * idt_init_msg() - initialize messaging interface
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Prepare the spin lockers protecting the message register routing tables.
+ */
+static void idt_init_msg(struct idt_ntb_dev *ndev)
+{
+	unsigned char idx;
+
+	for (idx = 0; idx < IDT_MSG_CNT; idx++)
+		spin_lock_init(&ndev->msg_locks[idx]);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB Messaging initialized");
+}
+
+/*
+ * idt_msg_isr() - message event ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * A message event is signalled when the MSG bit of NTINTSTS goes from 0 to 1,
+ * which only happens when unmasked message status bits get set on a fully
+ * zeroed message status register. The IRQ status stays asserted until the
+ * client driver clears all the message status bits, so it isn't cleared here.
+ * The method is called from the PCIe ISR bottom-half routine.
+ */
+static void idt_msg_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	dev_dbg(&ndev->ntb.pdev->dev, "Message IRQ detected %#08x", ntint_sts);
+
+	/* Let the client driver handle the message status change */
+	ntb_msg_event(&ndev->ntb);
+}
+
+/*
+ * idt_ntb_msg_count() - get the number of message registers (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * IDT PCIe-switches support four message registers.
+ *
+ * Return: the number of message registers.
+ */
+static int idt_ntb_msg_count(struct ntb_dev *ntb)
+{
+	/* Fixed by the hardware design - no register access needed */
+	return IDT_MSG_CNT;
+}
+
+/*
+ * idt_ntb_msg_inbits() - get a bitfield of inbound message registers status
+ *			  (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * NT message status register is shared between inbound and outbound message
+ * registers status
+ *
+ * Return: bitfield of inbound message registers.
+ */
+static u64 idt_ntb_msg_inbits(struct ntb_dev *ntb)
+{
+	/* Constant bitfield - no hardware access required */
+	return (u64)IDT_INMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_outbits() - get a bitfield of outbound message registers status
+ *			  (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * NT message status register is shared between inbound and outbound message
+ * registers status
+ *
+ * Return: bitfield of outbound message registers.
+ */
+static u64 idt_ntb_msg_outbits(struct ntb_dev *ntb)
+{
+	/* Constant bitfield - no hardware access required */
+	return (u64)IDT_OUTMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_read_sts() - read the message registers status (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * IDT PCIe-switches expose message status registers to notify drivers of
+ * incoming data and failures in case the peer message register isn't freed.
+ *
+ * Return: status bits of message registers
+ */
+static u64 idt_ntb_msg_read_sts(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* MSGSTS holds both the inbound and outbound status bits */
+	return idt_nt_read(ndev, IDT_NT_MSGSTS);
+}
+
+/*
+ * idt_ntb_msg_clear_sts() - clear status bits of message registers
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ * @sts_bits:	Status bits to clear.
+ *
+ * Clear bits in the status register by writing ones.
+ *
+ * NOTE! Invalid bits are always considered cleared so it's not an error
+ * to clear them over.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* MSGSTS is write-1-to-clear, so just write the bits back */
+	idt_nt_write(ndev, IDT_NT_MSGSTS, sts_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_set_mask() - set mask of message register status bits
+ *			    (NTB API callback)
+ * @ntb:	NTB device context.
+ * @mask_bits:	Mask bits.
+ *
+ * Mask the message status bits from raising an IRQ.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* idt_reg_set_bits() presumably validates mask_bits against the
+	 * IDT_MSG_MASK argument and reports invalid ones - see its
+	 * definition earlier in this file */
+	return idt_reg_set_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+				IDT_MSG_MASK, mask_bits);
+}
+
+/*
+ * idt_ntb_msg_clear_mask() - clear message registers mask
+ *			      (NTB API callback)
+ * @ntb:	NTB device context.
+ * @mask_bits:	Mask bits.
+ *
+ * Clear mask of message status bits IRQs.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	/* Drop the requested bits of the message status mask register,
+	 * serialized by msg_mask_lock; clearing is unconditional */
+	idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+			   mask_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_read() - read message register with specified index
+ *			(NTB API callback)
+ * @ntb:	NTB device context.
+ * @midx:	Message register index
+ * @pidx:	OUT - Port index of peer device a message retrieved from
+ * @msg:	OUT - Data
+ *
+ * Read data from the specified message register and source register.
+ * Either output pointer may be NULL if the caller is not interested
+ * in the corresponding value.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (midx < 0 || IDT_MSG_CNT <= midx)
+		return -EINVAL;
+
+	/* Retrieve source port index of the message */
+	if (pidx != NULL) {
+		u32 srcpart;
+
+		srcpart = idt_nt_read(ndev, ntdata_tbl.msgs[midx].src);
+		*pidx = ndev->part_idx_map[srcpart];
+
+		/* Sanity check partition index (for initial case): an
+		 * unmapped source partition reads back as -EINVAL, so
+		 * fall back to peer 0 */
+		if (*pidx == -EINVAL)
+			*pidx = 0;
+	}
+
+	/* Retrieve data of the corresponding message register */
+	if (msg != NULL)
+		*msg = idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_write() - write data to the specified message register
+ *			 (NTB API callback)
+ * @ntb:	NTB device context.
+ * @midx:	Message register index
+ * @pidx:	Port index of peer device a message being sent to
+ * @msg:	Data to send
+ *
+ * Just try to send data to a peer. Message status register should be
+ * checked by client driver.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, u32 msg)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	unsigned long irqflags;
+	u32 swpmsgctl = 0;
+
+	if (midx < 0 || IDT_MSG_CNT <= midx)
+		return -EINVAL;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	/* Collect the routing information */
+	swpmsgctl = SET_FIELD(SWPxMSGCTL_REG, 0, midx) |
+		    SET_FIELD(SWPxMSGCTL_PART, 0, ndev->peers[pidx].part);
+
+	/* Lock the messages routing table of the specified register so the
+	 * route-set/data-write pair below stays atomic w.r.t. other senders */
+	spin_lock_irqsave(&ndev->msg_locks[midx], irqflags);
+	/* Set the route and send the data */
+	idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
+	idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
+	/* Make sure both MMIO writes complete before the lock is dropped */
+	mmiowb();
+	/* Unlock the messages routing table */
+	spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
+
+	/* Client driver shall check the status register */
+	return 0;
+}
+
+/*=============================================================================
+ *                      7. Temperature sensor operations
+ *
+ *    IDT PCIe-switch has an embedded temperature sensor, which can be used to
+ * warn a user-space of possible chip overheating. Since workload temperature
+ * can be different on different platforms, temperature thresholds as well as
+ * general sensor settings must be setup in the framework of BIOS/EEPROM
+ * initializations. It includes the actual sensor enabling as well.
+ *=============================================================================
+ */
+
+/*
+ * idt_read_temp() - read temperature from chip sensor
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @val:	OUT - integer part of the temperature (degrees)
+ * @frac:	OUT - fractional part: 0 or 5 (i.e. tenths of a degree,
+ *		since the sensor has half-degree resolution)
+ */
+static void idt_read_temp(struct idt_ntb_dev *ndev, unsigned char *val,
+			  unsigned char *frac)
+{
+	u32 data;
+
+	/* Read the data from TEMP field of the TMPSTS register */
+	data = idt_sw_read(ndev, IDT_SW_TMPSTS);
+	data = GET_FIELD(TMPSTS_TEMP, data);
+	/* TEMP field has one fractional bit and seven integer bits */
+	*val = data >> 1;
+	*frac = ((data & 0x1) ? 5 : 0);
+}
+
+/*
+ * idt_temp_isr() - temperature sensor alarm events ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * It handles events of temperature crossing alarm thresholds. Since reading
+ * of TMPALARM register clears it up, the function doesn't analyze the
+ * read value, instead the current temperature value just warningly printed to
+ * log.
+ * The method is called from PCIe ISR bottom-half routine.
+ */
+static void idt_temp_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	unsigned char val, frac;
+
+	/* Read the current temperature value */
+	idt_read_temp(ndev, &val, &frac);
+
+	/* Read the temperature alarm to clean the alarm status out */
+	/*(void)idt_sw_read(ndev, IDT_SW_TMPALARM);*/
+	/* NOTE(review): the TMPALARM dummy-read above is disabled in this
+	 * baseline; the IRQ is instead acknowledged via the NTINTSTS write
+	 * below - confirm the alarm status needs no separate clearing */
+
+	/* Clean the corresponding interrupt bit */
+	idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_TMPSENSOR);
+
+	dev_dbg(&ndev->ntb.pdev->dev,
+		"Temp sensor IRQ detected %#08x", ntint_sts);
+
+	/* Print temperature value to log */
+	dev_warn(&ndev->ntb.pdev->dev, "Temperature %hhu.%hhu", val, frac);
+}
+
+/*=============================================================================
+ *                           8. ISRs related operations
+ *
+ *    IDT PCIe-switch has strangely developed IRQ system. There is just one
+ * interrupt vector for doorbell and message registers. So the hardware driver
+ * can't determine actual source of IRQ if, for example, message event happened
+ * while any of unmasked doorbell is still set. The similar situation may be if
+ * switch or temperature sensor events pop up. The difference is that SEVENT
+ * and TMPSENSOR bits of NT interrupt status register can be cleaned by
+ * IRQ handler so a next interrupt request won't have false handling of
+ * corresponding events.
+ *    The hardware driver has only bottom-half handler of the IRQ, since if any
+ * of events happened the device won't raise it again before the last one is
+ * handled by clearing of corresponding NTINTSTS bit.
+ *=============================================================================
+ */
+
+/* Forward declaration: the handler is defined after the init/deinit pair */
+static irqreturn_t idt_thread_isr(int irq, void *devid);
+
+/*
+ * idt_init_isr() - initialize PCIe interrupt handler
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_isr(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ntint_mask;
+	int ret;
+
+	/* Allocate just one interrupt vector for the ISR (MSI preferred,
+	 * legacy INTx as fallback) */
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+	if (ret != 1) {
+		dev_err(&pdev->dev, "Failed to allocate IRQ vector");
+		return ret;
+	}
+
+	/* Retrieve the IRQ vector */
+	ret = pci_irq_vector(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ vector");
+		goto err_free_vectors;
+	}
+
+	/* Set the IRQ handler: NULL hard handler + IRQF_ONESHOT means all
+	 * the work is done in the threaded bottom-half (idt_thread_isr) */
+	ret = devm_request_threaded_irq(&pdev->dev, ret, NULL, idt_thread_isr,
+					IRQF_ONESHOT, NTB_IRQNAME, ndev);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to set MSI IRQ handler, %d", ret);
+		goto err_free_vectors;
+	}
+
+	/* Unmask Message/Doorbell/SE/Temperature interrupts */
+	ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
+	idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+	/* From now on the interrupts are enabled */
+	dev_dbg(&pdev->dev, "NTB interrupts initialized");
+
+	return 0;
+
+err_free_vectors:
+	pci_free_irq_vectors(pdev);
+
+	return ret;
+}
+
+
+/*
+ * idt_deinit_isr() - deinitialize PCIe interrupt handler
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Disable corresponding interrupts and free allocated IRQ vectors.
+ */
+static void idt_deinit_isr(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ntint_mask;
+
+	/* Mask interrupts back */
+	ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
+	idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+	/* Manually free IRQ otherwise PCI free irq vectors will fail */
+	devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 0), ndev);
+
+	/* Free allocated IRQ vectors */
+	pci_free_irq_vectors(pdev);
+
+	dev_dbg(&pdev->dev, "NTB interrupts deinitialized");
+}
+
+/*
+ * idt_thread_isr() - NT function interrupts handler
+ * @irq:	IRQ number
+ * @devid:	Custom buffer
+ *
+ * It reads current NT interrupts state register and handles all the events
+ * it declares.
+ * The method is bottom-half routine of actual default PCIe IRQ handler.
+ */
+static irqreturn_t idt_thread_isr(int irq, void *devid)
+{
+	struct idt_ntb_dev *ndev = devid;
+	bool handled = false;
+	u32 ntint_sts;
+
+	/* Read the NT interrupts status register */
+	ntint_sts = idt_nt_read(ndev, IDT_NT_NTINTSTS);
+
+	/* Handle messaging interrupts */
+	if (ntint_sts & IDT_NTINTSTS_MSG) {
+		idt_msg_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	/* Handle doorbell interrupts */
+	if (ntint_sts & IDT_NTINTSTS_DBELL) {
+		idt_db_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	/* Handle switch event interrupts */
+	if (ntint_sts & IDT_NTINTSTS_SEVENT) {
+		idt_se_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	/* Handle temperature sensor interrupt */
+	if (ntint_sts & IDT_NTINTSTS_TMPSENSOR) {
+		idt_temp_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts);
+
+	/* IRQ_NONE lets the core detect a spurious/shared interrupt */
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*===========================================================================
+ *                     9. NTB hardware driver initialization
+ *===========================================================================
+ */
+
+/*
+ * NTB API operations
+ *
+ * Hooked into the NTB core by idt_register_device(), which assigns this
+ * table to ndev->ntb.ops.
+ */
+static const struct ntb_dev_ops idt_ntb_ops = {
+	.port_number		= idt_ntb_port_number,
+	.peer_port_count	= idt_ntb_peer_port_count,
+	.peer_port_number	= idt_ntb_peer_port_number,
+	.peer_port_idx		= idt_ntb_peer_port_idx,
+	.link_is_up		= idt_ntb_link_is_up,
+	.link_enable		= idt_ntb_link_enable,
+	.link_disable		= idt_ntb_link_disable,
+	.mw_count		= idt_ntb_mw_count,
+	.mw_get_align		= idt_ntb_mw_get_align,
+	.peer_mw_count		= idt_ntb_peer_mw_count,
+	.peer_mw_get_addr	= idt_ntb_peer_mw_get_addr,
+	.peer_mw_set_trans	= idt_ntb_peer_mw_set_trans,
+	.peer_mw_clear_trans	= idt_ntb_peer_mw_clear_trans,
+	.db_valid_mask		= idt_ntb_db_valid_mask,
+	.db_read		= idt_ntb_db_read,
+	.db_clear		= idt_ntb_db_clear,
+	.db_read_mask		= idt_ntb_db_read_mask,
+	.db_set_mask		= idt_ntb_db_set_mask,
+	.db_clear_mask		= idt_ntb_db_clear_mask,
+	.peer_db_set		= idt_ntb_peer_db_set,
+	.msg_count		= idt_ntb_msg_count,
+	.msg_inbits		= idt_ntb_msg_inbits,
+	.msg_outbits		= idt_ntb_msg_outbits,
+	.msg_read_sts		= idt_ntb_msg_read_sts,
+	.msg_clear_sts		= idt_ntb_msg_clear_sts,
+	.msg_set_mask		= idt_ntb_msg_set_mask,
+	.msg_clear_mask		= idt_ntb_msg_clear_mask,
+	.msg_read		= idt_ntb_msg_read,
+	.msg_write		= idt_ntb_msg_write
+};
+
+/*
+ * idt_register_device() - register IDT NTB device
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Finalize the NTB device descriptor and hand it over to the NTB bus.
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_register_device(struct idt_ntb_dev *ndev)
+{
+	int ret;
+
+	/* Fill in the remaining NTB device fields before registration */
+	ndev->ntb.topo = NTB_TOPO_PRI;
+	ndev->ntb.ops = &idt_ntb_ops;
+
+	ret = ntb_register_device(&ndev->ntb);
+	if (ret) {
+		dev_err(&ndev->ntb.pdev->dev, "Failed to register NTB device");
+		return ret;
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device successfully registered");
+
+	return 0;
+}
+
+/*
+ * idt_unregister_device() - unregister IDT NTB device
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Counterpart of idt_register_device(); the descriptor itself is
+ * devm-managed and is freed on driver detachment.
+ */
+static void idt_unregister_device(struct idt_ntb_dev *ndev)
+{
+	/* Just unregister the NTB device */
+	ntb_unregister_device(&ndev->ntb);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device unregistered");
+}
+
+/*=============================================================================
+ *                        10. DebugFS node initialization
+ *=============================================================================
+ */
+
+/* Forward declaration so the file_operations table can reference it */
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+				   size_t count, loff_t *offp);
+
+/*
+ * Driver DebugFS info file operations
+ *
+ * Read-only node; simple_open() stashes the inode private data (the ndev
+ * pointer passed to debugfs_create_file()) into filp->private_data.
+ */
+static const struct file_operations idt_dbgfs_info_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = idt_dbgfs_info_read
+};
+
+/*
+ * idt_dbgfs_info_read() - DebugFS read info node callback
+ * @file:	File node descriptor.
+ * @ubuf:	User-space buffer to put data to
+ * @count:	Size of the buffer
+ * @offp:	Offset within the buffer
+ *
+ * Return: number of bytes copied to user-space, otherwise a negative errno.
+ */
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+				   size_t count, loff_t *offp)
+{
+	struct idt_ntb_dev *ndev = filp->private_data;
+	unsigned char temp, frac, idx, pidx, cnt;
+	ssize_t ret = 0, off = 0;
+	unsigned long irqflags;
+	enum ntb_speed speed;
+	enum ntb_width width;
+	char *strbuf;
+	size_t size;
+	u32 data;
+
+	/* Lets limit the buffer size the way the Intel/AMD drivers do */
+	size = min_t(size_t, count, 0x1000U);
+
+	/* Allocate the memory for the buffer */
+	strbuf = kmalloc(size, GFP_KERNEL);
+	if (strbuf == NULL)
+		return -ENOMEM;
+
+	/* Put the data into the string buffer */
+	off += scnprintf(strbuf + off, size - off,
+		"\n\t\tIDT NTB device Information:\n\n");
+
+	/* General local device configurations */
+	off += scnprintf(strbuf + off, size - off,
+		"Local Port %hhu, Partition %hhu\n", ndev->port, ndev->part);
+
+	/* Peer ports information */
+	off += scnprintf(strbuf + off, size - off, "Peers:\n");
+	for (idx = 0; idx < ndev->peer_cnt; idx++) {
+		off += scnprintf(strbuf + off, size - off,
+			"\t%hhu. Port %hhu, Partition %hhu\n",
+			idx, ndev->peers[idx].port, ndev->peers[idx].part);
+	}
+
+	/* Links status */
+	data = idt_ntb_link_is_up(&ndev->ntb, &speed, &width);
+	off += scnprintf(strbuf + off, size - off,
+		"NTB link status\t- 0x%08x, ", data);
+	off += scnprintf(strbuf + off, size - off, "PCIe Gen %d x%d lanes\n",
+		speed, width);
+
+	/* Mapping table entries: the address/data register pair must be
+	 * accessed atomically, hence the mtbl_lock around each entry */
+	off += scnprintf(strbuf + off, size - off, "NTB Mapping Table:\n");
+	for (idx = 0; idx < IDT_MTBL_ENTRY_CNT; idx++) {
+		spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_NTMTBLADDR, idx);
+		data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+		spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+		/* Print valid entries only */
+		if (data & IDT_NTMTBLDATA_VALID) {
+			off += scnprintf(strbuf + off, size - off,
+				"\t%hhu. Partition %d, Requester ID 0x%04x\n",
+				idx, GET_FIELD(NTMTBLDATA_PART, data),
+				GET_FIELD(NTMTBLDATA_REQID, data));
+		}
+	}
+	off += scnprintf(strbuf + off, size - off, "\n");
+
+	/* Outbound memory windows information */
+	off += scnprintf(strbuf + off, size - off,
+		"Outbound Memory Windows:\n");
+	for (idx = 0; idx < ndev->mw_cnt; idx += cnt) {
+		data = ndev->mws[idx].type;
+		cnt = idt_get_mw_count(data);
+
+		/* Print Memory Window information */
+		if (data == IDT_MW_DIR)
+			off += scnprintf(strbuf + off, size - off,
+				"\t%hhu.\t", idx);
+		else
+			off += scnprintf(strbuf + off, size - off,
+				"\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+
+		off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
+			idt_get_mw_name(data), ndev->mws[idx].bar);
+
+		off += scnprintf(strbuf + off, size - off,
+			"Address align 0x%08llx, ", ndev->mws[idx].addr_align);
+
+		off += scnprintf(strbuf + off, size - off,
+			"Size align 0x%08llx, Size max %llu\n",
+			ndev->mws[idx].size_align, ndev->mws[idx].size_max);
+	}
+
+	/* Inbound memory windows information */
+	for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+		off += scnprintf(strbuf + off, size - off,
+			"Inbound Memory Windows for peer %hhu (Port %hhu):\n",
+			pidx, ndev->peers[pidx].port);
+
+		/* Print Memory Windows information */
+		for (idx = 0; idx < ndev->peers[pidx].mw_cnt; idx += cnt) {
+			data = ndev->peers[pidx].mws[idx].type;
+			cnt = idt_get_mw_count(data);
+
+			if (data == IDT_MW_DIR)
+				off += scnprintf(strbuf + off, size - off,
+					"\t%hhu.\t", idx);
+			else
+				off += scnprintf(strbuf + off, size - off,
+					"\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+
+			off += scnprintf(strbuf + off, size - off,
+				"%s BAR%hhu, ", idt_get_mw_name(data),
+				ndev->peers[pidx].mws[idx].bar);
+
+			off += scnprintf(strbuf + off, size - off,
+				"Address align 0x%08llx, ",
+				ndev->peers[pidx].mws[idx].addr_align);
+
+			off += scnprintf(strbuf + off, size - off,
+				"Size align 0x%08llx, Size max %llu\n",
+				ndev->peers[pidx].mws[idx].size_align,
+				ndev->peers[pidx].mws[idx].size_max);
+		}
+	}
+	off += scnprintf(strbuf + off, size - off, "\n");
+
+	/* Doorbell information */
+	data = idt_sw_read(ndev, IDT_SW_GDBELLSTS);
+	off += scnprintf(strbuf + off, size - off,
+		 "Global Doorbell state\t- 0x%08x\n", data);
+	/* NOTE(review): idt_ntb_db_read() returns u64 which is truncated to
+	 * the u32 'data' here - fine only if the doorbell field is 32 bits */
+	data = idt_ntb_db_read(&ndev->ntb);
+	off += scnprintf(strbuf + off, size - off,
+		 "Local  Doorbell state\t- 0x%08x\n", data);
+	data = idt_nt_read(ndev, IDT_NT_INDBELLMSK);
+	off += scnprintf(strbuf + off, size - off,
+		 "Local  Doorbell mask\t- 0x%08x\n", data);
+	off += scnprintf(strbuf + off, size - off, "\n");
+
+	/* Messaging information */
+	off += scnprintf(strbuf + off, size - off,
+		 "Message event valid\t- 0x%08x\n", IDT_MSG_MASK);
+	data = idt_ntb_msg_read_sts(&ndev->ntb);
+	off += scnprintf(strbuf + off, size - off,
+		 "Message event status\t- 0x%08x\n", data);
+	data = idt_nt_read(ndev, IDT_NT_MSGSTSMSK);
+	off += scnprintf(strbuf + off, size - off,
+		 "Message event mask\t- 0x%08x\n", data);
+	off += scnprintf(strbuf + off, size - off,
+		 "Message data:\n");
+	for (idx = 0; idx < IDT_MSG_CNT; idx++) {
+		int src;
+		/* NOTE(review): 'src' is int but printed with %hhu below -
+		 * harmless after vararg promotion, but %d would be cleaner */
+		(void)idt_ntb_msg_read(&ndev->ntb, idx, &src, &data);
+		off += scnprintf(strbuf + off, size - off,
+			"\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
+			idx, data, src, ndev->peers[src].port);
+	}
+	off += scnprintf(strbuf + off, size - off, "\n");
+
+	/* Current temperature */
+	idt_read_temp(ndev, &temp, &frac);
+	off += scnprintf(strbuf + off, size - off,
+		"Switch temperature\t\t- %hhu.%hhuC\n", temp, frac);
+
+	/* Copy the buffer to the User Space */
+	ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
+	kfree(strbuf);
+
+	return ret;
+}
+
+/*
+ * idt_init_dbgfs() - initialize DebugFS node
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
+{
+	char devname[64];
+
+	/* If the top directory is not created then do nothing */
+	if (IS_ERR_OR_NULL(dbgfs_topdir)) {
+		dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
+		/* NOTE(review): PTR_ERR(NULL) is 0, so a NULL topdir is
+		 * reported as success with no node created - presumably
+		 * intentional since DebugFS is optional */
+		return PTR_ERR(dbgfs_topdir);
+	}
+
+	/* Create the info file node (read-only for the owner) */
+	snprintf(devname, 64, "info:%s", pci_name(ndev->ntb.pdev));
+	ndev->dbgfs_info = debugfs_create_file(devname, 0400, dbgfs_topdir,
+		ndev, &idt_dbgfs_info_ops);
+	if (IS_ERR(ndev->dbgfs_info)) {
+		dev_dbg(&ndev->ntb.pdev->dev, "Failed to create DebugFS node");
+		return PTR_ERR(ndev->dbgfs_info);
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node created");
+
+	return 0;
+}
+
+/*
+ * idt_deinit_dbgfs() - deinitialize DebugFS node
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Just discard the info node from DebugFS.
+ * debugfs_remove() tolerates NULL/error pointers, so this is safe even
+ * if the node creation failed.
+ */
+static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
+{
+	debugfs_remove(ndev->dbgfs_info);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node discarded");
+}
+
+/*=============================================================================
+ *                     11. Basic PCIe device initialization
+ *=============================================================================
+ */
+
+/*
+ * idt_check_setup() - Check whether the IDT PCIe-switch is properly
+ *		       pre-initialized
+ * @pdev:	Pointer to the PCI device descriptor
+ *
+ * The switch is expected to be pre-configured by BIOS/EEPROM so that BAR0
+ * maps the device configuration space of the proper size.
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_check_setup(struct pci_dev *pdev)
+{
+	u32 data;
+	int ret;
+
+	/* Read the BARSETUP0 */
+	ret = pci_read_config_dword(pdev, IDT_NT_BARSETUP0, &data);
+	if (ret != 0) {
+		dev_err(&pdev->dev,
+			"Failed to read BARSETUP0 config register");
+		return ret;
+	}
+
+	/* Check whether the BAR0 register is enabled to be of config space */
+	if (!(data & IDT_BARSETUP_EN) || !(data & IDT_BARSETUP_MODE_CFG)) {
+		dev_err(&pdev->dev, "BAR0 doesn't map config space");
+		return -EINVAL;
+	}
+
+	/* Configuration space BAR0 must have certain size */
+	if ((data & IDT_BARSETUP_SIZE_MASK) != IDT_BARSETUP_SIZE_CFG) {
+		dev_err(&pdev->dev, "Invalid size of config space");
+		return -EINVAL;
+	}
+
+	dev_dbg(&pdev->dev, "NTB device pre-initialized correctly");
+
+	return 0;
+}
+
+/*
+ * idt_create_dev() - create the IDT PCIe-switch driver descriptor
+ * @pdev:	Pointer to the PCI device descriptor
+ * @id:		IDT PCIe-device configuration
+ *
+ * It just allocates a memory for IDT PCIe-switch device structure and
+ * initializes some commonly used fields.
+ *
+ * No need of release method, since managed device resource is used for
+ * memory allocation.
+ *
+ * Return: pointer to the descriptor, otherwise an error pointer
+ * (see ERR_PTR) on failure.
+ */
+static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
+					  const struct pci_device_id *id)
+{
+	struct idt_ntb_dev *ndev;
+
+	/* Allocate memory for the IDT PCIe-device descriptor */
+	ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
+	if (!ndev) {
+		dev_err(&pdev->dev, "Memory allocation failed for descriptor");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Save the IDT PCIe-switch ports configuration (stashed into
+	 * driver_data by the PCI-ids table below) */
+	ndev->swcfg = (struct idt_89hpes_cfg *)id->driver_data;
+	/* Save the PCI-device pointer inside the NTB device structure */
+	ndev->ntb.pdev = pdev;
+
+	/* Initialize spin locker of Doorbell, Message and GASA registers */
+	spin_lock_init(&ndev->db_mask_lock);
+	spin_lock_init(&ndev->msg_mask_lock);
+	spin_lock_init(&ndev->gasa_lock);
+
+	dev_info(&pdev->dev, "IDT %s discovered", ndev->swcfg->name);
+
+	dev_dbg(&pdev->dev, "NTB device descriptor created");
+
+	return ndev;
+}
+
+/*
+ * idt_init_pci() - initialize the basic PCI-related subsystem
+ * @ndev:	Pointer to the IDT PCIe-switch driver descriptor
+ *
+ * Managed device resources will be freed automatically in case of failure or
+ * driver detachment.
+ *
+ * Return: zero on success, otherwise negative error number.
+ */
+static int idt_init_pci(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	int ret;
+
+	/* Initialize the bit mask of DMA: prefer 64-bit addressing, fall
+	 * back to 32-bit if the platform doesn't support it */
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret != 0) {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (ret != 0) {
+			dev_err(&pdev->dev, "Failed to set DMA bit mask\n");
+			return ret;
+		}
+		dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n");
+	}
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret != 0) {
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (ret != 0) {
+			dev_err(&pdev->dev,
+				"Failed to set consistent DMA bit mask\n");
+			return ret;
+		}
+		dev_warn(&pdev->dev,
+			"Cannot set consistent DMA highmem bit mask\n");
+	}
+
+	/*
+	 * Enable the device advanced error reporting. It's not critical to
+	 * have AER disabled in the kernel.
+	 */
+	ret = pci_enable_pcie_error_reporting(pdev);
+	if (ret != 0)
+		dev_warn(&pdev->dev, "PCIe AER capability disabled\n");
+	else /* Cleanup uncorrectable error status before getting to init */
+		pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	/* First enable the PCI device (managed - auto-disabled on detach) */
+	ret = pcim_enable_device(pdev);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to enable PCIe device\n");
+		goto err_disable_aer;
+	}
+
+	/*
+	 * Enable the bus mastering, which effectively enables MSI IRQs and
+	 * Request TLPs translation
+	 */
+	pci_set_master(pdev);
+
+	/* Request all BARs resources and map BAR0 only (mask BIT(0)) */
+	ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to request resources\n");
+		goto err_clear_master;
+	}
+
+	/* Retrieve virtual address of BAR0 - PCI configuration space */
+	ndev->cfgspc = pcim_iomap_table(pdev)[0];
+
+	/* Put the IDT driver data pointer to the PCI-device private pointer */
+	pci_set_drvdata(pdev, ndev);
+
+	dev_dbg(&pdev->dev, "NT-function PCIe interface initialized");
+
+	return 0;
+
+err_clear_master:
+	pci_clear_master(pdev);
+err_disable_aer:
+	(void)pci_disable_pcie_error_reporting(pdev);
+
+	return ret;
+}
+
+/*
+ * idt_deinit_pci() - deinitialize the basic PCI-related subsystem
+ * @ndev:	Pointer to the IDT PCIe-switch driver descriptor
+ *
+ * Counterpart of idt_init_pci(). Managed (pcim_*) resources - device
+ * enabling and BAR mappings - will be freed on the driver detachment.
+ */
+static void idt_deinit_pci(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+
+	/* Clean up the PCI-device private data pointer */
+	pci_set_drvdata(pdev, NULL);
+
+	/* Clear the bus master disabling the Request TLPs translation */
+	pci_clear_master(pdev);
+
+	/* Disable the AER capability */
+	(void)pci_disable_pcie_error_reporting(pdev);
+
+	dev_dbg(&pdev->dev, "NT-function PCIe interface cleared");
+}
+
+/*===========================================================================
+ *                       12. PCI bus callback functions
+ *===========================================================================
+ */
+
+/*
+ * idt_pci_probe() - PCI device probe callback
+ * @pdev:	Pointer to PCI device structure
+ * @id:		PCIe device custom descriptor
+ *
+ * Return: zero on success, otherwise negative error number
+ */
+static int idt_pci_probe(struct pci_dev *pdev,
+			 const struct pci_device_id *id)
+{
+	struct idt_ntb_dev *ndev;
+	int ret;
+
+	/* Check whether IDT PCIe-switch is properly pre-initialized */
+	ret = idt_check_setup(pdev);
+	if (ret != 0)
+		return ret;
+
+	/* Allocate the memory for IDT NTB device data (devm-managed) */
+	ndev = idt_create_dev(pdev, id);
+	if (IS_ERR_OR_NULL(ndev))
+		return PTR_ERR(ndev);
+
+	/* Initialize the basic PCI subsystem of the device */
+	ret = idt_init_pci(ndev);
+	if (ret != 0)
+		return ret;
+
+	/* Scan ports of the IDT PCIe-switch (result deliberately ignored) */
+	(void)idt_scan_ports(ndev);
+
+	/* Initialize NTB link events subsystem */
+	idt_init_link(ndev);
+
+	/* Initialize MWs subsystem */
+	ret = idt_init_mws(ndev);
+	if (ret != 0)
+		goto err_deinit_link;
+
+	/* Initialize Messaging subsystem */
+	idt_init_msg(ndev);
+
+	/* Initialize IDT interrupts handler */
+	ret = idt_init_isr(ndev);
+	if (ret != 0)
+		goto err_deinit_link;
+
+	/* Register IDT NTB devices on the NTB bus */
+	ret = idt_register_device(ndev);
+	if (ret != 0)
+		goto err_deinit_isr;
+
+	/* Initialize DebugFS info node (failure is non-fatal, hence the
+	 * ignored result) */
+	(void)idt_init_dbgfs(ndev);
+
+	/* IDT PCIe-switch NTB driver is finally initialized */
+	dev_info(&pdev->dev, "IDT NTB device is ready");
+
+	/* May the force be with us... */
+	return 0;
+
+/* Error labels unwind in reverse init order; PCI deinit is shared */
+err_deinit_isr:
+	idt_deinit_isr(ndev);
+err_deinit_link:
+	idt_deinit_link(ndev);
+	idt_deinit_pci(ndev);
+
+	return ret;
+}
+
+/*
+ * idt_pci_remove() - PCI device remove callback
+ * @pdev:	Pointer to PCI device structure
+ *
+ * Tears the driver down in the reverse order of idt_pci_probe().
+ */
+static void idt_pci_remove(struct pci_dev *pdev)
+{
+	struct idt_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	/* Deinit the DebugFS node */
+	idt_deinit_dbgfs(ndev);
+
+	/* Unregister NTB device */
+	idt_unregister_device(ndev);
+
+	/* Stop the interrupts handling */
+	idt_deinit_isr(ndev);
+
+	/* Deinitialize link event subsystem */
+	idt_deinit_link(ndev);
+
+	/* Deinit basic PCI subsystem */
+	idt_deinit_pci(ndev);
+
+	/* IDT PCIe-switch NTB driver is finally deinitialized */
+	dev_info(&pdev->dev, "IDT NTB device is removed");
+
+	/* Sayonara... */
+}
+
+/*
+ * IDT PCIe-switch models ports configuration structures
+ *
+ * NOTE(review): each port list appears to enumerate the NT-capable ports of
+ * the corresponding switch model - verify against the device datasheets.
+ */
+static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+	.name = "89HPES24NT6AG2",
+	.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+	.name = "89HPES32NT8AG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+	.name = "89HPES32NT8BG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+	.name = "89HPES12NT12G2",
+	.port_cnt = 3, .ports = {0, 8, 16}
+};
+static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+	.name = "89HPES16NT16G2",
+	.port_cnt = 4, .ports = {0, 8, 12, 16}
+};
+static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+	.name = "89HPES24NT24G2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+	.name = "89HPES32NT24AG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+	.name = "89HPES32NT24BG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+
+/*
+ * PCI-ids table of the supported IDT PCIe-switch devices
+ * (the per-model config above is carried in driver_data)
+ */
+static const struct pci_device_id idt_pci_tbl[] = {
+	{IDT_PCI_DEVICE_IDS(89HPES24NT6AG2,  idt_89hpes24nt6ag2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT8AG2,  idt_89hpes32nt8ag2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT8BG2,  idt_89hpes32nt8bg2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES12NT12G2,  idt_89hpes12nt12g2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES16NT16G2,  idt_89hpes16nt16g2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES24NT24G2,  idt_89hpes24nt24g2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT24AG2, idt_89hpes32nt24ag2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT24BG2, idt_89hpes32nt24bg2_config)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, idt_pci_tbl);
+
+/*
+ * IDT PCIe-switch NT-function device driver structure definition,
+ * registered with the PCI core from idt_pci_driver_init()
+ */
+static struct pci_driver idt_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.probe		= idt_pci_probe,
+	.remove		= idt_pci_remove,
+	.id_table	= idt_pci_tbl,
+};
+
+/* Module entry point: set up the shared DebugFS directory and register
+ * the PCI driver */
+static int __init idt_pci_driver_init(void)
+{
+	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+	/* Create the top DebugFS directory if the FS is initialized.
+	 * A NULL/error result is tolerated by idt_init_dbgfs() */
+	if (debugfs_initialized())
+		dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	/* Register the NTB hardware driver to handle the PCI device */
+	return pci_register_driver(&idt_pci_driver);
+}
+module_init(idt_pci_driver_init);
+
+/* Module exit point: reverse of idt_pci_driver_init() */
+static void __exit idt_pci_driver_exit(void)
+{
+	/* Unregister the NTB hardware driver */
+	pci_unregister_driver(&idt_pci_driver);
+
+	/* Discard the top DebugFS directory (NULL-safe) */
+	debugfs_remove_recursive(dbgfs_topdir);
+}
+module_exit(idt_pci_driver_exit);
+
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/idt/ntb_hw_idt.h b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/ntb_hw_idt.h
new file mode 100644
index 0000000..856fd18
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/idt/ntb_hw_idt.h
@@ -0,0 +1,1149 @@
+/*
+ *   This file is provided under a GPLv2 license.  When using or
+ *   redistributing this file, you may do so under that license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2016 T-Platforms All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify it
+ *   under the terms and conditions of the GNU General Public License,
+ *   version 2, as published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ *   Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License along
+ *   with this program; if not, one can be found http://www.gnu.org/licenses/.
+ *
+ *   The full GNU General Public License is included in this distribution in
+ *   the file called "COPYING".
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
+ */
+
+#ifndef NTB_HW_IDT_H
+#define NTB_HW_IDT_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/ntb.h>
+
+
+/*
+ * Macro is used to create the struct pci_device_id that matches
+ * the supported IDT PCIe-switches
+ * @devname: Capitalized name of the particular device
+ * @data: Variable passed to the driver of the particular device
+ */
+#define IDT_PCI_DEVICE_IDS(devname, data) \
+	.vendor = PCI_VENDOR_ID_IDT, .device = PCI_DEVICE_ID_IDT_##devname, \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+	.class = (PCI_CLASS_BRIDGE_OTHER << 8), .class_mask = (0xFFFF00), \
+	.driver_data = (kernel_ulong_t)&data
+
+/*
+ * IDT PCIe-switches device IDs
+ */
+#define PCI_DEVICE_ID_IDT_89HPES24NT6AG2  0x8091
+#define PCI_DEVICE_ID_IDT_89HPES32NT8AG2  0x808F
+#define PCI_DEVICE_ID_IDT_89HPES32NT8BG2  0x8088
+#define PCI_DEVICE_ID_IDT_89HPES12NT12G2  0x8092
+#define PCI_DEVICE_ID_IDT_89HPES16NT16G2  0x8090
+#define PCI_DEVICE_ID_IDT_89HPES24NT24G2  0x808E
+#define PCI_DEVICE_ID_IDT_89HPES32NT24AG2 0x808C
+#define PCI_DEVICE_ID_IDT_89HPES32NT24BG2 0x808A
+
+/*
+ * NT-function Configuration Space registers
+ * NOTE 1) The IDT PCIe-switch internal data is little-endian
+ *      so it must be taken into account in the driver
+ *      internals.
+ *      2) Additionally the registers should be accessed either
+ *      with byte-enables corresponding to their native size or
+ *      the size of one DWORD
+ *
+ * So to simplify the driver code, there is only DWORD-sized read/write
+ * operations utilized.
+ */
+/* PCI Express Configuration Space */
+/* PCI Express command/status register	(DWORD) */
+#define IDT_NT_PCICMDSTS		0x00004U
+/* PCI Express Device Capabilities	(DWORD) */
+#define IDT_NT_PCIEDCAP			0x00044U
+/* PCI Express Device Control/Status	(WORD+WORD) */
+#define IDT_NT_PCIEDCTLSTS		0x00048U
+/* PCI Express Link Capabilities	(DWORD) */
+#define IDT_NT_PCIELCAP			0x0004CU
+/* PCI Express Link Control/Status	(WORD+WORD) */
+#define IDT_NT_PCIELCTLSTS		0x00050U
+/* PCI Express Device Capabilities 2	(DWORD) */
+#define IDT_NT_PCIEDCAP2		0x00064U
+/* PCI Express Device Control 2		(WORD+WORD) */
+#define IDT_NT_PCIEDCTL2		0x00068U
+/* PCI Power Management Control and Status (DWORD) */
+#define IDT_NT_PMCSR			0x000C4U
+/*==========================================*/
+/* IDT Proprietary NT-port-specific registers */
+/* NT-function main control registers */
+/* NT Endpoint Control			(DWORD) */
+#define IDT_NT_NTCTL			0x00400U
+/* NT Endpoint Interrupt Status/Mask	(DWORD) */
+#define IDT_NT_NTINTSTS			0x00404U
+#define IDT_NT_NTINTMSK			0x00408U
+/* NT Endpoint Signal Data		(DWORD) */
+#define IDT_NT_NTSDATA			0x0040CU
+/* NT Endpoint Global Signal		(DWORD) */
+#define IDT_NT_NTGSIGNAL		0x00410U
+/* Internal Error Reporting Mask 0/1	(DWORD) */
+#define IDT_NT_NTIERRORMSK0		0x00414U
+#define IDT_NT_NTIERRORMSK1		0x00418U
+/* Doorbell registers */
+/* NT Outbound Doorbell Set		(DWORD) */
+#define IDT_NT_OUTDBELLSET		0x00420U
+/* NT Inbound Doorbell Status/Mask	(DWORD) */
+#define IDT_NT_INDBELLSTS		0x00428U
+#define IDT_NT_INDBELLMSK		0x0042CU
+/* Message registers */
+/* Outbound Message N			(DWORD) */
+#define IDT_NT_OUTMSG0			0x00430U
+#define IDT_NT_OUTMSG1			0x00434U
+#define IDT_NT_OUTMSG2			0x00438U
+#define IDT_NT_OUTMSG3			0x0043CU
+/* Inbound Message N			(DWORD) */
+#define IDT_NT_INMSG0			0x00440U
+#define IDT_NT_INMSG1			0x00444U
+#define IDT_NT_INMSG2			0x00448U
+#define IDT_NT_INMSG3			0x0044CU
+/* Inbound Message Source N		(DWORD) */
+#define IDT_NT_INMSGSRC0		0x00450U
+#define IDT_NT_INMSGSRC1		0x00454U
+#define IDT_NT_INMSGSRC2		0x00458U
+#define IDT_NT_INMSGSRC3		0x0045CU
+/* Message Status			(DWORD) */
+#define IDT_NT_MSGSTS			0x00460U
+/* Message Status Mask			(DWORD) */
+#define IDT_NT_MSGSTSMSK		0x00464U
+/* BAR-setup registers */
+/* BAR N Setup/Limit Address/Lower and Upper Translated Base Address (DWORD) */
+#define IDT_NT_BARSETUP0		0x00470U
+#define IDT_NT_BARLIMIT0		0x00474U
+#define IDT_NT_BARLTBASE0		0x00478U
+#define IDT_NT_BARUTBASE0		0x0047CU
+#define IDT_NT_BARSETUP1		0x00480U
+#define IDT_NT_BARLIMIT1		0x00484U
+#define IDT_NT_BARLTBASE1		0x00488U
+#define IDT_NT_BARUTBASE1		0x0048CU
+#define IDT_NT_BARSETUP2		0x00490U
+#define IDT_NT_BARLIMIT2		0x00494U
+#define IDT_NT_BARLTBASE2		0x00498U
+#define IDT_NT_BARUTBASE2		0x0049CU
+#define IDT_NT_BARSETUP3		0x004A0U
+#define IDT_NT_BARLIMIT3		0x004A4U
+#define IDT_NT_BARLTBASE3		0x004A8U
+#define IDT_NT_BARUTBASE3		0x004ACU
+#define IDT_NT_BARSETUP4		0x004B0U
+#define IDT_NT_BARLIMIT4		0x004B4U
+#define IDT_NT_BARLTBASE4		0x004B8U
+#define IDT_NT_BARUTBASE4		0x004BCU
+#define IDT_NT_BARSETUP5		0x004C0U
+#define IDT_NT_BARLIMIT5		0x004C4U
+#define IDT_NT_BARLTBASE5		0x004C8U
+#define IDT_NT_BARUTBASE5		0x004CCU
+/* NT mapping table registers */
+/* NT Mapping Table Address/Status/Data	(DWORD) */
+#define IDT_NT_NTMTBLADDR		0x004D0U
+#define IDT_NT_NTMTBLSTS		0x004D4U
+#define IDT_NT_NTMTBLDATA		0x004D8U
+/* Requester ID (Bus:Device:Function) Capture	(DWORD) */
+#define IDT_NT_REQIDCAP			0x004DCU
+/* Memory Windows Lookup table registers */
+/* Lookup Table Offset/Lower, Middle and Upper data	(DWORD) */
+#define IDT_NT_LUTOFFSET		0x004E0U
+#define IDT_NT_LUTLDATA			0x004E4U
+#define IDT_NT_LUTMDATA			0x004E8U
+#define IDT_NT_LUTUDATA			0x004ECU
+/* NT Endpoint Uncorrectable/Correctable Errors Emulation registers (DWORD) */
+#define IDT_NT_NTUEEM			0x004F0U
+#define IDT_NT_NTCEEM			0x004F4U
+/* Global Address Space Access/Data registers	(DWORD) */
+#define IDT_NT_GASAADDR			0x00FF8U
+#define IDT_NT_GASADATA			0x00FFCU
+
+/*
+ * IDT PCIe-switch Global Configuration and Status registers
+ */
+/* Port N Configuration register in global space */
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP0_PCIECMDSTS		0x01004U
+#define IDT_SW_NTP0_PCIELCTLSTS		0x01050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP0_NTCTL		0x01400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP0_BARSETUP0		0x01470U
+#define IDT_SW_NTP0_BARLIMIT0		0x01474U
+#define IDT_SW_NTP0_BARLTBASE0		0x01478U
+#define IDT_SW_NTP0_BARUTBASE0		0x0147CU
+#define IDT_SW_NTP0_BARSETUP1		0x01480U
+#define IDT_SW_NTP0_BARLIMIT1		0x01484U
+#define IDT_SW_NTP0_BARLTBASE1		0x01488U
+#define IDT_SW_NTP0_BARUTBASE1		0x0148CU
+#define IDT_SW_NTP0_BARSETUP2		0x01490U
+#define IDT_SW_NTP0_BARLIMIT2		0x01494U
+#define IDT_SW_NTP0_BARLTBASE2		0x01498U
+#define IDT_SW_NTP0_BARUTBASE2		0x0149CU
+#define IDT_SW_NTP0_BARSETUP3		0x014A0U
+#define IDT_SW_NTP0_BARLIMIT3		0x014A4U
+#define IDT_SW_NTP0_BARLTBASE3		0x014A8U
+#define IDT_SW_NTP0_BARUTBASE3		0x014ACU
+#define IDT_SW_NTP0_BARSETUP4		0x014B0U
+#define IDT_SW_NTP0_BARLIMIT4		0x014B4U
+#define IDT_SW_NTP0_BARLTBASE4		0x014B8U
+#define IDT_SW_NTP0_BARUTBASE4		0x014BCU
+#define IDT_SW_NTP0_BARSETUP5		0x014C0U
+#define IDT_SW_NTP0_BARLIMIT5		0x014C4U
+#define IDT_SW_NTP0_BARLTBASE5		0x014C8U
+#define IDT_SW_NTP0_BARUTBASE5		0x014CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP2_PCIECMDSTS		0x05004U
+#define IDT_SW_NTP2_PCIELCTLSTS		0x05050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP2_NTCTL		0x05400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP2_BARSETUP0		0x05470U
+#define IDT_SW_NTP2_BARLIMIT0		0x05474U
+#define IDT_SW_NTP2_BARLTBASE0		0x05478U
+#define IDT_SW_NTP2_BARUTBASE0		0x0547CU
+#define IDT_SW_NTP2_BARSETUP1		0x05480U
+#define IDT_SW_NTP2_BARLIMIT1		0x05484U
+#define IDT_SW_NTP2_BARLTBASE1		0x05488U
+#define IDT_SW_NTP2_BARUTBASE1		0x0548CU
+#define IDT_SW_NTP2_BARSETUP2		0x05490U
+#define IDT_SW_NTP2_BARLIMIT2		0x05494U
+#define IDT_SW_NTP2_BARLTBASE2		0x05498U
+#define IDT_SW_NTP2_BARUTBASE2		0x0549CU
+#define IDT_SW_NTP2_BARSETUP3		0x054A0U
+#define IDT_SW_NTP2_BARLIMIT3		0x054A4U
+#define IDT_SW_NTP2_BARLTBASE3		0x054A8U
+#define IDT_SW_NTP2_BARUTBASE3		0x054ACU
+#define IDT_SW_NTP2_BARSETUP4		0x054B0U
+#define IDT_SW_NTP2_BARLIMIT4		0x054B4U
+#define IDT_SW_NTP2_BARLTBASE4		0x054B8U
+#define IDT_SW_NTP2_BARUTBASE4		0x054BCU
+#define IDT_SW_NTP2_BARSETUP5		0x054C0U
+#define IDT_SW_NTP2_BARLIMIT5		0x054C4U
+#define IDT_SW_NTP2_BARLTBASE5		0x054C8U
+#define IDT_SW_NTP2_BARUTBASE5		0x054CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP4_PCIECMDSTS		0x09004U
+#define IDT_SW_NTP4_PCIELCTLSTS		0x09050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP4_NTCTL		0x09400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP4_BARSETUP0		0x09470U
+#define IDT_SW_NTP4_BARLIMIT0		0x09474U
+#define IDT_SW_NTP4_BARLTBASE0		0x09478U
+#define IDT_SW_NTP4_BARUTBASE0		0x0947CU
+#define IDT_SW_NTP4_BARSETUP1		0x09480U
+#define IDT_SW_NTP4_BARLIMIT1		0x09484U
+#define IDT_SW_NTP4_BARLTBASE1		0x09488U
+#define IDT_SW_NTP4_BARUTBASE1		0x0948CU
+#define IDT_SW_NTP4_BARSETUP2		0x09490U
+#define IDT_SW_NTP4_BARLIMIT2		0x09494U
+#define IDT_SW_NTP4_BARLTBASE2		0x09498U
+#define IDT_SW_NTP4_BARUTBASE2		0x0949CU
+#define IDT_SW_NTP4_BARSETUP3		0x094A0U
+#define IDT_SW_NTP4_BARLIMIT3		0x094A4U
+#define IDT_SW_NTP4_BARLTBASE3		0x094A8U
+#define IDT_SW_NTP4_BARUTBASE3		0x094ACU
+#define IDT_SW_NTP4_BARSETUP4		0x094B0U
+#define IDT_SW_NTP4_BARLIMIT4		0x094B4U
+#define IDT_SW_NTP4_BARLTBASE4		0x094B8U
+#define IDT_SW_NTP4_BARUTBASE4		0x094BCU
+#define IDT_SW_NTP4_BARSETUP5		0x094C0U
+#define IDT_SW_NTP4_BARLIMIT5		0x094C4U
+#define IDT_SW_NTP4_BARLTBASE5		0x094C8U
+#define IDT_SW_NTP4_BARUTBASE5		0x094CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP6_PCIECMDSTS		0x0D004U
+#define IDT_SW_NTP6_PCIELCTLSTS		0x0D050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP6_NTCTL		0x0D400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP6_BARSETUP0		0x0D470U
+#define IDT_SW_NTP6_BARLIMIT0		0x0D474U
+#define IDT_SW_NTP6_BARLTBASE0		0x0D478U
+#define IDT_SW_NTP6_BARUTBASE0		0x0D47CU
+#define IDT_SW_NTP6_BARSETUP1		0x0D480U
+#define IDT_SW_NTP6_BARLIMIT1		0x0D484U
+#define IDT_SW_NTP6_BARLTBASE1		0x0D488U
+#define IDT_SW_NTP6_BARUTBASE1		0x0D48CU
+#define IDT_SW_NTP6_BARSETUP2		0x0D490U
+#define IDT_SW_NTP6_BARLIMIT2		0x0D494U
+#define IDT_SW_NTP6_BARLTBASE2		0x0D498U
+#define IDT_SW_NTP6_BARUTBASE2		0x0D49CU
+#define IDT_SW_NTP6_BARSETUP3		0x0D4A0U
+#define IDT_SW_NTP6_BARLIMIT3		0x0D4A4U
+#define IDT_SW_NTP6_BARLTBASE3		0x0D4A8U
+#define IDT_SW_NTP6_BARUTBASE3		0x0D4ACU
+#define IDT_SW_NTP6_BARSETUP4		0x0D4B0U
+#define IDT_SW_NTP6_BARLIMIT4		0x0D4B4U
+#define IDT_SW_NTP6_BARLTBASE4		0x0D4B8U
+#define IDT_SW_NTP6_BARUTBASE4		0x0D4BCU
+#define IDT_SW_NTP6_BARSETUP5		0x0D4C0U
+#define IDT_SW_NTP6_BARLIMIT5		0x0D4C4U
+#define IDT_SW_NTP6_BARLTBASE5		0x0D4C8U
+#define IDT_SW_NTP6_BARUTBASE5		0x0D4CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP8_PCIECMDSTS		0x11004U
+#define IDT_SW_NTP8_PCIELCTLSTS		0x11050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP8_NTCTL		0x11400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP8_BARSETUP0		0x11470U
+#define IDT_SW_NTP8_BARLIMIT0		0x11474U
+#define IDT_SW_NTP8_BARLTBASE0		0x11478U
+#define IDT_SW_NTP8_BARUTBASE0		0x1147CU
+#define IDT_SW_NTP8_BARSETUP1		0x11480U
+#define IDT_SW_NTP8_BARLIMIT1		0x11484U
+#define IDT_SW_NTP8_BARLTBASE1		0x11488U
+#define IDT_SW_NTP8_BARUTBASE1		0x1148CU
+#define IDT_SW_NTP8_BARSETUP2		0x11490U
+#define IDT_SW_NTP8_BARLIMIT2		0x11494U
+#define IDT_SW_NTP8_BARLTBASE2		0x11498U
+#define IDT_SW_NTP8_BARUTBASE2		0x1149CU
+#define IDT_SW_NTP8_BARSETUP3		0x114A0U
+#define IDT_SW_NTP8_BARLIMIT3		0x114A4U
+#define IDT_SW_NTP8_BARLTBASE3		0x114A8U
+#define IDT_SW_NTP8_BARUTBASE3		0x114ACU
+#define IDT_SW_NTP8_BARSETUP4		0x114B0U
+#define IDT_SW_NTP8_BARLIMIT4		0x114B4U
+#define IDT_SW_NTP8_BARLTBASE4		0x114B8U
+#define IDT_SW_NTP8_BARUTBASE4		0x114BCU
+#define IDT_SW_NTP8_BARSETUP5		0x114C0U
+#define IDT_SW_NTP8_BARLIMIT5		0x114C4U
+#define IDT_SW_NTP8_BARLTBASE5		0x114C8U
+#define IDT_SW_NTP8_BARUTBASE5		0x114CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP12_PCIECMDSTS		0x19004U
+#define IDT_SW_NTP12_PCIELCTLSTS	0x19050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP12_NTCTL		0x19400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP12_BARSETUP0		0x19470U
+#define IDT_SW_NTP12_BARLIMIT0		0x19474U
+#define IDT_SW_NTP12_BARLTBASE0		0x19478U
+#define IDT_SW_NTP12_BARUTBASE0		0x1947CU
+#define IDT_SW_NTP12_BARSETUP1		0x19480U
+#define IDT_SW_NTP12_BARLIMIT1		0x19484U
+#define IDT_SW_NTP12_BARLTBASE1		0x19488U
+#define IDT_SW_NTP12_BARUTBASE1		0x1948CU
+#define IDT_SW_NTP12_BARSETUP2		0x19490U
+#define IDT_SW_NTP12_BARLIMIT2		0x19494U
+#define IDT_SW_NTP12_BARLTBASE2		0x19498U
+#define IDT_SW_NTP12_BARUTBASE2		0x1949CU
+#define IDT_SW_NTP12_BARSETUP3		0x194A0U
+#define IDT_SW_NTP12_BARLIMIT3		0x194A4U
+#define IDT_SW_NTP12_BARLTBASE3		0x194A8U
+#define IDT_SW_NTP12_BARUTBASE3		0x194ACU
+#define IDT_SW_NTP12_BARSETUP4		0x194B0U
+#define IDT_SW_NTP12_BARLIMIT4		0x194B4U
+#define IDT_SW_NTP12_BARLTBASE4		0x194B8U
+#define IDT_SW_NTP12_BARUTBASE4		0x194BCU
+#define IDT_SW_NTP12_BARSETUP5		0x194C0U
+#define IDT_SW_NTP12_BARLIMIT5		0x194C4U
+#define IDT_SW_NTP12_BARLTBASE5		0x194C8U
+#define IDT_SW_NTP12_BARUTBASE5		0x194CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP16_PCIECMDSTS		0x21004U
+#define IDT_SW_NTP16_PCIELCTLSTS	0x21050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP16_NTCTL		0x21400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP16_BARSETUP0		0x21470U
+#define IDT_SW_NTP16_BARLIMIT0		0x21474U
+#define IDT_SW_NTP16_BARLTBASE0		0x21478U
+#define IDT_SW_NTP16_BARUTBASE0		0x2147CU
+#define IDT_SW_NTP16_BARSETUP1		0x21480U
+#define IDT_SW_NTP16_BARLIMIT1		0x21484U
+#define IDT_SW_NTP16_BARLTBASE1		0x21488U
+#define IDT_SW_NTP16_BARUTBASE1		0x2148CU
+#define IDT_SW_NTP16_BARSETUP2		0x21490U
+#define IDT_SW_NTP16_BARLIMIT2		0x21494U
+#define IDT_SW_NTP16_BARLTBASE2		0x21498U
+#define IDT_SW_NTP16_BARUTBASE2		0x2149CU
+#define IDT_SW_NTP16_BARSETUP3		0x214A0U
+#define IDT_SW_NTP16_BARLIMIT3		0x214A4U
+#define IDT_SW_NTP16_BARLTBASE3		0x214A8U
+#define IDT_SW_NTP16_BARUTBASE3		0x214ACU
+#define IDT_SW_NTP16_BARSETUP4		0x214B0U
+#define IDT_SW_NTP16_BARLIMIT4		0x214B4U
+#define IDT_SW_NTP16_BARLTBASE4		0x214B8U
+#define IDT_SW_NTP16_BARUTBASE4		0x214BCU
+#define IDT_SW_NTP16_BARSETUP5		0x214C0U
+#define IDT_SW_NTP16_BARLIMIT5		0x214C4U
+#define IDT_SW_NTP16_BARLTBASE5		0x214C8U
+#define IDT_SW_NTP16_BARUTBASE5		0x214CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP20_PCIECMDSTS		0x29004U
+#define IDT_SW_NTP20_PCIELCTLSTS	0x29050U
+/* NT-function control register		(DWORD) */
+#define IDT_SW_NTP20_NTCTL		0x29400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP20_BARSETUP0		0x29470U
+#define IDT_SW_NTP20_BARLIMIT0		0x29474U
+#define IDT_SW_NTP20_BARLTBASE0		0x29478U
+#define IDT_SW_NTP20_BARUTBASE0		0x2947CU
+#define IDT_SW_NTP20_BARSETUP1		0x29480U
+#define IDT_SW_NTP20_BARLIMIT1		0x29484U
+#define IDT_SW_NTP20_BARLTBASE1		0x29488U
+#define IDT_SW_NTP20_BARUTBASE1		0x2948CU
+#define IDT_SW_NTP20_BARSETUP2		0x29490U
+#define IDT_SW_NTP20_BARLIMIT2		0x29494U
+#define IDT_SW_NTP20_BARLTBASE2		0x29498U
+#define IDT_SW_NTP20_BARUTBASE2		0x2949CU
+#define IDT_SW_NTP20_BARSETUP3		0x294A0U
+#define IDT_SW_NTP20_BARLIMIT3		0x294A4U
+#define IDT_SW_NTP20_BARLTBASE3		0x294A8U
+#define IDT_SW_NTP20_BARUTBASE3		0x294ACU
+#define IDT_SW_NTP20_BARSETUP4		0x294B0U
+#define IDT_SW_NTP20_BARLIMIT4		0x294B4U
+#define IDT_SW_NTP20_BARLTBASE4		0x294B8U
+#define IDT_SW_NTP20_BARUTBASE4		0x294BCU
+#define IDT_SW_NTP20_BARSETUP5		0x294C0U
+#define IDT_SW_NTP20_BARLIMIT5		0x294C4U
+#define IDT_SW_NTP20_BARLTBASE5		0x294C8U
+#define IDT_SW_NTP20_BARUTBASE5		0x294CCU
+/* IDT PCIe-switch control register	(DWORD) */
+#define IDT_SW_CTL			0x3E000U
+/* Boot Configuration Vector Status	(DWORD) */
+#define IDT_SW_BCVSTS			0x3E004U
+/* Port Clocking Mode			(DWORD) */
+#define IDT_SW_PCLKMODE			0x3E008U
+/* Reset Drain Delay			(DWORD) */
+#define IDT_SW_RDRAINDELAY		0x3E080U
+/* Port Operating Mode Change Drain Delay (DWORD) */
+#define IDT_SW_POMCDELAY		0x3E084U
+/* Side Effect Delay			(DWORD) */
+#define IDT_SW_SEDELAY			0x3E088U
+/* Upstream Secondary Bus Reset Delay	(DWORD) */
+#define IDT_SW_SSBRDELAY		0x3E08CU
+/* Switch partition N Control/Status/Failover registers */
+#define IDT_SW_SWPART0CTL		0x3E100U
+#define IDT_SW_SWPART0STS		0x3E104U
+#define IDT_SW_SWPART0FCTL		0x3E108U
+#define IDT_SW_SWPART1CTL		0x3E120U
+#define IDT_SW_SWPART1STS		0x3E124U
+#define IDT_SW_SWPART1FCTL		0x3E128U
+#define IDT_SW_SWPART2CTL		0x3E140U
+#define IDT_SW_SWPART2STS		0x3E144U
+#define IDT_SW_SWPART2FCTL		0x3E148U
+#define IDT_SW_SWPART3CTL		0x3E160U
+#define IDT_SW_SWPART3STS		0x3E164U
+#define IDT_SW_SWPART3FCTL		0x3E168U
+#define IDT_SW_SWPART4CTL		0x3E180U
+#define IDT_SW_SWPART4STS		0x3E184U
+#define IDT_SW_SWPART4FCTL		0x3E188U
+#define IDT_SW_SWPART5CTL		0x3E1A0U
+#define IDT_SW_SWPART5STS		0x3E1A4U
+#define IDT_SW_SWPART5FCTL		0x3E1A8U
+#define IDT_SW_SWPART6CTL		0x3E1C0U
+#define IDT_SW_SWPART6STS		0x3E1C4U
+#define IDT_SW_SWPART6FCTL		0x3E1C8U
+#define IDT_SW_SWPART7CTL		0x3E1E0U
+#define IDT_SW_SWPART7STS		0x3E1E4U
+#define IDT_SW_SWPART7FCTL		0x3E1E8U
+/* Switch port N control and status registers */
+#define IDT_SW_SWPORT0CTL		0x3E200U
+#define IDT_SW_SWPORT0STS		0x3E204U
+#define IDT_SW_SWPORT0FCTL		0x3E208U
+#define IDT_SW_SWPORT2CTL		0x3E240U
+#define IDT_SW_SWPORT2STS		0x3E244U
+#define IDT_SW_SWPORT2FCTL		0x3E248U
+#define IDT_SW_SWPORT4CTL		0x3E280U
+#define IDT_SW_SWPORT4STS		0x3E284U
+#define IDT_SW_SWPORT4FCTL		0x3E288U
+#define IDT_SW_SWPORT6CTL		0x3E2C0U
+#define IDT_SW_SWPORT6STS		0x3E2C4U
+#define IDT_SW_SWPORT6FCTL		0x3E2C8U
+#define IDT_SW_SWPORT8CTL		0x3E300U
+#define IDT_SW_SWPORT8STS		0x3E304U
+#define IDT_SW_SWPORT8FCTL		0x3E308U
+#define IDT_SW_SWPORT12CTL		0x3E380U
+#define IDT_SW_SWPORT12STS		0x3E384U
+#define IDT_SW_SWPORT12FCTL		0x3E388U
+#define IDT_SW_SWPORT16CTL		0x3E400U
+#define IDT_SW_SWPORT16STS		0x3E404U
+#define IDT_SW_SWPORT16FCTL		0x3E408U
+#define IDT_SW_SWPORT20CTL		0x3E480U
+#define IDT_SW_SWPORT20STS		0x3E484U
+#define IDT_SW_SWPORT20FCTL		0x3E488U
+/* Switch Event registers */
+/* Switch Event Status/Mask/Partition mask (DWORD) */
+#define IDT_SW_SESTS			0x3EC00U
+#define IDT_SW_SEMSK			0x3EC04U
+#define IDT_SW_SEPMSK			0x3EC08U
+/* Switch Event Link Up/Down Status/Mask (DWORD) */
+#define IDT_SW_SELINKUPSTS		0x3EC0CU
+#define IDT_SW_SELINKUPMSK		0x3EC10U
+#define IDT_SW_SELINKDNSTS		0x3EC14U
+#define IDT_SW_SELINKDNMSK		0x3EC18U
+/* Switch Event Fundamental Reset Status/Mask (DWORD) */
+#define IDT_SW_SEFRSTSTS		0x3EC1CU
+#define IDT_SW_SEFRSTMSK		0x3EC20U
+/* Switch Event Hot Reset Status/Mask	(DWORD) */
+#define IDT_SW_SEHRSTSTS		0x3EC24U
+#define IDT_SW_SEHRSTMSK		0x3EC28U
+/* Switch Event Failover Mask		(DWORD) */
+#define IDT_SW_SEFOVRMSK		0x3EC2CU
+/* Switch Event Global Signal Status/Mask (DWORD) */
+#define IDT_SW_SEGSIGSTS		0x3EC30U
+#define IDT_SW_SEGSIGMSK		0x3EC34U
+/* NT Global Doorbell Status		(DWORD) */
+#define IDT_SW_GDBELLSTS		0x3EC3CU
+/* Switch partition N message M control (msgs routing table) (DWORD) */
+#define IDT_SW_SWP0MSGCTL0		0x3EE00U
+#define IDT_SW_SWP1MSGCTL0		0x3EE04U
+#define IDT_SW_SWP2MSGCTL0		0x3EE08U
+#define IDT_SW_SWP3MSGCTL0		0x3EE0CU
+#define IDT_SW_SWP4MSGCTL0		0x3EE10U
+#define IDT_SW_SWP5MSGCTL0		0x3EE14U
+#define IDT_SW_SWP6MSGCTL0		0x3EE18U
+#define IDT_SW_SWP7MSGCTL0		0x3EE1CU
+#define IDT_SW_SWP0MSGCTL1		0x3EE20U
+#define IDT_SW_SWP1MSGCTL1		0x3EE24U
+#define IDT_SW_SWP2MSGCTL1		0x3EE28U
+#define IDT_SW_SWP3MSGCTL1		0x3EE2CU
+#define IDT_SW_SWP4MSGCTL1		0x3EE30U
+#define IDT_SW_SWP5MSGCTL1		0x3EE34U
+#define IDT_SW_SWP6MSGCTL1		0x3EE38U
+#define IDT_SW_SWP7MSGCTL1		0x3EE3CU
+#define IDT_SW_SWP0MSGCTL2		0x3EE40U
+#define IDT_SW_SWP1MSGCTL2		0x3EE44U
+#define IDT_SW_SWP2MSGCTL2		0x3EE48U
+#define IDT_SW_SWP3MSGCTL2		0x3EE4CU
+#define IDT_SW_SWP4MSGCTL2		0x3EE50U
+#define IDT_SW_SWP5MSGCTL2		0x3EE54U
+#define IDT_SW_SWP6MSGCTL2		0x3EE58U
+#define IDT_SW_SWP7MSGCTL2		0x3EE5CU
+#define IDT_SW_SWP0MSGCTL3		0x3EE60U
+#define IDT_SW_SWP1MSGCTL3		0x3EE64U
+#define IDT_SW_SWP2MSGCTL3		0x3EE68U
+#define IDT_SW_SWP3MSGCTL3		0x3EE6CU
+#define IDT_SW_SWP4MSGCTL3		0x3EE70U
+#define IDT_SW_SWP5MSGCTL3		0x3EE74U
+#define IDT_SW_SWP6MSGCTL3		0x3EE78U
+#define IDT_SW_SWP7MSGCTL3		0x3EE7CU
+/* SMBus Status and Control registers	(DWORD) */
+#define IDT_SW_SMBUSSTS			0x3F188U
+#define IDT_SW_SMBUSCTL			0x3F18CU
+/* Serial EEPROM Interface		(DWORD) */
+#define IDT_SW_EEPROMINTF		0x3F190U
+/* MBus I/O Expander Address N		(DWORD) */
+#define IDT_SW_IOEXPADDR0		0x3F198U
+#define IDT_SW_IOEXPADDR1		0x3F19CU
+#define IDT_SW_IOEXPADDR2		0x3F1A0U
+#define IDT_SW_IOEXPADDR3		0x3F1A4U
+#define IDT_SW_IOEXPADDR4		0x3F1A8U
+#define IDT_SW_IOEXPADDR5		0x3F1ACU
+/* General Purpose Events Control and Status registers (DWORD) */
+#define IDT_SW_GPECTL			0x3F1B0U
+#define IDT_SW_GPESTS			0x3F1B4U
+/* Temperature sensor Control/Status/Alarm/Adjustment/Slope registers */
+#define IDT_SW_TMPCTL			0x3F1D4U
+#define IDT_SW_TMPSTS			0x3F1D8U
+#define IDT_SW_TMPALARM			0x3F1DCU
+#define IDT_SW_TMPADJ			0x3F1E0U
+#define IDT_SW_TSSLOPE			0x3F1E4U
+/* SMBus Configuration Block header log	(DWORD) */
+#define IDT_SW_SMBUSCBHL		0x3F1E8U
+
+/*
+ * Common registers related constants
+ * @IDT_REG_ALIGN:	Registers alignment used in the driver
+ * @IDT_REG_PCI_MAX:	Maximum PCI configuration space register value
+ * @IDT_REG_SW_MAX:	Maximum global register value
+ */
+#define IDT_REG_ALIGN			4
+#define IDT_REG_PCI_MAX			0x00FFFU
+#define IDT_REG_SW_MAX			0x3FFFFU
+
+/*
+ * PCICMDSTS register fields related constants
+ * @IDT_PCICMDSTS_IOAE:	I/O access enable
+ * @IDT_PCICMDSTS_MAE:	Memory access enable
+ * @IDT_PCICMDSTS_BME:	Bus master enable
+ */
+#define IDT_PCICMDSTS_IOAE		0x00000001U
+#define IDT_PCICMDSTS_MAE		0x00000002U
+#define IDT_PCICMDSTS_BME		0x00000004U
+
+/*
+ * PCIEDCAP register fields related constants
+ * @IDT_PCIEDCAP_MPAYLOAD_MASK:	 Maximum payload size mask
+ * @IDT_PCIEDCAP_MPAYLOAD_FLD:	 Maximum payload size field offset
+ * @IDT_PCIEDCAP_MPAYLOAD_S128:	 Max supported payload size of 128 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S256:	 Max supported payload size of 256 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S512:	 Max supported payload size of 512 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S1024: Max supported payload size of 1024 bytes
+ * @IDT_PCIEDCAP_MPAYLOAD_S2048: Max supported payload size of 2048 bytes
+ */
+#define IDT_PCIEDCAP_MPAYLOAD_MASK	0x00000007U
+#define IDT_PCIEDCAP_MPAYLOAD_FLD	0
+#define IDT_PCIEDCAP_MPAYLOAD_S128	0x00000000U
+#define IDT_PCIEDCAP_MPAYLOAD_S256	0x00000001U
+#define IDT_PCIEDCAP_MPAYLOAD_S512	0x00000002U
+#define IDT_PCIEDCAP_MPAYLOAD_S1024	0x00000003U
+#define IDT_PCIEDCAP_MPAYLOAD_S2048	0x00000004U
+
+/*
+ * PCIEDCTLSTS registers fields related constants
+ * @IDT_PCIEDCTLSTS_MPS_MASK:	Maximum payload size mask
+ * @IDT_PCIEDCTLSTS_MPS_FLD:	MPS field offset
+ * @IDT_PCIEDCTLSTS_MPS_S128:	Max payload size of 128 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S256:	Max payload size of 256 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S512:	Max payload size of 512 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S1024:	Max payload size of 1024 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S2048:	Max payload size of 2048 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S4096:	Max payload size of 4096 bytes
+ */
+#define IDT_PCIEDCTLSTS_MPS_MASK	0x000000E0U
+#define IDT_PCIEDCTLSTS_MPS_FLD		5
+#define IDT_PCIEDCTLSTS_MPS_S128	0x00000000U
+#define IDT_PCIEDCTLSTS_MPS_S256	0x00000020U
+#define IDT_PCIEDCTLSTS_MPS_S512	0x00000040U
+#define IDT_PCIEDCTLSTS_MPS_S1024	0x00000060U
+#define IDT_PCIEDCTLSTS_MPS_S2048	0x00000080U
+#define IDT_PCIEDCTLSTS_MPS_S4096	0x000000A0U
+
+/*
+ * PCIELCAP register fields related constants
+ * @IDT_PCIELCAP_PORTNUM_MASK:	Port number field mask
+ * @IDT_PCIELCAP_PORTNUM_FLD:	Port number field offset
+ */
+#define IDT_PCIELCAP_PORTNUM_MASK	0xFF000000U
+#define IDT_PCIELCAP_PORTNUM_FLD	24
+
+/*
+ * PCIELCTLSTS registers fields related constants
+ * @IDT_PCIELCTLSTS_CLS_MASK:	Current link speed mask
+ * @IDT_PCIELCTLSTS_CLS_FLD:	Current link speed field offset
+ * @IDT_PCIELCTLSTS_NLW_MASK:	Negotiated link width mask
+ * @IDT_PCIELCTLSTS_NLW_FLD:	Negotiated link width field offset
+ * @IDT_PCIELCTLSTS_SCLK_COM:	Common slot clock configuration
+ */
+#define IDT_PCIELCTLSTS_CLS_MASK	0x000F0000U
+#define IDT_PCIELCTLSTS_CLS_FLD		16
+#define IDT_PCIELCTLSTS_NLW_MASK	0x03F00000U
+#define IDT_PCIELCTLSTS_NLW_FLD		20
+#define IDT_PCIELCTLSTS_SCLK_COM	0x10000000U
+
+/*
+ * NTCTL register fields related constants
+ * @IDT_NTCTL_IDPROTDIS:	ID Protection check disable (disable MTBL)
+ * @IDT_NTCTL_CPEN:		Completion enable
+ * @IDT_NTCTL_RNS:		Request no snoop processing (if MTBL disabled)
+ * @IDT_NTCTL_ATP:		Address type processing (if MTBL disabled)
+ */
+#define IDT_NTCTL_IDPROTDIS		0x00000001U
+#define IDT_NTCTL_CPEN			0x00000002U
+#define IDT_NTCTL_RNS			0x00000004U
+#define IDT_NTCTL_ATP			0x00000008U
+
+/*
+ * NTINTSTS register fields related constants
+ * @IDT_NTINTSTS_MSG:		Message interrupt bit
+ * @IDT_NTINTSTS_DBELL:		Doorbell interrupt bit
+ * @IDT_NTINTSTS_SEVENT:	Switch Event interrupt bit
+ * @IDT_NTINTSTS_TMPSENSOR:	Temperature sensor interrupt bit
+ */
+#define IDT_NTINTSTS_MSG		0x00000001U
+#define IDT_NTINTSTS_DBELL		0x00000002U
+#define IDT_NTINTSTS_SEVENT		0x00000008U
+#define IDT_NTINTSTS_TMPSENSOR		0x00000080U
+
+/*
+ * NTINTMSK register fields related constants
+ * @IDT_NTINTMSK_MSG:		Message interrupt mask bit
+ * @IDT_NTINTMSK_DBELL:		Doorbell interrupt mask bit
+ * @IDT_NTINTMSK_SEVENT:	Switch Event interrupt mask bit
+ * @IDT_NTINTMSK_TMPSENSOR:	Temperature sensor interrupt mask bit
+ * @IDT_NTINTMSK_ALL:		All the useful interrupts mask
+ */
+#define IDT_NTINTMSK_MSG		0x00000001U
+#define IDT_NTINTMSK_DBELL		0x00000002U
+#define IDT_NTINTMSK_SEVENT		0x00000008U
+#define IDT_NTINTMSK_TMPSENSOR		0x00000080U
+#define IDT_NTINTMSK_ALL \
+	(IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | \
+	 IDT_NTINTMSK_SEVENT | IDT_NTINTMSK_TMPSENSOR)
+
+/*
+ * NTGSIGNAL register fields related constants
+ * @IDT_NTGSIGNAL_SET:	Set global signal of the local partition
+ */
+#define IDT_NTGSIGNAL_SET		0x00000001U
+
+/*
+ * BARSETUP register fields related constants
+ * @IDT_BARSETUP_TYPE_MASK:	Mask of the TYPE field
+ * @IDT_BARSETUP_TYPE_32:	32-bit addressing BAR
+ * @IDT_BARSETUP_TYPE_64:	64-bit addressing BAR
+ * @IDT_BARSETUP_PREF:		Value of the BAR prefetchable field
+ * @IDT_BARSETUP_SIZE_MASK:	Mask of the SIZE field
+ * @IDT_BARSETUP_SIZE_FLD:	SIZE field offset
+ * @IDT_BARSETUP_SIZE_CFG:	SIZE field value in case of config space MODE
+ * @IDT_BARSETUP_MODE_CFG:	Configuration space BAR mode
+ * @IDT_BARSETUP_ATRAN_MASK:	ATRAN field mask
+ * @IDT_BARSETUP_ATRAN_FLD:	ATRAN field offset
+ * @IDT_BARSETUP_ATRAN_DIR:	Direct address translation memory window
+ * @IDT_BARSETUP_ATRAN_LUT12:	12-entry lookup table
+ * @IDT_BARSETUP_ATRAN_LUT24:	24-entry lookup table
+ * @IDT_BARSETUP_TPART_MASK:	TPART field mask
+ * @IDT_BARSETUP_TPART_FLD:	TPART field offset
+ * @IDT_BARSETUP_EN:		BAR enable bit
+ */
+#define IDT_BARSETUP_TYPE_MASK		0x00000006U
+#define IDT_BARSETUP_TYPE_FLD		0
+#define IDT_BARSETUP_TYPE_32		0x00000000U
+#define IDT_BARSETUP_TYPE_64		0x00000004U
+#define IDT_BARSETUP_PREF		0x00000008U
+#define IDT_BARSETUP_SIZE_MASK		0x000003F0U
+#define IDT_BARSETUP_SIZE_FLD		4
+#define IDT_BARSETUP_SIZE_CFG		0x000000C0U
+#define IDT_BARSETUP_MODE_CFG		0x00000400U
+#define IDT_BARSETUP_ATRAN_MASK		0x00001800U
+#define IDT_BARSETUP_ATRAN_FLD		11
+#define IDT_BARSETUP_ATRAN_DIR		0x00000000U
+#define IDT_BARSETUP_ATRAN_LUT12	0x00000800U
+#define IDT_BARSETUP_ATRAN_LUT24	0x00001000U
+#define IDT_BARSETUP_TPART_MASK		0x0000E000U
+#define IDT_BARSETUP_TPART_FLD		13
+#define IDT_BARSETUP_EN			0x80000000U
+
+/*
+ * NTMTBLDATA register fields related constants
+ * @IDT_NTMTBLDATA_VALID:	Set the MTBL entry being valid
+ * @IDT_NTMTBLDATA_REQID_MASK:	Bus:Device:Function field mask
+ * @IDT_NTMTBLDATA_REQID_FLD:	Bus:Device:Function field offset
+ * @IDT_NTMTBLDATA_PART_MASK:	Partition field mask
+ * @IDT_NTMTBLDATA_PART_FLD:	Partition field offset
+ * @IDT_NTMTBLDATA_ATP_TRANS:	Enable AT field translation on request TLPs
+ * @IDT_NTMTBLDATA_CNS_INV:	Enable No Snoop attribute inversion of
+ *				Completion TLPs
+ * @IDT_NTMTBLDATA_RNS_INV:	Enable No Snoop attribute inversion of
+ *				Request TLPs
+ */
+#define IDT_NTMTBLDATA_VALID		0x00000001U
+#define IDT_NTMTBLDATA_REQID_MASK	0x0001FFFEU
+#define IDT_NTMTBLDATA_REQID_FLD	1
+#define IDT_NTMTBLDATA_PART_MASK	0x000E0000U
+#define IDT_NTMTBLDATA_PART_FLD		17
+#define IDT_NTMTBLDATA_ATP_TRANS	0x20000000U
+#define IDT_NTMTBLDATA_CNS_INV		0x40000000U
+#define IDT_NTMTBLDATA_RNS_INV		0x80000000U
+
+/*
+ * REQIDCAP register fields related constants
+ * @IDT_REQIDCAP_REQID_MASK:	Request ID field mask
+ * @IDT_REQIDCAP_REQID_FLD:	Request ID field offset
+ */
+#define IDT_REQIDCAP_REQID_MASK		0x0000FFFFU
+#define IDT_REQIDCAP_REQID_FLD		0
+
+/*
+ * LUTOFFSET register fields related constants
+ * @IDT_LUTOFFSET_INDEX_MASK:	Lookup table index field mask
+ * @IDT_LUTOFFSET_INDEX_FLD:	Lookup table index field offset
+ * @IDT_LUTOFFSET_BAR_MASK:	Lookup table BAR select field mask
+ * @IDT_LUTOFFSET_BAR_FLD:	Lookup table BAR select field offset
+ */
+#define IDT_LUTOFFSET_INDEX_MASK	0x0000001FU
+#define IDT_LUTOFFSET_INDEX_FLD		0
+#define IDT_LUTOFFSET_BAR_MASK		0x00000700U
+#define IDT_LUTOFFSET_BAR_FLD		8
+
+/*
+ * LUTUDATA register fields related constants
+ * @IDT_LUTUDATA_PART_MASK:	Partition field mask
+ * @IDT_LUTUDATA_PART_FLD:	Partition field offset
+ * @IDT_LUTUDATA_VALID:		Lookup table entry valid bit
+ */
+#define IDT_LUTUDATA_PART_MASK		0x0000000FU
+#define IDT_LUTUDATA_PART_FLD		0
+#define IDT_LUTUDATA_VALID		0x80000000U
+
+/*
+ * SWPARTxSTS register fields related constants
+ * @IDT_SWPARTxSTS_SCI:		Switch partition state change initiated
+ * @IDT_SWPARTxSTS_SCC:		Switch partition state change completed
+ * @IDT_SWPARTxSTS_STATE_MASK:	Switch partition state mask
+ * @IDT_SWPARTxSTS_STATE_FLD:	Switch partition state field offset
+ * @IDT_SWPARTxSTS_STATE_DIS:	Switch partition disabled
+ * @IDT_SWPARTxSTS_STATE_ACT:	Switch partition enabled
+ * @IDT_SWPARTxSTS_STATE_RES:	Switch partition in reset
+ * @IDT_SWPARTxSTS_US:		Switch partition has upstream port
+ * @IDT_SWPARTxSTS_USID_MASK:	Switch partition upstream port ID mask
+ * @IDT_SWPARTxSTS_USID_FLD:	Switch partition upstream port ID field offset
+ * @IDT_SWPARTxSTS_NT:		Upstream port has NT function
+ * @IDT_SWPARTxSTS_DMA:		Upstream port has DMA function
+ */
+#define IDT_SWPARTxSTS_SCI		0x00000001U
+#define IDT_SWPARTxSTS_SCC		0x00000002U
+#define IDT_SWPARTxSTS_STATE_MASK	0x00000060U
+#define IDT_SWPARTxSTS_STATE_FLD	5
+#define IDT_SWPARTxSTS_STATE_DIS	0x00000000U
+#define IDT_SWPARTxSTS_STATE_ACT	0x00000020U
+#define IDT_SWPARTxSTS_STATE_RES	0x00000060U
+#define IDT_SWPARTxSTS_US		0x00000100U
+#define IDT_SWPARTxSTS_USID_MASK	0x00003E00U
+#define IDT_SWPARTxSTS_USID_FLD		9
+#define IDT_SWPARTxSTS_NT		0x00004000U
+#define IDT_SWPARTxSTS_DMA		0x00008000U
+
+/*
+ * SWPORTxSTS register fields related constants
+ * @IDT_SWPORTxSTS_OMCI:	Operation mode change initiated
+ * @IDT_SWPORTxSTS_OMCC:	Operation mode change completed
+ * @IDT_SWPORTxSTS_LINKUP:	Link up status
+ * @IDT_SWPORTxSTS_DS:		Port lanes behave as downstream lanes
+ * @IDT_SWPORTxSTS_MODE_MASK:	Port mode field mask
+ * @IDT_SWPORTxSTS_MODE_FLD:	Port mode field offset
+ * @IDT_SWPORTxSTS_MODE_DIS:	Port mode - disabled
+ * @IDT_SWPORTxSTS_MODE_DS:	Port mode - downstream switch port
+ * @IDT_SWPORTxSTS_MODE_US:	Port mode - upstream switch port
+ * @IDT_SWPORTxSTS_MODE_NT:	Port mode - NT function
+ * @IDT_SWPORTxSTS_MODE_USNT:	Port mode - upstream switch port with NTB
+ * @IDT_SWPORTxSTS_MODE_UNAT:	Port mode - unattached
+ * @IDT_SWPORTxSTS_MODE_USDMA:	Port mode - upstream switch port with DMA
+ * @IDT_SWPORTxSTS_MODE_USNTDMA:Port mode - upstream port with NTB and DMA
+ * @IDT_SWPORTxSTS_MODE_NTDMA:	Port mode - NT function with DMA
+ * @IDT_SWPORTxSTS_SWPART_MASK:	Port partition field mask
+ * @IDT_SWPORTxSTS_SWPART_FLD:	Port partition field offset
+ * @IDT_SWPORTxSTS_DEVNUM_MASK:	Port device number field mask
+ * @IDT_SWPORTxSTS_DEVNUM_FLD:	Port device number field offset
+ */
+#define IDT_SWPORTxSTS_OMCI		0x00000001U
+#define IDT_SWPORTxSTS_OMCC		0x00000002U
+#define IDT_SWPORTxSTS_LINKUP		0x00000010U
+#define IDT_SWPORTxSTS_DS		0x00000020U
+#define IDT_SWPORTxSTS_MODE_MASK	0x000003C0U
+#define IDT_SWPORTxSTS_MODE_FLD		6
+#define IDT_SWPORTxSTS_MODE_DIS		0x00000000U
+#define IDT_SWPORTxSTS_MODE_DS		0x00000040U
+#define IDT_SWPORTxSTS_MODE_US		0x00000080U
+#define IDT_SWPORTxSTS_MODE_NT		0x000000C0U
+#define IDT_SWPORTxSTS_MODE_USNT	0x00000100U
+#define IDT_SWPORTxSTS_MODE_UNAT	0x00000140U
+#define IDT_SWPORTxSTS_MODE_USDMA	0x00000180U
+#define IDT_SWPORTxSTS_MODE_USNTDMA	0x000001C0U
+#define IDT_SWPORTxSTS_MODE_NTDMA	0x00000200U
+#define IDT_SWPORTxSTS_SWPART_MASK	0x00001C00U
+#define IDT_SWPORTxSTS_SWPART_FLD	10
+#define IDT_SWPORTxSTS_DEVNUM_MASK	0x001F0000U
+#define IDT_SWPORTxSTS_DEVNUM_FLD	16
+
+/*
+ * SEMSK register fields related constants
+ * @IDT_SEMSK_LINKUP:	Link Up event mask bit
+ * @IDT_SEMSK_LINKDN:	Link Down event mask bit
+ * @IDT_SEMSK_GSIGNAL:	Global Signal event mask bit
+ */
+#define IDT_SEMSK_LINKUP		0x00000001U
+#define IDT_SEMSK_LINKDN		0x00000002U
+#define IDT_SEMSK_GSIGNAL		0x00000020U
+
+/*
+ * SWPxMSGCTL register fields related constants
+ * @IDT_SWPxMSGCTL_REG_MASK:	Register select field mask
+ * @IDT_SWPxMSGCTL_REG_FLD:	Register select field offset
+ * @IDT_SWPxMSGCTL_PART_MASK:	Partition select field mask
+ * @IDT_SWPxMSGCTL_PART_FLD:	Partition select field offset
+ */
+#define IDT_SWPxMSGCTL_REG_MASK		0x00000003U
+#define IDT_SWPxMSGCTL_REG_FLD		0
+#define IDT_SWPxMSGCTL_PART_MASK	0x00000070U
+#define IDT_SWPxMSGCTL_PART_FLD		4
+
+/*
+ * TMPSTS register fields related constants
+ * @IDT_TMPSTS_TEMP_MASK:	Current temperature field mask
+ * @IDT_TMPSTS_TEMP_FLD:	Current temperature field offset
+ */
+#define IDT_TMPSTS_TEMP_MASK		0x000000FFU
+#define IDT_TMPSTS_TEMP_FLD		0
+
+/*
+ * Helper macro to get/set the corresponding field value
+ * @GET_FIELD:		Retrieve the value of the corresponding field
+ * @SET_FIELD:		Set the specified field up
+ * @IS_FLD_SET:		Check whether a field is set with value
+ */
+#define GET_FIELD(field, data) \
+	(((u32)(data) & IDT_ ##field## _MASK) >> IDT_ ##field## _FLD)
+#define SET_FIELD(field, data, value) \
+	(((u32)(data) & ~IDT_ ##field## _MASK) | \
+	 ((u32)(value) << IDT_ ##field## _FLD))
+#define IS_FLD_SET(field, data, value) \
+	(((u32)(data) & IDT_ ##field## _MASK) == IDT_ ##field## _ ##value)
+
+/*
+ * Useful registers masks:
+ * @IDT_DBELL_MASK:	Doorbell bits mask
+ * @IDT_OUTMSG_MASK:	Out messages status bits mask
+ * @IDT_INMSG_MASK:	In messages status bits mask
+ * @IDT_MSG_MASK:	Any message status bits mask
+ */
+#define IDT_DBELL_MASK		((u32)0xFFFFFFFFU)
+#define IDT_OUTMSG_MASK		((u32)0x0000000FU)
+#define IDT_INMSG_MASK		((u32)0x000F0000U)
+#define IDT_MSG_MASK		(IDT_INMSG_MASK | IDT_OUTMSG_MASK)
+
+/*
+ * Number of IDT NTB resources:
+ * @IDT_MSG_CNT:	Number of Message registers
+ * @IDT_BAR_CNT:	Number of BARs of each port
+ * @IDT_MTBL_ENTRY_CNT:	Number of mapping table entries
+ */
+#define IDT_MSG_CNT		4
+#define IDT_BAR_CNT		6
+#define IDT_MTBL_ENTRY_CNT	64
+
+/*
+ * General IDT PCIe-switch constants
+ * @IDT_MAX_NR_PORTS:	Maximum number of ports per IDT PCIe-switch
+ * @IDT_MAX_NR_PARTS:	Maximum number of partitions per IDT PCIe-switch
+ * @IDT_MAX_NR_PEERS:	Maximum number of NT-peers per IDT PCIe-switch
+ * @IDT_MAX_NR_MWS:	Maximum number of Memory Windows
+ * @IDT_PCIE_REGSIZE:	Size of the registers in bytes
+ * @IDT_TRANS_ALIGN:	Alignment of translated base address
+ * @IDT_DIR_SIZE_ALIGN:	Alignment of size setting for direct translated MWs.
+ *			Even though the lower 10 bits are reserved, IDT treats
+ *			them as ones, so effectively there is no alignment or
+ *			size limit for DIR address translation.
+ */
+#define IDT_MAX_NR_PORTS	24
+#define IDT_MAX_NR_PARTS	8
+#define IDT_MAX_NR_PEERS	8
+#define IDT_MAX_NR_MWS		29
+#define IDT_PCIE_REGSIZE	4
+#define IDT_TRANS_ALIGN		4
+#define IDT_DIR_SIZE_ALIGN	1
+
+/*
+ * IDT Memory Windows type. Depending on the device settings, IDT supports
+ * Direct Address Translation MW registers and Lookup Table registers
+ * @IDT_MW_DIR:		Direct address translation
+ * @IDT_MW_LUT12:	12-entry lookup table entry
+ * @IDT_MW_LUT24:	24-entry lookup table entry
+ *
+ * NOTE These values are exactly the same as one of the BARSETUP ATRAN field
+ */
+enum idt_mw_type {
+	IDT_MW_DIR = 0x0,
+	IDT_MW_LUT12 = 0x1,
+	IDT_MW_LUT24 = 0x2
+};
+
+/*
+ * IDT PCIe-switch model private data
+ * @name:	Device name
+ * @port_cnt:	Total number of NT endpoint ports
+ * @ports:	Port ids
+ */
+struct idt_89hpes_cfg {
+	char *name;
+	unsigned char port_cnt;
+	unsigned char ports[];
+};
+
+/*
+ * Memory window configuration structure
+ * @type:	Type of the memory window (direct address translation or lookup
+ *		table)
+ *
+ * @bar:	PCIe BAR the memory window referenced to
+ * @idx:	Index of the memory window within the BAR
+ *
+ * @addr_align:	Alignment of translated address
+ * @size_align:	Alignment of memory window size
+ * @size_max:	Maximum size of memory window
+ */
+struct idt_mw_cfg {
+	enum idt_mw_type type;
+
+	unsigned char bar;
+	unsigned char idx;
+
+	u64 addr_align;
+	u64 size_align;
+	u64 size_max;
+};
+
+/*
+ * Description structure of peer IDT NT-functions:
+ * @port:		NT-function port
+ * @part:		NT-function partition
+ *
+ * @mw_cnt:		Number of memory windows supported by NT-function
+ * @mws:		Array of memory windows descriptors
+ */
+struct idt_ntb_peer {
+	unsigned char port;
+	unsigned char part;
+
+	unsigned char mw_cnt;
+	struct idt_mw_cfg *mws;
+};
+
+/*
+ * Description structure of local IDT NT-function:
+ * @ntb:		Linux NTB-device description structure
+ * @swcfg:		Pointer to the structure of local IDT PCIe-switch
+ *			specific configurations
+ *
+ * @port:		Local NT-function port
+ * @part:		Local NT-function partition
+ *
+ * @peer_cnt:		Number of peers with activated NTB-function
+ * @peers:		Array of peer-describing structures
+ * @port_idx_map:	Map of port number -> peer index
+ * @part_idx_map:	Map of partition number -> peer index
+ *
+ * @mtbl_lock:		Mapping table access lock
+ *
+ * @mw_cnt:		Number of memory windows supported by NT-function
+ * @mws:		Array of memory windows descriptors
+ * @lut_lock:		Lookup table access lock
+ *
+ * @msg_locks:		Message registers mapping table lockers
+ *
+ * @cfgspc:		Virtual address of the memory mapped configuration
+ *			space of the NT-function
+ * @db_mask_lock:	Doorbell mask register lock
+ * @msg_mask_lock:	Message mask register lock
+ * @gasa_lock:		GASA registers access lock
+ *
+ * @dbgfs_info:		DebugFS info node
+ */
+struct idt_ntb_dev {
+	struct ntb_dev ntb;
+	struct idt_89hpes_cfg *swcfg;
+
+	unsigned char port;
+	unsigned char part;
+
+	unsigned char peer_cnt;
+	struct idt_ntb_peer peers[IDT_MAX_NR_PEERS];
+	char port_idx_map[IDT_MAX_NR_PORTS];
+	char part_idx_map[IDT_MAX_NR_PARTS];
+
+	spinlock_t mtbl_lock;
+
+	unsigned char mw_cnt;
+	struct idt_mw_cfg *mws;
+	spinlock_t lut_lock;
+
+	spinlock_t msg_locks[IDT_MSG_CNT];
+
+	void __iomem *cfgspc;
+	spinlock_t db_mask_lock;
+	spinlock_t msg_mask_lock;
+	spinlock_t gasa_lock;
+
+	struct dentry *dbgfs_info;
+};
+#define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb)
+
+/*
+ * Descriptor of the IDT PCIe-switch BAR resources
+ * @setup:	BAR setup register
+ * @limit:	BAR limit register
+ * @ltbase:	Lower translated base address
+ * @utbase:	Upper translated base address
+ */
+struct idt_ntb_bar {
+	unsigned int setup;
+	unsigned int limit;
+	unsigned int ltbase;
+	unsigned int utbase;
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch message resources
+ * @in:		Inbound message register
+ * @out:	Outbound message register
+ * @src:	Source of inbound message register
+ */
+struct idt_ntb_msg {
+	unsigned int in;
+	unsigned int out;
+	unsigned int src;
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch NT-function specific parameters in the
+ * PCI Configuration Space
+ * @bars:	BARs related registers
+ * @msgs:	Messaging related registers
+ */
+struct idt_ntb_regs {
+	struct idt_ntb_bar bars[IDT_BAR_CNT];
+	struct idt_ntb_msg msgs[IDT_MSG_CNT];
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch port specific parameters in the
+ * Global Configuration Space
+ * @pcicmdsts:	 PCI command/status register
+ * @pcielctlsts: PCIe link control/status
+ * @ntctl:	NT-function control register
+ * @ctl:	Port control register
+ * @sts:	Port status register
+ *
+ * @bars:	BARs related registers
+ */
+struct idt_ntb_port {
+	unsigned int pcicmdsts;
+	unsigned int pcielctlsts;
+	unsigned int ntctl;
+
+	unsigned int ctl;
+	unsigned int sts;
+
+	struct idt_ntb_bar bars[IDT_BAR_CNT];
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch partition specific parameters.
+ * @ctl:	Partition control register in the Global Address Space
+ * @sts:	Partition status register in the Global Address Space
+ * @msgctl:	Messages control registers
+ */
+struct idt_ntb_part {
+	unsigned int ctl;
+	unsigned int sts;
+	unsigned int msgctl[IDT_MSG_CNT];
+};
+
+#endif /* NTB_HW_IDT_H */
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/intel/Kconfig b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/Kconfig
new file mode 100644
index 0000000..91f995e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/Kconfig
@@ -0,0 +1,7 @@
+config NTB_INTEL
+	tristate "Intel Non-Transparent Bridge support"
+	depends on X86_64
+	help
+	 This driver supports Intel NTB on capable Xeon and Atom hardware.
+
+	 If unsure, say N.
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/intel/Makefile b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/Makefile
new file mode 100644
index 0000000..1b43456
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/intel/ntb_hw_intel.c b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/ntb_hw_intel.c
new file mode 100644
index 0000000..58068f1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -0,0 +1,3059 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_intel.h"
+
+#define NTB_NAME	"ntb_hw_intel"
+#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER		"2.0"
+
+MODULE_DESCRIPTION(NTB_DESC);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+#define bar0_off(base, bar) ((base) + ((bar) << 2))
+#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
+
+static const struct intel_ntb_reg atom_reg;
+static const struct intel_ntb_alt_reg atom_pri_reg;
+static const struct intel_ntb_alt_reg atom_sec_reg;
+static const struct intel_ntb_alt_reg atom_b2b_reg;
+static const struct intel_ntb_xlat_reg atom_pri_xlat;
+static const struct intel_ntb_xlat_reg atom_sec_xlat;
+static const struct intel_ntb_reg xeon_reg;
+static const struct intel_ntb_alt_reg xeon_pri_reg;
+static const struct intel_ntb_alt_reg xeon_sec_reg;
+static const struct intel_ntb_alt_reg xeon_b2b_reg;
+static const struct intel_ntb_xlat_reg xeon_pri_xlat;
+static const struct intel_ntb_xlat_reg xeon_sec_xlat;
+static struct intel_b2b_addr xeon_b2b_usd_addr;
+static struct intel_b2b_addr xeon_b2b_dsd_addr;
+static const struct intel_ntb_reg skx_reg;
+static const struct intel_ntb_alt_reg skx_pri_reg;
+static const struct intel_ntb_alt_reg skx_b2b_reg;
+static const struct intel_ntb_xlat_reg skx_sec_xlat;
+static const struct ntb_dev_ops intel_ntb_ops;
+static const struct ntb_dev_ops intel_ntb3_ops;
+
+static const struct file_operations intel_ntb_debugfs_info;
+static struct dentry *debugfs_dir;
+
+static int b2b_mw_idx = -1;
+module_param(b2b_mw_idx, int, 0644);
+MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
+		 "value of zero or positive starts from first mw idx, and a "
+		 "negative value starts from last mw idx.  Both sides MUST "
+		 "set the same value here!");
+
+static unsigned int b2b_mw_share;
+module_param(b2b_mw_share, uint, 0644);
+MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
+		 "ntb so that the peer ntb only occupies the first half of "
+		 "the mw, so the second half can still be used as a mw.  Both "
+		 "sides MUST set the same value here!");
+
+module_param_named(xeon_b2b_usd_bar2_addr64,
+		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+		 "XEON B2B USD BAR 2 64-bit address");
+
+module_param_named(xeon_b2b_usd_bar4_addr64,
+		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
+		 "XEON B2B USD BAR 4 64-bit address");
+
+module_param_named(xeon_b2b_usd_bar4_addr32,
+		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
+		 "XEON B2B USD split-BAR 4 32-bit address");
+
+module_param_named(xeon_b2b_usd_bar5_addr32,
+		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
+		 "XEON B2B USD split-BAR 5 32-bit address");
+
+module_param_named(xeon_b2b_dsd_bar2_addr64,
+		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+		 "XEON B2B DSD BAR 2 64-bit address");
+
+module_param_named(xeon_b2b_dsd_bar4_addr64,
+		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
+		 "XEON B2B DSD BAR 4 64-bit address");
+
+module_param_named(xeon_b2b_dsd_bar4_addr32,
+		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
+		 "XEON B2B DSD split-BAR 4 32-bit address");
+
+module_param_named(xeon_b2b_dsd_bar5_addr32,
+		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
+		 "XEON B2B DSD split-BAR 5 32-bit address");
+
+static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
+static int xeon_init_isr(struct intel_ntb_dev *ndev);
+
+#ifndef ioread64
+#ifdef readq
+#define ioread64 readq
+#else
+#define ioread64 _ioread64
+static inline u64 _ioread64(void __iomem *mmio)
+{
+	u64 low, high;
+
+	low = ioread32(mmio);
+	high = ioread32(mmio + sizeof(u32));
+	return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef iowrite64
+#ifdef writeq
+#define iowrite64 writeq
+#else
+#define iowrite64 _iowrite64
+static inline void _iowrite64(u64 val, void __iomem *mmio)
+{
+	iowrite32(val, mmio);
+	iowrite32(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
+
+static inline int pdev_is_atom(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pdev_is_xeon(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
+{
+	if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
+		return 1;
+
+	return 0;
+}
+
+static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
+{
+	ndev->unsafe_flags = 0;
+	ndev->unsafe_flags_ignore = 0;
+
+	/* Only B2B has a workaround to avoid SDOORBELL */
+	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
+		if (!ntb_topo_is_b2b(ndev->ntb.topo))
+			ndev->unsafe_flags |= NTB_UNSAFE_DB;
+
+	/* No low level workaround to avoid SB01BASE */
+	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
+		ndev->unsafe_flags |= NTB_UNSAFE_DB;
+		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
+	}
+}
+
+static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
+				 unsigned long flag)
+{
+	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
+}
+
+static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
+				     unsigned long flag)
+{
+	flag &= ndev->unsafe_flags;
+	ndev->unsafe_flags_ignore |= flag;
+
+	return !!flag;
+}
+
+static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
+{
+	if (idx < 0 || idx >= ndev->mw_count)
+		return -EINVAL;
+	return ndev->reg->mw_bar[idx];
+}
+
+static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
+			       phys_addr_t *db_addr, resource_size_t *db_size,
+			       phys_addr_t reg_addr, unsigned long reg)
+{
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+		pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+	if (db_addr) {
+		*db_addr = reg_addr + reg;
+		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
+	}
+
+	if (db_size) {
+		*db_size = ndev->reg->db_size;
+		dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
+	}
+
+	return 0;
+}
+
+static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
+			       void __iomem *mmio)
+{
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+		pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+	return ndev->reg->db_ioread(mmio);
+}
+
+static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
+				void __iomem *mmio)
+{
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+		pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+	if (db_bits & ~ndev->db_valid_mask)
+		return -EINVAL;
+
+	ndev->reg->db_iowrite(db_bits, mmio);
+
+	return 0;
+}
+
+static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
+				   void __iomem *mmio)
+{
+	unsigned long irqflags;
+
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+		pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+	if (db_bits & ~ndev->db_valid_mask)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
+	{
+		ndev->db_mask |= db_bits;
+		ndev->reg->db_iowrite(ndev->db_mask, mmio);
+	}
+	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
+
+	return 0;
+}
+
+static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
+				     void __iomem *mmio)
+{
+	unsigned long irqflags;
+
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+		pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+	if (db_bits & ~ndev->db_valid_mask)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
+	{
+		ndev->db_mask &= ~db_bits;
+		ndev->reg->db_iowrite(ndev->db_mask, mmio);
+	}
+	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
+
+	return 0;
+}
+
+static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
+{
+	u64 shift, mask;
+
+	shift = ndev->db_vec_shift;
+	mask = BIT_ULL(shift) - 1;
+
+	return mask << (shift * db_vector);
+}
+
+static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
+				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
+				 unsigned long reg)
+{
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+	if (idx < 0 || idx >= ndev->spad_count)
+		return -EINVAL;
+
+	if (spad_addr) {
+		*spad_addr = reg_addr + reg + (idx << 2);
+		dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
+			*spad_addr);
+	}
+
+	return 0;
+}
+
+static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
+				 void __iomem *mmio)
+{
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+	if (idx < 0 || idx >= ndev->spad_count)
+		return 0;
+
+	return ioread32(mmio + (idx << 2));
+}
+
+static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
+				  void __iomem *mmio)
+{
+	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+	if (idx < 0 || idx >= ndev->spad_count)
+		return -EINVAL;
+
+	iowrite32(val, mmio + (idx << 2));
+
+	return 0;
+}
+
+static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
+{
+	u64 vec_mask;
+
+	vec_mask = ndev_vec_mask(ndev, vec);
+
+	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
+		vec_mask |= ndev->db_link_mask;
+
+	dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);
+
+	ndev->last_ts = jiffies;
+
+	if (vec_mask & ndev->db_link_mask) {
+		if (ndev->reg->poll_link(ndev))
+			ntb_link_event(&ndev->ntb);
+	}
+
+	if (vec_mask & ndev->db_valid_mask)
+		ntb_db_event(&ndev->ntb, vec);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ndev_vec_isr(int irq, void *dev)
+{
+	struct intel_ntb_vec *nvec = dev;
+
+	dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d  nvec->num: %d\n",
+		irq, nvec->num);
+
+	return ndev_interrupt(nvec->ndev, nvec->num);
+}
+
+static irqreturn_t ndev_irq_isr(int irq, void *dev)
+{
+	struct intel_ntb_dev *ndev = dev;
+
+	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
+}
+
+static int ndev_init_isr(struct intel_ntb_dev *ndev,
+			 int msix_min, int msix_max,
+			 int msix_shift, int total_shift)
+{
+	struct pci_dev *pdev;
+	int rc, i, msix_count, node;
+
+	pdev = ndev->ntb.pdev;
+
+	node = dev_to_node(&pdev->dev);
+
+	/* Mask all doorbell interrupts */
+	ndev->db_mask = ndev->db_valid_mask;
+	ndev->reg->db_iowrite(ndev->db_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_mask);
+
+	/* Try to set up msix irq */
+
+	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
+				 GFP_KERNEL, node);
+	if (!ndev->vec)
+		goto err_msix_vec_alloc;
+
+	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
+				  GFP_KERNEL, node);
+	if (!ndev->msix)
+		goto err_msix_alloc;
+
+	for (i = 0; i < msix_max; ++i)
+		ndev->msix[i].entry = i;
+
+	msix_count = pci_enable_msix_range(pdev, ndev->msix,
+					   msix_min, msix_max);
+	if (msix_count < 0)
+		goto err_msix_enable;
+
+	for (i = 0; i < msix_count; ++i) {
+		ndev->vec[i].ndev = ndev;
+		ndev->vec[i].num = i;
+		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
+				 "ndev_vec_isr", &ndev->vec[i]);
+		if (rc)
+			goto err_msix_request;
+	}
+
+	dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
+	ndev->db_vec_count = msix_count;
+	ndev->db_vec_shift = msix_shift;
+	return 0;
+
+err_msix_request:
+	while (i-- > 0)
+		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+	pci_disable_msix(pdev);
+err_msix_enable:
+	kfree(ndev->msix);
+err_msix_alloc:
+	kfree(ndev->vec);
+err_msix_vec_alloc:
+	ndev->msix = NULL;
+	ndev->vec = NULL;
+
+	/* Try to set up msi irq */
+
+	rc = pci_enable_msi(pdev);
+	if (rc)
+		goto err_msi_enable;
+
+	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
+			 "ndev_irq_isr", ndev);
+	if (rc)
+		goto err_msi_request;
+
+	dev_dbg(&pdev->dev, "Using msi interrupts\n");
+	ndev->db_vec_count = 1;
+	ndev->db_vec_shift = total_shift;
+	return 0;
+
+err_msi_request:
+	pci_disable_msi(pdev);
+err_msi_enable:
+
+	/* Try to set up intx irq */
+
+	pci_intx(pdev, 1);
+
+	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
+			 "ndev_irq_isr", ndev);
+	if (rc)
+		goto err_intx_request;
+
+	dev_dbg(&pdev->dev, "Using intx interrupts\n");
+	ndev->db_vec_count = 1;
+	ndev->db_vec_shift = total_shift;
+	return 0;
+
+err_intx_request:
+	return rc;
+}
+
+static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
+{
+	struct pci_dev *pdev;
+	int i;
+
+	pdev = ndev->ntb.pdev;
+
+	/* Mask all doorbell interrupts */
+	ndev->db_mask = ndev->db_valid_mask;
+	ndev->reg->db_iowrite(ndev->db_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_mask);
+
+	if (ndev->msix) {
+		i = ndev->db_vec_count;
+		while (i--)
+			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+		pci_disable_msix(pdev);
+		kfree(ndev->msix);
+		kfree(ndev->vec);
+	} else {
+		free_irq(pdev->irq, ndev);
+		if (pci_dev_msi_enabled(pdev))
+			pci_disable_msi(pdev);
+	}
+}
+
+static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
+				      size_t count, loff_t *offp)
+{
+	struct intel_ntb_dev *ndev;
+	void __iomem *mmio;
+	char *buf;
+	size_t buf_size;
+	ssize_t ret, off;
+	union { u64 v64; u32 v32; u16 v16; } u;
+
+	ndev = filp->private_data;
+	mmio = ndev->self_mmio;
+
+	buf_size = min(count, 0x800ul);
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	off = 0;
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "NTB Device Information:\n");
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Connection Topology -\t%s\n",
+			 ntb_topo_string(ndev->ntb.topo));
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+	off += scnprintf(buf + off, buf_size - off,
+			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+	if (!ndev->reg->link_is_up(ndev))
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Status -\t\tDown\n");
+	else {
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Status -\t\tUp\n");
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Speed -\t\tPCI-E Gen %u\n",
+				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Width -\t\tx%u\n",
+				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+	}
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Memory Window Count -\t%u\n", ndev->mw_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Scratchpad Count -\t%u\n", ndev->spad_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Count -\t%u\n", ndev->db_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Bell -\t\t%#llx\n", u.v64);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "\nNTB Incoming XLAT:\n");
+
+	u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);
+
+	u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);
+
+	u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);
+
+	u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);
+
+	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
+		off += scnprintf(buf + off, buf_size - off,
+				 "\nNTB Outgoing B2B XLAT:\n");
+
+		u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);
+
+		u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);
+
+		u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);
+
+		u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);
+
+		off += scnprintf(buf + off, buf_size - off,
+				 "\nNTB Secondary BAR:\n");
+
+		u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR0 -\t\t%#018llx\n", u.v64);
+
+		u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR1 -\t\t%#018llx\n", u.v64);
+
+		u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "EMBAR2 -\t\t%#018llx\n", u.v64);
+	}
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "\nNTB Statistics:\n");
+
+	u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Upstream Memory Miss -\t%u\n", u.v16);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "\nNTB Hardware Errors:\n");
+
+	if (!pci_read_config_word(ndev->ntb.pdev,
+				  SKX_DEVSTS_OFFSET, &u.v16))
+		off += scnprintf(buf + off, buf_size - off,
+				 "DEVSTS -\t\t%#06x\n", u.v16);
+
+	if (!pci_read_config_word(ndev->ntb.pdev,
+				  SKX_LINK_STATUS_OFFSET, &u.v16))
+		off += scnprintf(buf + off, buf_size - off,
+				 "LNKSTS -\t\t%#06x\n", u.v16);
+
+	if (!pci_read_config_dword(ndev->ntb.pdev,
+				   SKX_UNCERRSTS_OFFSET, &u.v32))
+		off += scnprintf(buf + off, buf_size - off,
+				 "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+	if (!pci_read_config_dword(ndev->ntb.pdev,
+				   SKX_CORERRSTS_OFFSET, &u.v32))
+		off += scnprintf(buf + off, buf_size - off,
+				 "CORERRSTS -\t\t%#06x\n", u.v32);
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * Debugfs "info" read handler for legacy Xeon and Atom devices.
+ *
+ * Formats a snapshot of device state (topology, cached link status, doorbell
+ * registers, BAR size config, XLAT/LMT translation registers and — on Xeon —
+ * B2B/secondary BARs plus hardware error counters) into a bounded kernel
+ * buffer, then copies it to userspace with simple_read_from_buffer().
+ * Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
+				     size_t count, loff_t *offp)
+{
+	struct intel_ntb_dev *ndev;
+	struct pci_dev *pdev;
+	void __iomem *mmio;
+	char *buf;
+	size_t buf_size;
+	ssize_t ret, off;
+	/* scratch cell reused for every register width read below */
+	union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
+
+	ndev = filp->private_data;
+	pdev = ndev->ntb.pdev;
+	mmio = ndev->self_mmio;
+
+	/* bound the report to 2 KiB regardless of the user's count */
+	buf_size = min(count, 0x800ul);
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	off = 0;
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "NTB Device Information:\n");
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Connection Topology -\t%s\n",
+			 ntb_topo_string(ndev->ntb.topo));
+
+	/* b2b_idx == UINT_MAX means no memory window is used for b2b */
+	if (ndev->b2b_idx != UINT_MAX) {
+		off += scnprintf(buf + off, buf_size - off,
+				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
+		off += scnprintf(buf + off, buf_size - off,
+				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
+	}
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "BAR4 Split -\t\t%s\n",
+			 ndev->bar4_split ? "yes" : "no");
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+	off += scnprintf(buf + off, buf_size - off,
+			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+	if (!ndev->reg->link_is_up(ndev)) {
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Status -\t\tDown\n");
+	} else {
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Status -\t\tUp\n");
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Speed -\t\tPCI-E Gen %u\n",
+				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
+		off += scnprintf(buf + off, buf_size - off,
+				 "Link Width -\t\tx%u\n",
+				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+	}
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Memory Window Count -\t%u\n", ndev->mw_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Scratchpad Count -\t%u\n", ndev->spad_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Count -\t%u\n", ndev->db_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+	/* live register values, as opposed to the cached db_mask above */
+	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
+	off += scnprintf(buf + off, buf_size - off,
+			 "Doorbell Bell -\t\t%#llx\n", u.v64);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "\nNTB Window Size:\n");
+
+	/* BAR size configuration lives in PCI config space, not MMIO */
+	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
+	off += scnprintf(buf + off, buf_size - off,
+			 "PBAR23SZ %hhu\n", u.v8);
+	if (!ndev->bar4_split) {
+		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
+		off += scnprintf(buf + off, buf_size - off,
+				 "PBAR45SZ %hhu\n", u.v8);
+	} else {
+		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
+		off += scnprintf(buf + off, buf_size - off,
+				 "PBAR4SZ %hhu\n", u.v8);
+		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
+		off += scnprintf(buf + off, buf_size - off,
+				 "PBAR5SZ %hhu\n", u.v8);
+	}
+
+	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
+	off += scnprintf(buf + off, buf_size - off,
+			 "SBAR23SZ %hhu\n", u.v8);
+	if (!ndev->bar4_split) {
+		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
+		off += scnprintf(buf + off, buf_size - off,
+				 "SBAR45SZ %hhu\n", u.v8);
+	} else {
+		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
+		off += scnprintf(buf + off, buf_size - off,
+				 "SBAR4SZ %hhu\n", u.v8);
+		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
+		off += scnprintf(buf + off, buf_size - off,
+				 "SBAR5SZ %hhu\n", u.v8);
+	}
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "\nNTB Incoming XLAT:\n");
+
+	/* split-BAR4 registers are 32-bit; merged BAR45 are 64-bit */
+	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
+	off += scnprintf(buf + off, buf_size - off,
+			 "XLAT23 -\t\t%#018llx\n", u.v64);
+
+	if (ndev->bar4_split) {
+		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
+		off += scnprintf(buf + off, buf_size - off,
+				 "XLAT4 -\t\t\t%#06x\n", u.v32);
+
+		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
+		off += scnprintf(buf + off, buf_size - off,
+				 "XLAT5 -\t\t\t%#06x\n", u.v32);
+	} else {
+		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
+		off += scnprintf(buf + off, buf_size - off,
+				 "XLAT45 -\t\t%#018llx\n", u.v64);
+	}
+
+	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
+	off += scnprintf(buf + off, buf_size - off,
+			 "LMT23 -\t\t\t%#018llx\n", u.v64);
+
+	if (ndev->bar4_split) {
+		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
+		off += scnprintf(buf + off, buf_size - off,
+				 "LMT4 -\t\t\t%#06x\n", u.v32);
+		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
+		off += scnprintf(buf + off, buf_size - off,
+				 "LMT5 -\t\t\t%#06x\n", u.v32);
+	} else {
+		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
+		off += scnprintf(buf + off, buf_size - off,
+				 "LMT45 -\t\t\t%#018llx\n", u.v64);
+	}
+
+	/* Xeon-only registers: B2B outgoing XLAT, secondary BARs, errors */
+	if (pdev_is_xeon(pdev)) {
+		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
+			off += scnprintf(buf + off, buf_size - off,
+					 "\nNTB Outgoing B2B XLAT:\n");
+
+			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+			off += scnprintf(buf + off, buf_size - off,
+					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);
+
+			if (ndev->bar4_split) {
+				u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "B2B XLAT4 -\t\t%#06x\n",
+						 u.v32);
+				u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "B2B XLAT5 -\t\t%#06x\n",
+						 u.v32);
+			} else {
+				u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "B2B XLAT45 -\t\t%#018llx\n",
+						 u.v64);
+			}
+
+			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
+			off += scnprintf(buf + off, buf_size - off,
+					 "B2B LMT23 -\t\t%#018llx\n", u.v64);
+
+			if (ndev->bar4_split) {
+				u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "B2B LMT4 -\t\t%#06x\n",
+						 u.v32);
+				u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "B2B LMT5 -\t\t%#06x\n",
+						 u.v32);
+			} else {
+				u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "B2B LMT45 -\t\t%#018llx\n",
+						 u.v64);
+			}
+
+			off += scnprintf(buf + off, buf_size - off,
+					 "\nNTB Secondary BAR:\n");
+
+			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
+			off += scnprintf(buf + off, buf_size - off,
+					 "SBAR01 -\t\t%#018llx\n", u.v64);
+
+			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+			off += scnprintf(buf + off, buf_size - off,
+					 "SBAR23 -\t\t%#018llx\n", u.v64);
+
+			if (ndev->bar4_split) {
+				u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "SBAR4 -\t\t\t%#06x\n", u.v32);
+				u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "SBAR5 -\t\t\t%#06x\n", u.v32);
+			} else {
+				u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+				off += scnprintf(buf + off, buf_size - off,
+						 "SBAR45 -\t\t%#018llx\n",
+						 u.v64);
+			}
+		}
+
+		off += scnprintf(buf + off, buf_size - off,
+				 "\nXEON NTB Statistics:\n");
+
+		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
+		off += scnprintf(buf + off, buf_size - off,
+				 "Upstream Memory Miss -\t%u\n", u.v16);
+
+		off += scnprintf(buf + off, buf_size - off,
+				 "\nXEON NTB Hardware Errors:\n");
+
+		/* config reads may fail; only report registers we could read */
+		if (!pci_read_config_word(pdev,
+					  XEON_DEVSTS_OFFSET, &u.v16))
+			off += scnprintf(buf + off, buf_size - off,
+					 "DEVSTS -\t\t%#06x\n", u.v16);
+
+		if (!pci_read_config_word(pdev,
+					  XEON_LINK_STATUS_OFFSET, &u.v16))
+			off += scnprintf(buf + off, buf_size - off,
+					 "LNKSTS -\t\t%#06x\n", u.v16);
+
+		if (!pci_read_config_dword(pdev,
+					   XEON_UNCERRSTS_OFFSET, &u.v32))
+			off += scnprintf(buf + off, buf_size - off,
+					 "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+		if (!pci_read_config_dword(pdev,
+					   XEON_CORERRSTS_OFFSET, &u.v32))
+			off += scnprintf(buf + off, buf_size - off,
+					 "CORERRSTS -\t\t%#06x\n", u.v32);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * Dispatch the debugfs "info" read to the formatter matching the PCI
+ * device: Skylake Xeon (gen3) has its own register layout, while legacy
+ * Xeon and Atom share one.  Unknown devices report -ENXIO.
+ */
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+				 size_t count, loff_t *offp)
+{
+	struct intel_ntb_dev *ndev = filp->private_data;
+	struct pci_dev *pdev = ndev->ntb.pdev;
+
+	if (pdev_is_skx_xeon(pdev))
+		return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
+
+	if (pdev_is_xeon(pdev) || pdev_is_atom(pdev))
+		return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
+
+	return -ENXIO;
+}
+
+/*
+ * Create this device's debugfs directory (named after the PCI device) and
+ * its read-only "info" file under the driver-wide debugfs root.  Debugfs
+ * support is silently skipped when the root directory does not exist or
+ * the per-device directory cannot be created.
+ */
+static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
+{
+	ndev->debugfs_dir = NULL;
+	ndev->debugfs_info = NULL;
+
+	if (!debugfs_dir)
+		return;
+
+	ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->ntb.pdev),
+					       debugfs_dir);
+	if (!ndev->debugfs_dir)
+		return;
+
+	ndev->debugfs_info = debugfs_create_file("info", S_IRUSR,
+						 ndev->debugfs_dir, ndev,
+						 &intel_ntb_debugfs_info);
+}
+
+/* Remove the per-device debugfs tree; safe to call with a NULL dir. */
+static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
+{
+	debugfs_remove_recursive(ndev->debugfs_dir);
+}
+
+/*
+ * Report the number of inbound memory windows.  Only the default peer
+ * index is valid since Intel NTB hardware is point-to-point.
+ */
+static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	return ndev->mw_count;
+}
+
+/*
+ * Report alignment and size constraints of inbound memory window @idx.
+ * The translation address must be aligned to the full BAR size (see
+ * intel_ntb_mw_set_trans), the window size has no alignment requirement,
+ * and the maximum size is the BAR length minus any b2b carve-out.
+ */
+static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+				  resource_size_t *addr_align,
+				  resource_size_t *size_align,
+				  resource_size_t *size_max)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	resource_size_t bar_size, mw_size;
+	int bar;
+
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	/* Skip over the window wholly consumed by b2b (b2b_off == 0 means
+	 * the entire bar is used for b2b).  NOTE(review): this relies on
+	 * b2b_idx being unsigned with UINT_MAX as the "unused" sentinel so
+	 * the compare never matches when b2b is not used — confirm against
+	 * the struct definition.
+	 */
+	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+		idx += 1;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+	/* when sharing a bar with b2b, only the non-b2b half is usable */
+	if (idx == ndev->b2b_idx)
+		mw_size = bar_size - ndev->b2b_off;
+	else
+		mw_size = bar_size;
+
+	if (addr_align)
+		*addr_align = pci_resource_len(ndev->ntb.pdev, bar);
+
+	if (size_align)
+		*size_align = 1;
+
+	if (size_max)
+		*size_max = mw_size;
+
+	return 0;
+}
+
+/*
+ * Program inbound memory window @idx to translate to DMA address @addr
+ * with the given @size.  Writes the XLAT (translation) and LMT (limit)
+ * registers and reads each back to verify the hardware accepted the
+ * value, rolling back on mismatch.  @addr must be aligned to the BAR
+ * size and @size must fit within the usable window.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -EIO if a register
+ * readback does not match what was written.
+ */
+static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+				  dma_addr_t addr, resource_size_t size)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	unsigned long base_reg, xlat_reg, limit_reg;
+	resource_size_t bar_size, mw_size;
+	void __iomem *mmio;
+	u64 base, limit, reg_val;
+	int bar;
+
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	/* skip the window consumed by b2b when the whole bar is b2b */
+	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+		idx += 1;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+	if (idx == ndev->b2b_idx)
+		mw_size = bar_size - ndev->b2b_off;
+	else
+		mw_size = bar_size;
+
+	/* hardware requires that addr is aligned to bar size */
+	if (addr & (bar_size - 1))
+		return -EINVAL;
+
+	/* make sure the range fits in the usable mw size */
+	if (size > mw_size)
+		return -EINVAL;
+
+	mmio = ndev->self_mmio;
+	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
+	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
+	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
+
+	/* non-split bars use 64-bit registers; split BAR4/5 use 32-bit */
+	if (bar < 4 || !ndev->bar4_split) {
+		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;
+
+		/* Set the limit if supported, if size is not mw_size */
+		if (limit_reg && size != mw_size)
+			limit = base + size;
+		else
+			limit = 0;
+
+		/* set and verify setting the translation address */
+		iowrite64(addr, mmio + xlat_reg);
+		reg_val = ioread64(mmio + xlat_reg);
+		if (reg_val != addr) {
+			iowrite64(0, mmio + xlat_reg);
+			return -EIO;
+		}
+
+		/* set and verify setting the limit */
+		iowrite64(limit, mmio + limit_reg);
+		reg_val = ioread64(mmio + limit_reg);
+		if (reg_val != limit) {
+			/* roll back: restore limit to base and clear xlat */
+			iowrite64(base, mmio + limit_reg);
+			iowrite64(0, mmio + xlat_reg);
+			return -EIO;
+		}
+	} else {
+		/* split bar addr range must all be 32 bit */
+		if (addr & (~0ull << 32))
+			return -EINVAL;
+		if ((addr + size) & (~0ull << 32))
+			return -EINVAL;
+
+		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;
+
+		/* Set the limit if supported, if size is not mw_size */
+		if (limit_reg && size != mw_size)
+			limit = base + size;
+		else
+			limit = 0;
+
+		/* set and verify setting the translation address */
+		iowrite32(addr, mmio + xlat_reg);
+		reg_val = ioread32(mmio + xlat_reg);
+		if (reg_val != addr) {
+			iowrite32(0, mmio + xlat_reg);
+			return -EIO;
+		}
+
+		/* set and verify setting the limit */
+		iowrite32(limit, mmio + limit_reg);
+		reg_val = ioread32(mmio + limit_reg);
+		if (reg_val != limit) {
+			/* roll back: restore limit to base and clear xlat */
+			iowrite32(base, mmio + limit_reg);
+			iowrite32(0, mmio + xlat_reg);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Report the cached link state.  When the link is up, the optional
+ * @speed and @width outputs are decoded from the cached LNKSTA value;
+ * when down, they are set to NTB_SPEED_NONE / NTB_WIDTH_NONE.
+ * Returns 1 if the link is up, 0 otherwise.
+ */
+static u64 intel_ntb_link_is_up(struct ntb_dev *ntb,
+				enum ntb_speed *speed,
+				enum ntb_width *width)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	if (!ndev->reg->link_is_up(ndev)) {
+		/* TODO MAYBE: is it possible to observe the link speed and
+		 * width while link is training? */
+		if (speed)
+			*speed = NTB_SPEED_NONE;
+		if (width)
+			*width = NTB_WIDTH_NONE;
+		return 0;
+	}
+
+	if (speed)
+		*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
+	if (width)
+		*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
+
+	return 1;
+}
+
+/*
+ * Bring the NTB link up by clearing the disable/config-lock bits and
+ * enabling BAR snooping in the NTB control register.  @max_speed and
+ * @max_width are accepted for API compatibility but ignored: the
+ * hardware trains the link automatically.  Not permitted on a
+ * secondary-side (NTB_TOPO_SEC) device.
+ */
+static int intel_ntb_link_enable(struct ntb_dev *ntb,
+				 enum ntb_speed max_speed,
+				 enum ntb_width max_width)
+{
+	struct intel_ntb_dev *ndev;
+	u32 ntb_ctl;
+
+	ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+	if (ndev->ntb.topo == NTB_TOPO_SEC)
+		return -EINVAL;
+
+	dev_dbg(&ntb->pdev->dev,
+		"Enabling link with max_speed %d max_width %d\n",
+		max_speed, max_width);
+	if (max_speed != NTB_SPEED_AUTO)
+		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
+	if (max_width != NTB_WIDTH_AUTO)
+		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
+
+	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+	/* split mode exposes BAR5 as an additional snoopable window */
+	if (ndev->bar4_split)
+		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
+	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+	return 0;
+}
+
+/*
+ * Bring the NTB link down: clear all BAR snoop enables, then set the
+ * disable and config-lock bits — the mirror image of
+ * intel_ntb_link_enable().  Not permitted on a secondary-side device.
+ */
+static int intel_ntb_link_disable(struct ntb_dev *ntb)
+{
+	struct intel_ntb_dev *ndev;
+	u32 ntb_cntl;
+
+	ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+	if (ndev->ntb.topo == NTB_TOPO_SEC)
+		return -EINVAL;
+
+	dev_dbg(&ntb->pdev->dev, "Disabling link\n");
+
+	/* Bring NTB link down */
+	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
+	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
+	if (ndev->bar4_split)
+		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
+	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
+	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+	return 0;
+}
+
+/* Number of outbound (peer-facing) memory windows. */
+static int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+	/* Numbers of inbound and outbound memory windows match */
+	return ntb_ndev(ntb)->mw_count;
+}
+
+/*
+ * Report the local physical address (@base) and length (@size) of
+ * outbound memory window @idx, accounting for any b2b carve-out at the
+ * start of a shared BAR.  Either output pointer may be NULL.
+ */
+static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+				     phys_addr_t *base, resource_size_t *size)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	int bar;
+
+	/* skip the window consumed by b2b when the whole bar is b2b */
+	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+		idx += 1;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	if (base)
+		*base = pci_resource_start(ndev->ntb.pdev, bar) +
+			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+	if (size)
+		*size = pci_resource_len(ndev->ntb.pdev, bar) -
+			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+	return 0;
+}
+
+/* Nonzero when unsafe doorbell access is being tolerated on this device. */
+static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
+}
+
+/* Bitmask of doorbell bits implemented by this device. */
+static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->db_valid_mask;
+}
+
+/* Number of interrupt vectors that service doorbell bits. */
+static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->db_vec_count;
+}
+
+/*
+ * Return the subset of the valid doorbell mask serviced by interrupt
+ * vector @db_vector, or 0 for an out-of-range vector.  Valid vectors
+ * are 0 .. db_vec_count - 1.
+ */
+static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	/* Off-by-one fix: the upper bound check previously used ">",
+	 * which accepted db_vector == db_vec_count, one past the last
+	 * valid vector index.
+	 */
+	if (db_vector < 0 || db_vector >= ndev->db_vec_count)
+		return 0;
+
+	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
+}
+
+/* Read the local doorbell bell register (pending doorbell bits). */
+static u64 intel_ntb_db_read(struct ntb_dev *ntb)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_db_read(ndev,
+			    ndev->self_mmio +
+			    ndev->self_reg->db_bell);
+}
+
+/* Clear the given doorbell bits by writing them to the local bell register. */
+static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_db_write(ndev, db_bits,
+			     ndev->self_mmio +
+			     ndev->self_reg->db_bell);
+}
+
+/* Mask (disable interrupts for) the given doorbell bits. */
+static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_db_set_mask(ndev, db_bits,
+				ndev->self_mmio +
+				ndev->self_reg->db_mask);
+}
+
+/* Unmask (re-enable interrupts for) the given doorbell bits. */
+static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_db_clear_mask(ndev, db_bits,
+				  ndev->self_mmio +
+				  ndev->self_reg->db_mask);
+}
+
+/*
+ * Report the physical address and register size of the peer's doorbell
+ * bell register, for clients that ring the peer doorbell by direct write
+ * (e.g. via DMA engine) instead of intel_ntb_peer_db_set().
+ */
+static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
+				  phys_addr_t *db_addr,
+				  resource_size_t *db_size)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+			    ndev->peer_reg->db_bell);
+}
+
+/* Ring the peer's doorbell by writing bits to its bell register. */
+static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_db_write(ndev, db_bits,
+			     ndev->peer_mmio +
+			     ndev->peer_reg->db_bell);
+}
+
+/* Nonzero when unsafe scratchpad access is being tolerated on this device. */
+static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
+}
+
+/* Number of scratchpad registers available to clients. */
+static int intel_ntb_spad_count(struct ntb_dev *ntb)
+{
+	return ntb_ndev(ntb)->spad_count;
+}
+
+/* Read local scratchpad register @idx. */
+static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_read(ndev, idx,
+			      ndev->self_mmio +
+			      ndev->self_reg->spad);
+}
+
+/* Write @val to local scratchpad register @idx. */
+static int intel_ntb_spad_write(struct ntb_dev *ntb,
+				int idx, u32 val)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_write(ndev, idx, val,
+			       ndev->self_mmio +
+			       ndev->self_reg->spad);
+}
+
+/*
+ * Report the physical address of peer scratchpad register @sidx.
+ * NOTE(review): @pidx is accepted but not validated here, unlike the mw
+ * accessors which reject anything other than NTB_DEF_PEER_IDX — confirm
+ * this is intentional for the point-to-point topology.
+ */
+static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
+				    phys_addr_t *spad_addr)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
+			      ndev->peer_reg->spad);
+}
+
+/* Read peer scratchpad register @sidx (@pidx is unused; single peer). */
+static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_read(ndev, sidx,
+			      ndev->peer_mmio +
+			      ndev->peer_reg->spad);
+}
+
+/* Write @val to peer scratchpad register @sidx (@pidx unused; single peer). */
+static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+				     int sidx, u32 val)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_write(ndev, sidx, val,
+			       ndev->peer_mmio +
+			       ndev->peer_reg->spad);
+}
+
+/* ATOM */
+
+/* Atom doorbell registers are 64-bit wide. */
+static u64 atom_db_ioread(void __iomem *mmio)
+{
+	return ioread64(mmio);
+}
+
+/* 64-bit doorbell write accessor for Atom. */
+static void atom_db_iowrite(u64 bits, void __iomem *mmio)
+{
+	iowrite64(bits, mmio);
+}
+
+/*
+ * Poll the Atom NTB control register and refresh the cached control and
+ * link-status values when the control register changed.
+ * Returns 1 if state changed (caller should raise a link event), else 0.
+ */
+static int atom_poll_link(struct intel_ntb_dev *ndev)
+{
+	u32 ntb_ctl;
+
+	ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
+
+	if (ntb_ctl == ndev->ntb_ctl)
+		return 0;
+
+	ndev->ntb_ctl = ntb_ctl;
+
+	ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
+
+	return 1;
+}
+
+/* Link-up test based on the cached Atom NTB control register value. */
+static int atom_link_is_up(struct intel_ntb_dev *ndev)
+{
+	return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
+}
+
+/*
+ * Check the Atom error status registers: a forced-detect LTSSM state
+ * jump or an IBIST error overflow both indicate the link is down with
+ * error and needs the recovery sequence in atom_link_hb().
+ * Returns 1 on error, 0 otherwise.
+ */
+static int atom_link_is_err(struct intel_ntb_dev *ndev)
+{
+	void __iomem *mmio = ndev->self_mmio;
+	u32 ltssm, ibist;
+
+	ltssm = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+	if (ltssm & ATOM_LTSSMSTATEJMP_FORCEDETECT)
+		return 1;
+
+	ibist = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+	if (ibist & ATOM_IBIST_ERR_OFLOW)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Decode the topology field of the Atom PPD register.  Only the two B2B
+ * encodings are usable; primary/secondary encodings (including the bogus
+ * DSD variants) and anything unrecognized map to NTB_TOPO_NONE.
+ */
+static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+	struct device *dev = &ndev->ntb.pdev->dev;
+
+	switch (ppd & ATOM_PPD_TOPO_MASK) {
+	case ATOM_PPD_TOPO_B2B_USD:
+		dev_dbg(dev, "PPD %d B2B USD\n", ppd);
+		return NTB_TOPO_B2B_USD;
+
+	case ATOM_PPD_TOPO_B2B_DSD:
+		dev_dbg(dev, "PPD %d B2B DSD\n", ppd);
+		return NTB_TOPO_B2B_DSD;
+
+	case ATOM_PPD_TOPO_PRI_USD:
+	case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+	case ATOM_PPD_TOPO_SEC_USD:
+	case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+		dev_dbg(dev, "PPD %d non B2B disabled\n", ppd);
+		return NTB_TOPO_NONE;
+	}
+
+	dev_dbg(dev, "PPD %d invalid\n", ppd);
+	return NTB_TOPO_NONE;
+}
+
+/*
+ * Atom link heartbeat work.  Atom has no link-status interrupt, so this
+ * delayed work polls the link, raises link events on change, and — when
+ * the link is down with a hardware error — performs the documented
+ * ModPhy/AER/LTSSM recovery sequence before rescheduling itself.  The
+ * register write sequence and the 100ms settle delay are prescribed by
+ * the hardware; do not reorder.
+ */
+static void atom_link_hb(struct work_struct *work)
+{
+	struct intel_ntb_dev *ndev = hb_ndev(work);
+	struct device *dev = &ndev->ntb.pdev->dev;
+	unsigned long poll_ts;
+	void __iomem *mmio;
+	u32 status32;
+
+	poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
+
+	/* Delay polling the link status if an interrupt was received,
+	 * unless the cached link status says the link is down.
+	 */
+	if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
+		schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
+		return;
+	}
+
+	if (atom_poll_link(ndev))
+		ntb_link_event(&ndev->ntb);
+
+	if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
+		schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
+		return;
+	}
+
+	/* Link is down with error: recover the link! */
+
+	mmio = ndev->self_mmio;
+
+	/* Driver resets the NTB ModPhy lanes - magic! */
+	iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
+	iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
+	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
+	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
+
+	/* Driver waits 100ms to allow the NTB ModPhy to settle */
+	msleep(100);
+
+	/* Clear AER Errors, write to clear */
+	status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
+	dev_dbg(dev, "ERRCORSTS = %x\n", status32);
+	status32 &= PCI_ERR_COR_REP_ROLL;
+	iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
+
+	/* Clear unexpected electrical idle event in LTSSM, write to clear */
+	status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
+	dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32);
+	status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
+	iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
+
+	/* Clear DeSkew Buffer error, write to clear */
+	status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
+	dev_dbg(dev, "DESKEWSTS = %x\n", status32);
+	status32 |= ATOM_DESKEWSTS_DBERR;
+	iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
+
+	status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+	dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32);
+	status32 &= ATOM_IBIST_ERR_OFLOW;
+	iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+
+	/* Releases the NTB state machine to allow the link to retrain */
+	status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+	dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32);
+	status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
+	iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+
+	/* There is a potential race between the 2 NTB devices recovering at the
+	 * same time.  If the times are the same, the link will not recover and
+	 * the driver will be stuck in this loop forever.  Add a random interval
+	 * to the recovery time to prevent this race.
+	 */
+	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
+			      + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
+}
+
+/*
+ * Set up Atom interrupt handling (a single MSI-X vector group) and start
+ * the link heartbeat worker, since Atom lacks a link status interrupt.
+ */
+static int atom_init_isr(struct intel_ntb_dev *ndev)
+{
+	int rc;
+
+	rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
+			   ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
+	if (rc)
+		return rc;
+
+	/* ATOM doesn't have link status interrupt, poll on that platform */
+	ndev->last_ts = jiffies;
+	INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
+	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
+
+	return 0;
+}
+
+/* Stop the heartbeat worker before tearing down the ISR resources. */
+static void atom_deinit_isr(struct intel_ntb_dev *ndev)
+{
+	cancel_delayed_work_sync(&ndev->hb_timer);
+	ndev_deinit_isr(ndev);
+}
+
+/*
+ * Configure Atom-specific NTB parameters (window/scratchpad/doorbell
+ * counts) and select register maps for the topology.  Only B2B
+ * topologies are supported; anything else returns -EINVAL.
+ */
+static int atom_init_ntb(struct intel_ntb_dev *ndev)
+{
+	ndev->mw_count = ATOM_MW_COUNT;
+	ndev->spad_count = ATOM_SPAD_COUNT;
+	ndev->db_count = ATOM_DB_COUNT;
+
+	switch (ndev->ntb.topo) {
+	case NTB_TOPO_B2B_USD:
+	case NTB_TOPO_B2B_DSD:
+		ndev->self_reg = &atom_pri_reg;
+		ndev->peer_reg = &atom_b2b_reg;
+		ndev->xlat_reg = &atom_sec_xlat;
+
+		/* Enable Bus Master and Memory Space on the secondary side */
+		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+			  ndev->self_mmio + ATOM_SPCICMD_OFFSET);
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	return 0;
+}
+
+/*
+ * Atom device bring-up: read and decode the PPD register to determine
+ * topology, configure NTB parameters and interrupts, then kick off
+ * PCI-E link training.
+ */
+static int atom_init_dev(struct intel_ntb_dev *ndev)
+{
+	u32 ppd;
+	int rc;
+
+	rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
+	if (rc)
+		return -EIO;
+
+	ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
+	if (ndev->ntb.topo == NTB_TOPO_NONE)
+		return -EINVAL;
+
+	rc = atom_init_ntb(ndev);
+	if (rc)
+		return rc;
+
+	rc = atom_init_isr(ndev);
+	if (rc)
+		return rc;
+
+	/* NOTE(review): topo can only be B2B here (atom_init_ntb rejects
+	 * everything else), so this condition always holds — presumably
+	 * kept for symmetry with the Xeon path; confirm before removing.
+	 */
+	if (ndev->ntb.topo != NTB_TOPO_SEC) {
+		/* Initiate PCI-E link training */
+		rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
+					    ppd | ATOM_PPD_INIT_LINK);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+/* Atom teardown currently only needs the ISR/heartbeat cleanup. */
+static void atom_deinit_dev(struct intel_ntb_dev *ndev)
+{
+	atom_deinit_isr(ndev);
+}
+
+/* Skylake Xeon NTB */
+
+/*
+ * Acknowledge the Skylake link doorbell, then re-read LINK_STATUS from
+ * PCI config space and refresh the cached value.  Returns 1 if the link
+ * status changed, 0 otherwise (including when the config read fails).
+ */
+static int skx_poll_link(struct intel_ntb_dev *ndev)
+{
+	u16 reg_val;
+	int rc;
+
+	/* clear the pending link-event doorbell bit(s) */
+	ndev->reg->db_iowrite(ndev->db_link_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_clear);
+
+	rc = pci_read_config_word(ndev->ntb.pdev,
+				  SKX_LINK_STATUS_OFFSET, &reg_val);
+	if (rc)
+		return 0;
+
+	if (reg_val == ndev->lnk_sta)
+		return 0;
+
+	ndev->lnk_sta = reg_val;
+
+	return 1;
+}
+
+/* Skylake doorbell registers are 64-bit wide. */
+static u64 skx_db_ioread(void __iomem *mmio)
+{
+	return ioread64(mmio);
+}
+
+/* 64-bit doorbell write accessor for Skylake. */
+static void skx_db_iowrite(u64 bits, void __iomem *mmio)
+{
+	iowrite64(bits, mmio);
+}
+
+/*
+ * Set up Skylake interrupt handling: reprogram the interrupt-vector
+ * mapping registers so status bits line up with MSI-X vectors, apply the
+ * vector-32 erratum workaround, then register the common ISR machinery.
+ */
+static int skx_init_isr(struct intel_ntb_dev *ndev)
+{
+	int i;
+
+	/*
+	 * The MSIX vectors and the interrupt status bits are not lined up
+	 * on Skylake. By default the link status bit is bit 32, however it
+	 * is by default MSIX vector0. We need to fixup to line them up.
+	 * The vectors at reset is 1-32,0. We need to reprogram to 0-32.
+	 */
+
+	for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
+		iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);
+
+	/* move link status down one as workaround */
+	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
+		iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
+			 ndev->self_mmio + SKX_INTVEC_OFFSET +
+			 (SKX_DB_MSIX_VECTOR_COUNT - 1));
+	}
+
+	return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
+			     SKX_DB_MSIX_VECTOR_COUNT,
+			     SKX_DB_MSIX_VECTOR_SHIFT,
+			     SKX_DB_TOTAL_SHIFT);
+}
+
+/*
+ * Configure the Skylake back-to-back (b2b) memory window: decide whether
+ * a window is carved out for b2b traffic (whole bar, half bar, or none),
+ * shrink/disable the corresponding external BAR size, set the incoming
+ * limit registers to zero-length windows and clear the incoming
+ * translation addresses.
+ *
+ * NOTE(review): @peer_addr is currently unused here — confirm whether
+ * outgoing peer programming is handled elsewhere for skx.
+ */
+static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
+			    const struct intel_b2b_addr *addr,
+			    const struct intel_b2b_addr *peer_addr)
+{
+	struct pci_dev *pdev;
+	void __iomem *mmio;
+	resource_size_t bar_size;
+	phys_addr_t bar_addr;
+	int b2b_bar;
+	u8 bar_sz;
+
+	pdev = ndev->ntb.pdev;
+	mmio = ndev->self_mmio;
+
+	if (ndev->b2b_idx == UINT_MAX) {
+		dev_dbg(&pdev->dev, "not using b2b mw\n");
+		b2b_bar = 0;
+		ndev->b2b_off = 0;
+	} else {
+		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+		if (b2b_bar < 0)
+			return -EIO;
+
+		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
+
+		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
+
+		if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
+			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
+			ndev->b2b_off = bar_size >> 1;
+		} else if (bar_size >= XEON_B2B_MIN_SIZE) {
+			dev_dbg(&pdev->dev, "b2b using whole bar\n");
+			ndev->b2b_off = 0;
+			/* bar fully consumed by b2b: one fewer client mw */
+			--ndev->mw_count;
+		} else {
+			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
+			return -EIO;
+		}
+	}
+
+	/*
+	 * Reset the secondary bar sizes to match the primary bar sizes,
+	 * except disable or halve the size of the b2b secondary bar.
+	 */
+	pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
+	dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
+	if (b2b_bar == 1) {
+		/* BAR size registers are log2: -1 halves, 0 disables */
+		if (ndev->b2b_off)
+			bar_sz -= 1;
+		else
+			bar_sz = 0;
+	}
+
+	pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
+	pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
+	dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
+
+	pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
+	dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
+	if (b2b_bar == 2) {
+		if (ndev->b2b_off)
+			bar_sz -= 1;
+		else
+			bar_sz = 0;
+	}
+
+	pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
+	pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
+	dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
+
+	/* SBAR01 hit by first part of the b2b bar */
+	/* NOTE(review): this bar_addr value is overwritten below before
+	 * use, so the only live effect of this block is validating
+	 * b2b_bar (returning -EIO on an impossible value).
+	 */
+	if (b2b_bar == 0)
+		bar_addr = addr->bar0_addr;
+	else if (b2b_bar == 1)
+		bar_addr = addr->bar2_addr64;
+	else if (b2b_bar == 2)
+		bar_addr = addr->bar4_addr64;
+	else
+		return -EIO;
+
+	/* setup incoming bar limits == base addrs (zero length windows) */
+	bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+	iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
+	bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
+	dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
+
+	bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+	iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
+	bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
+	dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
+
+	/* zero incoming translation addrs */
+	iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
+	iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);
+
+	ndev->peer_mmio = ndev->self_mmio;
+
+	return 0;
+}
+
+/*
+ * skx_init_ntb() - initialize the Skylake NTB topology-dependent state
+ * @ndev:	the local ntb device
+ *
+ * Sets the mw/spad/doorbell counts, selects the register maps for the
+ * detected topology (only b2b is supported here), programs the b2b memory
+ * window, and masks all doorbells until a client unmasks them.
+ *
+ * Return: 0 on success, -EINVAL for unsupported topologies, or the error
+ * from skx_setup_b2b_mw().
+ */
+static int skx_init_ntb(struct intel_ntb_dev *ndev)
+{
+	int rc;
+
+
+	ndev->mw_count = XEON_MW_COUNT;
+	ndev->spad_count = SKX_SPAD_COUNT;
+	ndev->db_count = SKX_DB_COUNT;
+	ndev->db_link_mask = SKX_DB_LINK_BIT;
+
+	/* DB fixup for using 31 right now */
+	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
+		ndev->db_link_mask |= BIT_ULL(31);
+
+	switch (ndev->ntb.topo) {
+	case NTB_TOPO_B2B_USD:
+	case NTB_TOPO_B2B_DSD:
+		ndev->self_reg = &skx_pri_reg;
+		ndev->peer_reg = &skx_b2b_reg;
+		ndev->xlat_reg = &skx_sec_xlat;
+
+		/* USD programs the DSD addresses locally, and vice versa */
+		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+			rc = skx_setup_b2b_mw(ndev,
+					      &xeon_b2b_dsd_addr,
+					      &xeon_b2b_usd_addr);
+		} else {
+			rc = skx_setup_b2b_mw(ndev,
+					      &xeon_b2b_usd_addr,
+					      &xeon_b2b_dsd_addr);
+		}
+
+		if (rc)
+			return rc;
+
+		/* Enable Bus Master and Memory Space on the secondary side */
+		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+			  ndev->self_mmio + SKX_SPCICMD_OFFSET);
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	/* start with every doorbell masked */
+	ndev->reg->db_iowrite(ndev->db_valid_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_mask);
+
+	return 0;
+}
+
+/*
+ * skx_init_dev() - device-level init for Skylake Xeon NTB
+ * @ndev:	the local ntb device
+ *
+ * Reads the topology from the PPD config register, records the MSI-X
+ * vector-32 hardware erratum flag, then performs the topology setup and
+ * interrupt initialization.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int skx_init_dev(struct intel_ntb_dev *ndev)
+{
+	struct pci_dev *pdev;
+	u8 ppd;
+	int rc;
+
+	pdev = ndev->ntb.pdev;
+
+	ndev->reg = &skx_reg;
+
+	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
+	if (rc)
+		return -EIO;
+
+	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
+	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
+		ntb_topo_string(ndev->ntb.topo));
+	if (ndev->ntb.topo == NTB_TOPO_NONE)
+		return -EINVAL;
+
+	/* all skx parts carry the bad-MSI-X-vector-32 erratum */
+	if (pdev_is_skx_xeon(pdev))
+		ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
+
+	rc = skx_init_ntb(ndev);
+	if (rc)
+		return rc;
+
+	return skx_init_isr(ndev);
+}
+
+/* Enable the NTB3 (skx) link; speed/width hints are accepted but ignored. */
+static int intel_ntb3_link_enable(struct ntb_dev *ntb,
+				  enum ntb_speed max_speed,
+				  enum ntb_width max_width)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	u32 ctl;
+
+	dev_dbg(&ntb->pdev->dev,
+		"Enabling link with max_speed %d max_width %d\n",
+		max_speed, max_width);
+
+	/* the hardware negotiates speed and width on its own */
+	if (max_speed != NTB_SPEED_AUTO)
+		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
+	if (max_width != NTB_WIDTH_AUTO)
+		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
+
+	/* unlock config, clear the disable bit, and enable bar snooping */
+	ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+	ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+	ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP |
+	       NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+	iowrite32(ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+	return 0;
+}
+
+/*
+ * intel_ntb3_mw_set_trans() - set up a memory window translation (skx)
+ * @ntb:	ntb device
+ * @pidx:	peer index; only NTB_DEF_PEER_IDX is supported
+ * @idx:	memory window index
+ * @addr:	dma address to translate incoming writes to
+ * @size:	size of the window to open; 0 effectively closes it
+ *
+ * Programs the incoming translation base and limit for the window, with a
+ * read-back verify and rollback on each register write, then mirrors the
+ * limit into the EP-side register bank at +0x4000.
+ *
+ * Return: 0 on success, -EINVAL for bad index/alignment/size, -EIO when a
+ * register read-back does not match what was written.
+ */
+static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+				   dma_addr_t addr, resource_size_t size)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	unsigned long xlat_reg, limit_reg;
+	resource_size_t bar_size, mw_size;
+	void __iomem *mmio;
+	u64 base, limit, reg_val;
+	int bar;
+
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	/* skip over the window consumed whole by b2b registers */
+	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+		idx += 1;
+
+	bar = ndev_mw_to_bar(ndev, idx);
+	if (bar < 0)
+		return bar;
+
+	bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+	/* a shared b2b bar donates its first half; only the rest is usable */
+	if (idx == ndev->b2b_idx)
+		mw_size = bar_size - ndev->b2b_off;
+	else
+		mw_size = bar_size;
+
+	/* hardware requires that addr is aligned to bar size */
+	if (addr & (bar_size - 1))
+		return -EINVAL;
+
+	/* make sure the range fits in the usable mw size */
+	if (size > mw_size)
+		return -EINVAL;
+
+	/* base/limit registers are 0x10 apart per window */
+	mmio = ndev->self_mmio;
+	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
+	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
+	base = pci_resource_start(ndev->ntb.pdev, bar);
+
+	/* Set the limit if supported, if size is not mw_size */
+	if (limit_reg && size != mw_size)
+		limit = base + size;
+	else
+		limit = base + mw_size;
+
+	/* set and verify setting the translation address */
+	iowrite64(addr, mmio + xlat_reg);
+	reg_val = ioread64(mmio + xlat_reg);
+	if (reg_val != addr) {
+		/* roll back: leave the window closed */
+		iowrite64(0, mmio + xlat_reg);
+		return -EIO;
+	}
+
+	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
+
+	/* set and verify setting the limit */
+	iowrite64(limit, mmio + limit_reg);
+	reg_val = ioread64(mmio + limit_reg);
+	if (reg_val != limit) {
+		iowrite64(base, mmio + limit_reg);
+		iowrite64(0, mmio + xlat_reg);
+		return -EIO;
+	}
+
+	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
+
+	/* setup the EP */
+	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
+	base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
+	base &= ~0xf;	/* mask off the bar attribute bits */
+
+	if (limit_reg && size != mw_size)
+		limit = base + size;
+	else
+		limit = base + mw_size;
+
+	/* set and verify setting the limit */
+	iowrite64(limit, mmio + limit_reg);
+	reg_val = ioread64(mmio + limit_reg);
+	if (reg_val != limit) {
+		iowrite64(base, mmio + limit_reg);
+		iowrite64(0, mmio + xlat_reg);
+		return -EIO;
+	}
+
+	dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
+
+	return 0;
+}
+
+/* Ring the requested peer doorbells; each bit has its own 32-bit register. */
+static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	if (db_bits & ~ndev->db_valid_mask)
+		return -EINVAL;
+
+	/* db_bits &= db_bits - 1 clears the lowest set bit each iteration */
+	for (; db_bits; db_bits &= db_bits - 1) {
+		int db = __ffs(db_bits);
+
+		iowrite32(1, ndev->peer_mmio +
+			  ndev->peer_reg->db_bell + (db * 4));
+	}
+
+	return 0;
+}
+
+/* Read the pending doorbell bits from the local interrupt status register. */
+static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *db_reg = ndev->self_mmio + ndev->self_reg->db_clear;
+
+	return ndev_db_read(ndev, db_reg);
+}
+
+/* Acknowledge doorbells by writing the bits back to the status register. */
+static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+	void __iomem *db_reg = ndev->self_mmio + ndev->self_reg->db_clear;
+
+	return ndev_db_write(ndev, db_bits, db_reg);
+}
+
+/* XEON */
+
+/* Xeon doorbell registers are 16 bits wide; widen the value for callers. */
+static u64 xeon_db_ioread(void __iomem *mmio)
+{
+	u16 bits = ioread16(mmio);
+
+	return bits;
+}
+
+/* Write a doorbell value; only the low 16 bits exist on xeon hardware. */
+static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
+{
+	u16 val = (u16)bits;
+
+	iowrite16(val, mmio);
+}
+
+/*
+ * xeon_poll_link() - poll for a link status change
+ * @ndev:	the local ntb device
+ *
+ * Writes the link doorbell bit, then reads the link status word from
+ * config space and caches it in ndev->lnk_sta.
+ *
+ * NOTE(review): the db_bell write appears to poke the peer about the poll;
+ * confirm the intended side effect against the hardware documentation.
+ *
+ * Return: 1 if the link status changed since the last poll, 0 otherwise
+ * (including on config-space read failure).
+ */
+static int xeon_poll_link(struct intel_ntb_dev *ndev)
+{
+	u16 reg_val;
+	int rc;
+
+	ndev->reg->db_iowrite(ndev->db_link_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_bell);
+
+	rc = pci_read_config_word(ndev->ntb.pdev,
+				  XEON_LINK_STATUS_OFFSET, &reg_val);
+	if (rc)
+		return 0;
+
+	if (reg_val == ndev->lnk_sta)
+		return 0;
+
+	ndev->lnk_sta = reg_val;
+
+	return 1;
+}
+
+/*
+ * The secondary side cannot observe the link, so it always reports up;
+ * everyone else checks the cached link status word.
+ */
+static int xeon_link_is_up(struct intel_ntb_dev *ndev)
+{
+	return (ndev->ntb.topo == NTB_TOPO_SEC) ?
+		1 : NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
+
+/* Decode the topology field of the PPD config register. */
+static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
+{
+	u8 topo = ppd & XEON_PPD_TOPO_MASK;
+
+	if (topo == XEON_PPD_TOPO_B2B_USD)
+		return NTB_TOPO_B2B_USD;
+	if (topo == XEON_PPD_TOPO_B2B_DSD)
+		return NTB_TOPO_B2B_DSD;
+	/* accept bogus PRI_DSD the same as PRI_USD */
+	if (topo == XEON_PPD_TOPO_PRI_USD || topo == XEON_PPD_TOPO_PRI_DSD)
+		return NTB_TOPO_PRI;
+	/* accept bogus SEC_DSD the same as SEC_USD */
+	if (topo == XEON_PPD_TOPO_SEC_USD || topo == XEON_PPD_TOPO_SEC_DSD)
+		return NTB_TOPO_SEC;
+
+	return NTB_TOPO_NONE;
+}
+
+/* Report whether the PPD register indicates bar4 is split into 4 and 5. */
+static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
+{
+	if (!(ppd & XEON_PPD_SPLIT_BAR_MASK))
+		return 0;
+
+	dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
+	return 1;
+}
+
+/* Set up interrupts with one MSI-X vector per doorbell bit. */
+static int xeon_init_isr(struct intel_ntb_dev *ndev)
+{
+	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
+			     XEON_DB_MSIX_VECTOR_COUNT,
+			     XEON_DB_MSIX_VECTOR_SHIFT,
+			     XEON_DB_TOTAL_SHIFT);
+}
+
+/* Tear down the interrupts set up by xeon_init_isr(). */
+static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
+{
+	ndev_deinit_isr(ndev);
+}
+
+/*
+ * xeon_setup_b2b_mw() - configure the back-to-back memory window (xeon)
+ * @ndev:	the local ntb device
+ * @addr:	local-side bar addresses to program into the secondary bars
+ * @peer_addr:	peer-side bar addresses for the outgoing translations
+ *
+ * Carves out all or half of one memory window for b2b register access,
+ * mirrors the primary bar sizes to the secondary side, programs the
+ * secondary bar bases and limits, zeroes the incoming translations, sets
+ * the outgoing translations to the peer addresses, and finally maps the
+ * peer's register space through the chosen b2b bar.
+ *
+ * Return: 0 on success, -EIO on invalid/undersized bars or mapping failure.
+ */
+static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
+			     const struct intel_b2b_addr *addr,
+			     const struct intel_b2b_addr *peer_addr)
+{
+	struct pci_dev *pdev;
+	void __iomem *mmio;
+	resource_size_t bar_size;
+	phys_addr_t bar_addr;
+	int b2b_bar;
+	u8 bar_sz;
+
+	pdev = ndev->ntb.pdev;
+	mmio = ndev->self_mmio;
+
+	if (ndev->b2b_idx == UINT_MAX) {
+		dev_dbg(&pdev->dev, "not using b2b mw\n");
+		b2b_bar = 0;
+		ndev->b2b_off = 0;
+	} else {
+		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+		if (b2b_bar < 0)
+			return -EIO;
+
+		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
+
+		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
+
+		/* prefer sharing: keep the upper half usable as a real mw */
+		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
+			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
+			ndev->b2b_off = bar_size >> 1;
+		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
+			/* whole bar consumed by b2b: one fewer mw for clients */
+			dev_dbg(&pdev->dev, "b2b using whole bar\n");
+			ndev->b2b_off = 0;
+			--ndev->mw_count;
+		} else {
+			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
+			return -EIO;
+		}
+	}
+
+	/* Reset the secondary bar sizes to match the primary bar sizes,
+	 * except disable or halve the size of the b2b secondary bar.
+	 *
+	 * Note: code for each specific bar size register, because the register
+	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
+	 */
+	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
+	dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
+	if (b2b_bar == 2) {
+		if (ndev->b2b_off)
+			bar_sz -= 1;	/* sizes are powers of two: -1 halves */
+		else
+			bar_sz = 0;	/* disable the secondary bar entirely */
+	}
+	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
+	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
+	dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
+
+	if (!ndev->bar4_split) {
+		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
+		dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
+		if (b2b_bar == 4) {
+			if (ndev->b2b_off)
+				bar_sz -= 1;
+			else
+				bar_sz = 0;
+		}
+		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
+		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
+		dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
+	} else {
+		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
+		dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
+		if (b2b_bar == 4) {
+			if (ndev->b2b_off)
+				bar_sz -= 1;
+			else
+				bar_sz = 0;
+		}
+		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
+		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
+		dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
+
+		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
+		dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
+		if (b2b_bar == 5) {
+			if (ndev->b2b_off)
+				bar_sz -= 1;
+			else
+				bar_sz = 0;
+		}
+		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
+		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
+		dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
+	}
+
+	/* SBAR01 hit by first part of the b2b bar */
+	if (b2b_bar == 0)
+		bar_addr = addr->bar0_addr;
+	else if (b2b_bar == 2)
+		bar_addr = addr->bar2_addr64;
+	else if (b2b_bar == 4 && !ndev->bar4_split)
+		bar_addr = addr->bar4_addr64;
+	else if (b2b_bar == 4)
+		bar_addr = addr->bar4_addr32;
+	else if (b2b_bar == 5)
+		bar_addr = addr->bar5_addr32;
+	else
+		return -EIO;
+
+	dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
+	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
+
+	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
+	 * The b2b bar is either disabled above, or configured half-size, and
+	 * it starts at the PBAR xlat + offset.
+	 */
+
+	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
+	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+	dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
+
+	if (!ndev->bar4_split) {
+		bar_addr = addr->bar4_addr64 +
+			(b2b_bar == 4 ? ndev->b2b_off : 0);
+		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
+		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+		dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
+	} else {
+		/* split mode: bar4 and bar5 are separate 32-bit bars */
+		bar_addr = addr->bar4_addr32 +
+			(b2b_bar == 4 ? ndev->b2b_off : 0);
+		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
+		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+		dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
+
+		bar_addr = addr->bar5_addr32 +
+			(b2b_bar == 5 ? ndev->b2b_off : 0);
+		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
+		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+		dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
+	}
+
+	/* setup incoming bar limits == base addrs (zero length windows) */
+
+	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
+	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
+	dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
+
+	if (!ndev->bar4_split) {
+		bar_addr = addr->bar4_addr64 +
+			(b2b_bar == 4 ? ndev->b2b_off : 0);
+		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
+		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
+		dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
+	} else {
+		bar_addr = addr->bar4_addr32 +
+			(b2b_bar == 4 ? ndev->b2b_off : 0);
+		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
+		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
+		dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
+
+		bar_addr = addr->bar5_addr32 +
+			(b2b_bar == 5 ? ndev->b2b_off : 0);
+		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
+		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
+		dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
+	}
+
+	/* zero incoming translation addrs */
+	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
+
+	if (!ndev->bar4_split) {
+		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
+	} else {
+		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
+		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
+	}
+
+	/* zero outgoing translation limits (whole bar size windows) */
+	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
+	if (!ndev->bar4_split) {
+		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
+	} else {
+		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
+		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
+	}
+
+	/* set outgoing translation offsets */
+	bar_addr = peer_addr->bar2_addr64;
+	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
+	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+	dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
+
+	if (!ndev->bar4_split) {
+		bar_addr = peer_addr->bar4_addr64;
+		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
+		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+		dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
+	} else {
+		bar_addr = peer_addr->bar4_addr32;
+		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
+		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+		dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
+
+		bar_addr = peer_addr->bar5_addr32;
+		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
+		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+		dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
+	}
+
+	/* set the translation offset for b2b registers */
+	if (b2b_bar == 0)
+		bar_addr = peer_addr->bar0_addr;
+	else if (b2b_bar == 2)
+		bar_addr = peer_addr->bar2_addr64;
+	else if (b2b_bar == 4 && !ndev->bar4_split)
+		bar_addr = peer_addr->bar4_addr64;
+	else if (b2b_bar == 4)
+		bar_addr = peer_addr->bar4_addr32;
+	else if (b2b_bar == 5)
+		bar_addr = peer_addr->bar5_addr32;
+	else
+		return -EIO;
+
+	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
+	dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
+	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
+	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
+
+	if (b2b_bar) {
+		/* map peer ntb mmio config space registers */
+		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
+					    XEON_B2B_MIN_SIZE);
+		if (!ndev->peer_mmio)
+			return -EIO;
+
+		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
+	}
+
+	return 0;
+}
+
+/*
+ * xeon_init_ntb() - initialize the Xeon NTB topology-dependent state
+ * @ndev:	the local ntb device
+ *
+ * Sets mw/spad/doorbell counts, selects register maps per topology, applies
+ * the SDOORBELL/B2BDOORBELL errata adjustments, programs the b2b window for
+ * b2b topologies, and masks all doorbells.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xeon_init_ntb(struct intel_ntb_dev *ndev)
+{
+	struct device *dev = &ndev->ntb.pdev->dev;
+	int rc;
+	u32 ntb_ctl;
+
+	if (ndev->bar4_split)
+		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
+	else
+		ndev->mw_count = XEON_MW_COUNT;
+
+	ndev->spad_count = XEON_SPAD_COUNT;
+	ndev->db_count = XEON_DB_COUNT;
+	ndev->db_link_mask = XEON_DB_LINK_BIT;
+
+	switch (ndev->ntb.topo) {
+	case NTB_TOPO_PRI:
+		/* the sdoorbell errata workaround needs b2b topology */
+		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+			dev_err(dev, "NTB Primary config disabled\n");
+			return -EINVAL;
+		}
+
+		/* enable link to allow secondary side device to appear */
+		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+		ntb_ctl &= ~NTB_CTL_DISABLE;
+		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+		/* use half the spads for the peer */
+		ndev->spad_count >>= 1;
+		ndev->self_reg = &xeon_pri_reg;
+		ndev->peer_reg = &xeon_sec_reg;
+		ndev->xlat_reg = &xeon_sec_xlat;
+		break;
+
+	case NTB_TOPO_SEC:
+		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+			dev_err(dev, "NTB Secondary config disabled\n");
+			return -EINVAL;
+		}
+		/* use half the spads for the peer */
+		ndev->spad_count >>= 1;
+		ndev->self_reg = &xeon_sec_reg;
+		ndev->peer_reg = &xeon_pri_reg;
+		ndev->xlat_reg = &xeon_pri_xlat;
+		break;
+
+	case NTB_TOPO_B2B_USD:
+	case NTB_TOPO_B2B_DSD:
+		ndev->self_reg = &xeon_pri_reg;
+		ndev->peer_reg = &xeon_b2b_reg;
+		ndev->xlat_reg = &xeon_sec_xlat;
+
+		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+			/* reach the peer through a memory window instead */
+			ndev->peer_reg = &xeon_pri_reg;
+
+			/* negative b2b_mw_idx counts back from mw_count */
+			if (b2b_mw_idx < 0)
+				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
+			else
+				ndev->b2b_idx = b2b_mw_idx;
+
+			if (ndev->b2b_idx >= ndev->mw_count) {
+				dev_dbg(dev,
+					"b2b_mw_idx %d invalid for mw_count %u\n",
+					b2b_mw_idx, ndev->mw_count);
+				return -EINVAL;
+			}
+
+			dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
+				b2b_mw_idx, ndev->b2b_idx);
+
+		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
+			dev_warn(dev, "Reduce doorbell count by 1\n");
+			ndev->db_count -= 1;
+		}
+
+		/* USD programs the DSD addresses locally, and vice versa */
+		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+			rc = xeon_setup_b2b_mw(ndev,
+					       &xeon_b2b_dsd_addr,
+					       &xeon_b2b_usd_addr);
+		} else {
+			rc = xeon_setup_b2b_mw(ndev,
+					       &xeon_b2b_usd_addr,
+					       &xeon_b2b_dsd_addr);
+		}
+		if (rc)
+			return rc;
+
+		/* Enable Bus Master and Memory Space on the secondary side */
+		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+			  ndev->self_mmio + XEON_SPCICMD_OFFSET);
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+	/* start with every doorbell masked */
+	ndev->reg->db_iowrite(ndev->db_valid_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_mask);
+
+	return 0;
+}
+
+/*
+ * xeon_init_dev() - device-level init for classic Xeon NTB
+ * @ndev:	the local ntb device
+ *
+ * Records the per-device-id hardware errata flags, decodes the topology and
+ * split-bar configuration from the PPD register (or from the visible bar
+ * layout when on the transparent secondary side), then performs topology
+ * setup and interrupt initialization.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xeon_init_dev(struct intel_ntb_dev *ndev)
+{
+	struct pci_dev *pdev;
+	u8 ppd;
+	int rc, mem;
+
+	pdev = ndev->ntb.pdev;
+
+	switch (pdev->device) {
+	/* There is a Xeon hardware errata related to writes to SDOORBELL or
+	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
+	 * which may hang the system.  To workaround this use the second memory
+	 * window to access the interrupt and scratch pad registers on the
+	 * remote system.
+	 */
+	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
+		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
+		break;
+	}
+
+	switch (pdev->device) {
+	/* There is a hardware errata related to accessing any register in
+	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
+	 */
+	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
+		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
+		break;
+	}
+
+	switch (pdev->device) {
+	/* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
+	 * mirrored to the remote system.  Shrink the number of bits by one,
+	 * since bit 14 is the last bit.
+	 */
+	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
+		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
+		break;
+	}
+
+	ndev->reg = &xeon_reg;
+
+	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
+	if (rc)
+		return -EIO;
+
+	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
+	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
+		ntb_topo_string(ndev->ntb.topo));
+	if (ndev->ntb.topo == NTB_TOPO_NONE)
+		return -EINVAL;
+
+	if (ndev->ntb.topo != NTB_TOPO_SEC) {
+		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
+		dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
+			ppd, ndev->bar4_split);
+	} else {
+		/* This is a way for transparent BAR to figure out if we are
+		 * doing split BAR or not. There is no way for the hw on the
+		 * transparent side to know and set the PPD.
+		 */
+		mem = pci_select_bars(pdev, IORESOURCE_MEM);
+		ndev->bar4_split = hweight32(mem) ==
+			HSX_SPLIT_BAR_MW_COUNT + 1;
+		dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
+			mem, ndev->bar4_split);
+	}
+
+	rc = xeon_init_ntb(ndev);
+	if (rc)
+		return rc;
+
+	return xeon_init_isr(ndev);
+}
+
+/* Undo xeon_init_dev(): currently only the interrupts need tearing down. */
+static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
+{
+	xeon_deinit_isr(ndev);
+}
+
+/*
+ * intel_ntb_init_pci() - common PCI bring-up for all device families
+ * @ndev:	the local ntb device
+ * @pdev:	the pci device
+ *
+ * Enables the device, claims its regions, sets the DMA masks (64-bit with a
+ * 32-bit fallback), and maps bar 0 as the local register space.  peer_mmio
+ * initially aliases self_mmio; b2b setup remaps it later if needed.
+ *
+ * Return: 0 on success; on error, everything acquired so far is released
+ * via the goto-unwind ladder and a negative errno is returned.
+ */
+static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
+{
+	int rc;
+
+	pci_set_drvdata(pdev, ndev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err_pci_enable;
+
+	rc = pci_request_regions(pdev, NTB_NAME);
+	if (rc)
+		goto err_pci_regions;
+
+	pci_set_master(pdev);
+
+	/* prefer 64-bit DMA; fall back to 32-bit with a warning */
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc)
+			goto err_dma_mask;
+		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+	}
+
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc)
+			goto err_dma_mask;
+		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
+	}
+
+	/* bar 0 holds the local NTB register space; map the whole bar */
+	ndev->self_mmio = pci_iomap(pdev, 0, 0);
+	if (!ndev->self_mmio) {
+		rc = -EIO;
+		goto err_mmio;
+	}
+	ndev->peer_mmio = ndev->self_mmio;
+	ndev->peer_addr = pci_resource_start(pdev, 0);
+
+	return 0;
+
+err_mmio:
+err_dma_mask:
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+err_pci_regions:
+	pci_disable_device(pdev);
+err_pci_enable:
+	pci_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+/* Release everything acquired by intel_ntb_init_pci(), in reverse order. */
+static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+
+	/* peer_mmio may alias self_mmio or be a separate b2b bar mapping */
+	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
+		pci_iounmap(pdev, ndev->peer_mmio);
+	pci_iounmap(pdev, ndev->self_mmio);
+
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * ndev_init_struct() - put a freshly allocated ndev into a known state
+ * @ndev:	the device structure to initialize
+ * @pdev:	the owning pci device
+ *
+ * Installs the default (gen1/gen2) ops table; the skx probe path overrides
+ * ntb.ops with intel_ntb3_ops afterwards.  b2b_idx defaults to UINT_MAX,
+ * meaning "no b2b memory window".
+ */
+static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
+				    struct pci_dev *pdev)
+{
+	ndev->ntb.pdev = pdev;
+	ndev->ntb.topo = NTB_TOPO_NONE;
+	ndev->ntb.ops = &intel_ntb_ops;
+
+	ndev->b2b_off = 0;
+	ndev->b2b_idx = UINT_MAX;
+
+	ndev->bar4_split = 0;
+
+	ndev->mw_count = 0;
+	ndev->spad_count = 0;
+	ndev->db_count = 0;
+	ndev->db_vec_count = 0;
+	ndev->db_vec_shift = 0;
+
+	ndev->ntb_ctl = 0;
+	ndev->lnk_sta = 0;
+
+	ndev->db_valid_mask = 0;
+	ndev->db_link_mask = 0;
+	ndev->db_mask = 0;
+
+	spin_lock_init(&ndev->db_mask_lock);
+}
+
+/*
+ * intel_ntb_pci_probe() - pci probe entry point
+ * @pdev:	the pci device being probed
+ * @id:		matched entry of intel_ntb_pci_tbl
+ *
+ * Allocates the device structure NUMA-locally, performs the family-specific
+ * initialization (atom / classic xeon / skx xeon), then registers with the
+ * ntb core and debugfs.  On failure the goto ladder unwinds whatever was
+ * completed.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int intel_ntb_pci_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *id)
+{
+	struct intel_ntb_dev *ndev;
+	int rc, node;
+
+	/* allocate on the NUMA node closest to the device */
+	node = dev_to_node(&pdev->dev);
+
+	if (pdev_is_atom(pdev)) {
+		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+		if (!ndev) {
+			rc = -ENOMEM;
+			goto err_ndev;
+		}
+
+		ndev_init_struct(ndev, pdev);
+
+		rc = intel_ntb_init_pci(ndev, pdev);
+		if (rc)
+			goto err_init_pci;
+
+		rc = atom_init_dev(ndev);
+		if (rc)
+			goto err_init_dev;
+
+	} else if (pdev_is_xeon(pdev)) {
+		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+		if (!ndev) {
+			rc = -ENOMEM;
+			goto err_ndev;
+		}
+
+		ndev_init_struct(ndev, pdev);
+
+		rc = intel_ntb_init_pci(ndev, pdev);
+		if (rc)
+			goto err_init_pci;
+
+		rc = xeon_init_dev(ndev);
+		if (rc)
+			goto err_init_dev;
+
+	} else if (pdev_is_skx_xeon(pdev)) {
+		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+		if (!ndev) {
+			rc = -ENOMEM;
+			goto err_ndev;
+		}
+
+		ndev_init_struct(ndev, pdev);
+		/* skx uses the third-generation ops table */
+		ndev->ntb.ops = &intel_ntb3_ops;
+
+		rc = intel_ntb_init_pci(ndev, pdev);
+		if (rc)
+			goto err_init_pci;
+
+		rc = skx_init_dev(ndev);
+		if (rc)
+			goto err_init_dev;
+
+	} else {
+		rc = -EINVAL;
+		goto err_ndev;
+	}
+
+	ndev_reset_unsafe_flags(ndev);
+
+	/* prime the cached link status before registration */
+	ndev->reg->poll_link(ndev);
+
+	ndev_init_debugfs(ndev);
+
+	rc = ntb_register_device(&ndev->ntb);
+	if (rc)
+		goto err_register;
+
+	dev_info(&pdev->dev, "NTB device registered.\n");
+
+	return 0;
+
+err_register:
+	ndev_deinit_debugfs(ndev);
+	if (pdev_is_atom(pdev))
+		atom_deinit_dev(ndev);
+	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
+		xeon_deinit_dev(ndev);
+err_init_dev:
+	intel_ntb_deinit_pci(ndev);
+err_init_pci:
+	kfree(ndev);
+err_ndev:
+	return rc;
+}
+
+/* pci remove entry point: unwind probe in reverse order. */
+static void intel_ntb_pci_remove(struct pci_dev *pdev)
+{
+	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	/* unregister first so no client can reach the hardware below */
+	ntb_unregister_device(&ndev->ntb);
+	ndev_deinit_debugfs(ndev);
+	if (pdev_is_atom(pdev))
+		atom_deinit_dev(ndev);
+	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
+		xeon_deinit_dev(ndev);
+	intel_ntb_deinit_pci(ndev);
+	kfree(ndev);
+}
+
+/* register descriptions for the Atom NTB flavor */
+static const struct intel_ntb_reg atom_reg = {
+	.poll_link		= atom_poll_link,
+	.link_is_up		= atom_link_is_up,
+	.db_ioread		= atom_db_ioread,
+	.db_iowrite		= atom_db_iowrite,
+	.db_size		= sizeof(u64),
+	.ntb_ctl		= ATOM_NTBCNTL_OFFSET,
+	.mw_bar			= {2, 4},
+};
+
+/* primary-side doorbell/scratchpad offsets (Atom) */
+static const struct intel_ntb_alt_reg atom_pri_reg = {
+	.db_bell		= ATOM_PDOORBELL_OFFSET,
+	.db_mask		= ATOM_PDBMSK_OFFSET,
+	.spad			= ATOM_SPAD_OFFSET,
+};
+
+/* b2b-side doorbell/scratchpad offsets (Atom) */
+static const struct intel_ntb_alt_reg atom_b2b_reg = {
+	.db_bell		= ATOM_B2B_DOORBELL_OFFSET,
+	.spad			= ATOM_B2B_SPAD_OFFSET,
+};
+
+/* incoming translation registers (Atom secondary side) */
+static const struct intel_ntb_xlat_reg atom_sec_xlat = {
+	/* FIXME : .bar0_base	= ATOM_SBAR0BASE_OFFSET, */
+	/* FIXME : .bar2_limit	= ATOM_SBAR2LMT_OFFSET, */
+	.bar2_xlat		= ATOM_SBAR2XLAT_OFFSET,
+};
+
+/* register descriptions for the classic Xeon NTB flavor */
+static const struct intel_ntb_reg xeon_reg = {
+	.poll_link		= xeon_poll_link,
+	.link_is_up		= xeon_link_is_up,
+	.db_ioread		= xeon_db_ioread,
+	.db_iowrite		= xeon_db_iowrite,
+	.db_size		= sizeof(u32),
+	.ntb_ctl		= XEON_NTBCNTL_OFFSET,
+	.mw_bar			= {2, 4, 5},
+};
+
+/* primary-side doorbell/scratchpad offsets (Xeon) */
+static const struct intel_ntb_alt_reg xeon_pri_reg = {
+	.db_bell		= XEON_PDOORBELL_OFFSET,
+	.db_mask		= XEON_PDBMSK_OFFSET,
+	.spad			= XEON_SPAD_OFFSET,
+};
+
+/* secondary-side doorbell/scratchpad offsets (Xeon) */
+static const struct intel_ntb_alt_reg xeon_sec_reg = {
+	.db_bell		= XEON_SDOORBELL_OFFSET,
+	.db_mask		= XEON_SDBMSK_OFFSET,
+	/* second half of the scratchpads */
+	.spad			= XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
+};
+
+/* b2b-side doorbell/scratchpad offsets (Xeon) */
+static const struct intel_ntb_alt_reg xeon_b2b_reg = {
+	.db_bell		= XEON_B2B_DOORBELL_OFFSET,
+	.spad			= XEON_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
+	/* Note: no primary .bar0_base visible to the secondary side.
+	 *
+	 * The secondary side cannot get the base address stored in primary
+	 * bars.  The base address is necessary to set the limit register to
+	 * any value other than zero, or unlimited.
+	 *
+	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
+	 * window by setting the limit equal to base, nor can it limit the size
+	 * of the memory window by setting the limit to base + size.
+	 */
+	.bar2_limit		= XEON_PBAR23LMT_OFFSET,
+	.bar2_xlat		= XEON_PBAR23XLAT_OFFSET,
+};
+
+/* incoming translation registers (Xeon secondary side) */
+static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
+	.bar0_base		= XEON_SBAR0BASE_OFFSET,
+	.bar2_limit		= XEON_SBAR23LMT_OFFSET,
+	.bar2_xlat		= XEON_SBAR23XLAT_OFFSET,
+};
+
+/* default b2b bar addresses for the upstream-facing device */
+static struct intel_b2b_addr xeon_b2b_usd_addr = {
+	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
+	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
+	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
+	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
+};
+
+/* default b2b bar addresses for the downstream-facing device */
+static struct intel_b2b_addr xeon_b2b_dsd_addr = {
+	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
+	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
+	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
+	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
+};
+
+/* register descriptions for the Skylake Xeon NTB flavor */
+static const struct intel_ntb_reg skx_reg = {
+	.poll_link		= skx_poll_link,
+	.link_is_up		= xeon_link_is_up,
+	.db_ioread		= skx_db_ioread,
+	.db_iowrite		= skx_db_iowrite,
+	.db_size		= sizeof(u32),
+	.ntb_ctl		= SKX_NTBCNTL_OFFSET,
+	.mw_bar			= {2, 4},
+};
+
+/* local (internal-memory) side doorbell/scratchpad offsets (skx) */
+static const struct intel_ntb_alt_reg skx_pri_reg = {
+	.db_bell		= SKX_EM_DOORBELL_OFFSET,
+	.db_clear		= SKX_IM_INT_STATUS_OFFSET,
+	.db_mask		= SKX_IM_INT_DISABLE_OFFSET,
+	.spad			= SKX_IM_SPAD_OFFSET,
+};
+
+/* peer (external-memory) side doorbell/scratchpad offsets (skx) */
+static const struct intel_ntb_alt_reg skx_b2b_reg = {
+	.db_bell		= SKX_IM_DOORBELL_OFFSET,
+	.db_clear		= SKX_EM_INT_STATUS_OFFSET,
+	.db_mask		= SKX_EM_INT_DISABLE_OFFSET,
+	.spad			= SKX_B2B_SPAD_OFFSET,
+};
+
+/* incoming translation registers (skx secondary side) */
+static const struct intel_ntb_xlat_reg skx_sec_xlat = {
+/*	.bar0_base		= SKX_EMBAR0_OFFSET, */
+	.bar2_limit		= SKX_IMBAR1XLMT_OFFSET,
+	.bar2_xlat		= SKX_IMBAR1XBASE_OFFSET,
+};
+
+/* operations for primary side of local ntb */
+static const struct ntb_dev_ops intel_ntb_ops = {
+	.mw_count		= intel_ntb_mw_count,
+	.mw_get_align		= intel_ntb_mw_get_align,
+	.mw_set_trans		= intel_ntb_mw_set_trans,
+	.peer_mw_count		= intel_ntb_peer_mw_count,
+	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
+	.link_is_up		= intel_ntb_link_is_up,
+	.link_enable		= intel_ntb_link_enable,
+	.link_disable		= intel_ntb_link_disable,
+	.db_is_unsafe		= intel_ntb_db_is_unsafe,
+	.db_valid_mask		= intel_ntb_db_valid_mask,
+	.db_vector_count	= intel_ntb_db_vector_count,
+	.db_vector_mask		= intel_ntb_db_vector_mask,
+	.db_read		= intel_ntb_db_read,
+	.db_clear		= intel_ntb_db_clear,
+	.db_set_mask		= intel_ntb_db_set_mask,
+	.db_clear_mask		= intel_ntb_db_clear_mask,
+	.peer_db_addr		= intel_ntb_peer_db_addr,
+	.peer_db_set		= intel_ntb_peer_db_set,
+	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
+	.spad_count		= intel_ntb_spad_count,
+	.spad_read		= intel_ntb_spad_read,
+	.spad_write		= intel_ntb_spad_write,
+	.peer_spad_addr		= intel_ntb_peer_spad_addr,
+	.peer_spad_read		= intel_ntb_peer_spad_read,
+	.peer_spad_write	= intel_ntb_peer_spad_write,
+};
+
+/*
+ * third-generation (skx) ops: same as above except for the ntb3-specific
+ * link/mw/doorbell handlers, and no db_is_unsafe hook.
+ */
+static const struct ntb_dev_ops intel_ntb3_ops = {
+	.mw_count		= intel_ntb_mw_count,
+	.mw_get_align		= intel_ntb_mw_get_align,
+	.mw_set_trans		= intel_ntb3_mw_set_trans,
+	.peer_mw_count		= intel_ntb_peer_mw_count,
+	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
+	.link_is_up		= intel_ntb_link_is_up,
+	.link_enable		= intel_ntb3_link_enable,
+	.link_disable		= intel_ntb_link_disable,
+	.db_valid_mask		= intel_ntb_db_valid_mask,
+	.db_vector_count	= intel_ntb_db_vector_count,
+	.db_vector_mask		= intel_ntb_db_vector_mask,
+	.db_read		= intel_ntb3_db_read,
+	.db_clear		= intel_ntb3_db_clear,
+	.db_set_mask		= intel_ntb_db_set_mask,
+	.db_clear_mask		= intel_ntb_db_clear_mask,
+	.peer_db_addr		= intel_ntb_peer_db_addr,
+	.peer_db_set		= intel_ntb3_peer_db_set,
+	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
+	.spad_count		= intel_ntb_spad_count,
+	.spad_read		= intel_ntb_spad_read,
+	.spad_write		= intel_ntb_spad_write,
+	.peer_spad_addr		= intel_ntb_peer_spad_addr,
+	.peer_spad_read		= intel_ntb_peer_spad_read,
+	.peer_spad_write	= intel_ntb_peer_spad_write,
+};
+
+/* read-only debugfs file exposing device state via ndev_debugfs_read() */
+static const struct file_operations intel_ntb_debugfs_info = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ndev_debugfs_read,
+};
+
+/* supported devices: Atom (BWD), classic Xeon (JSF..BDX), and skx b2b */
+static const struct pci_device_id intel_ntb_pci_tbl[] = {
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
+
+/* pci driver glue; probe/remove are defined above */
+static struct pci_driver intel_ntb_pci_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = intel_ntb_pci_tbl,
+	.probe = intel_ntb_pci_probe,
+	.remove = intel_ntb_pci_remove,
+};
+
+/* module init: create the (optional) debugfs dir and register the driver */
+static int __init intel_ntb_pci_driver_init(void)
+{
+	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+	/* debugfs is optional; the driver works without it */
+	if (debugfs_initialized())
+		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	return pci_register_driver(&intel_ntb_pci_driver);
+}
+module_init(intel_ntb_pci_driver_init);
+
+/* module exit: unregister the driver and remove the debugfs tree */
+static void __exit intel_ntb_pci_driver_exit(void)
+{
+	pci_unregister_driver(&intel_ntb_pci_driver);
+
+	debugfs_remove_recursive(debugfs_dir);
+}
+module_exit(intel_ntb_pci_driver_exit);
diff --git a/src/kernel/linux/v4.14/drivers/ntb/hw/intel/ntb_hw_intel.h b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/ntb_hw_intel.h
new file mode 100644
index 0000000..2d6c38a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -0,0 +1,389 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#ifndef NTB_HW_INTEL_H
+#define NTB_HW_INTEL_H
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF	0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF	0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF	0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB	0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB	0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB	0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT	0x0E0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT	0x0E0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT	0x0E0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX	0x2F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX	0x2F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX	0x2F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD	0x0C4E
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX	0x6F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX	0x6F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX	0x6F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX	0x201C
+
+/* Intel Xeon hardware */
+
+#define XEON_PBAR23LMT_OFFSET		0x0000
+#define XEON_PBAR45LMT_OFFSET		0x0008
+#define XEON_PBAR4LMT_OFFSET		0x0008
+#define XEON_PBAR5LMT_OFFSET		0x000c
+#define XEON_PBAR23XLAT_OFFSET		0x0010
+#define XEON_PBAR45XLAT_OFFSET		0x0018
+#define XEON_PBAR4XLAT_OFFSET		0x0018
+#define XEON_PBAR5XLAT_OFFSET		0x001c
+#define XEON_SBAR23LMT_OFFSET		0x0020
+#define XEON_SBAR45LMT_OFFSET		0x0028
+#define XEON_SBAR4LMT_OFFSET		0x0028
+#define XEON_SBAR5LMT_OFFSET		0x002c
+#define XEON_SBAR23XLAT_OFFSET		0x0030
+#define XEON_SBAR45XLAT_OFFSET		0x0038
+#define XEON_SBAR4XLAT_OFFSET		0x0038
+#define XEON_SBAR5XLAT_OFFSET		0x003c
+#define XEON_SBAR0BASE_OFFSET		0x0040
+#define XEON_SBAR23BASE_OFFSET		0x0048
+#define XEON_SBAR45BASE_OFFSET		0x0050
+#define XEON_SBAR4BASE_OFFSET		0x0050
+#define XEON_SBAR5BASE_OFFSET		0x0054
+#define XEON_SBDF_OFFSET		0x005c
+#define XEON_NTBCNTL_OFFSET		0x0058
+#define XEON_PDOORBELL_OFFSET		0x0060
+#define XEON_PDBMSK_OFFSET		0x0062
+#define XEON_SDOORBELL_OFFSET		0x0064
+#define XEON_SDBMSK_OFFSET		0x0066
+#define XEON_USMEMMISS_OFFSET		0x0070
+#define XEON_SPAD_OFFSET		0x0080
+#define XEON_PBAR23SZ_OFFSET		0x00d0
+#define XEON_PBAR45SZ_OFFSET		0x00d1
+#define XEON_PBAR4SZ_OFFSET		0x00d1
+#define XEON_SBAR23SZ_OFFSET		0x00d2
+#define XEON_SBAR45SZ_OFFSET		0x00d3
+#define XEON_SBAR4SZ_OFFSET		0x00d3
+#define XEON_PPD_OFFSET			0x00d4
+#define XEON_PBAR5SZ_OFFSET		0x00d5
+#define XEON_SBAR5SZ_OFFSET		0x00d6
+#define XEON_WCCNTRL_OFFSET		0x00e0
+#define XEON_UNCERRSTS_OFFSET		0x014c
+#define XEON_CORERRSTS_OFFSET		0x0158
+#define XEON_LINK_STATUS_OFFSET		0x01a2
+#define XEON_SPCICMD_OFFSET		0x0504
+#define XEON_DEVCTRL_OFFSET		0x0598
+#define XEON_DEVSTS_OFFSET		0x059a
+#define XEON_SLINK_STATUS_OFFSET	0x05a2
+#define XEON_B2B_SPAD_OFFSET		0x0100
+#define XEON_B2B_DOORBELL_OFFSET	0x0140
+#define XEON_B2B_XLAT_OFFSETL		0x0144
+#define XEON_B2B_XLAT_OFFSETU		0x0148
+#define XEON_PPD_CONN_MASK		0x03
+#define XEON_PPD_CONN_TRANSPARENT	0x00
+#define XEON_PPD_CONN_B2B		0x01
+#define XEON_PPD_CONN_RP		0x02
+#define XEON_PPD_DEV_MASK		0x10
+#define XEON_PPD_DEV_USD		0x00
+#define XEON_PPD_DEV_DSD		0x10
+#define XEON_PPD_SPLIT_BAR_MASK		0x40
+
+#define XEON_PPD_TOPO_MASK	(XEON_PPD_CONN_MASK | XEON_PPD_DEV_MASK)
+#define XEON_PPD_TOPO_PRI_USD	(XEON_PPD_CONN_RP | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_PRI_DSD	(XEON_PPD_CONN_RP | XEON_PPD_DEV_DSD)
+#define XEON_PPD_TOPO_SEC_USD	(XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_SEC_DSD	(XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_DSD)
+#define XEON_PPD_TOPO_B2B_USD	(XEON_PPD_CONN_B2B | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_B2B_DSD	(XEON_PPD_CONN_B2B | XEON_PPD_DEV_DSD)
+
+#define XEON_MW_COUNT			2
+#define HSX_SPLIT_BAR_MW_COUNT		3
+#define XEON_DB_COUNT			15
+#define XEON_DB_LINK			15
+#define XEON_DB_LINK_BIT			BIT_ULL(XEON_DB_LINK)
+#define XEON_DB_MSIX_VECTOR_COUNT	4
+#define XEON_DB_MSIX_VECTOR_SHIFT	5
+#define XEON_DB_TOTAL_SHIFT		16
+#define XEON_SPAD_COUNT			16
+
+/* Intel Skylake Xeon hardware */
+#define SKX_IMBAR1SZ_OFFSET		0x00d0
+#define SKX_IMBAR2SZ_OFFSET		0x00d1
+#define SKX_EMBAR1SZ_OFFSET		0x00d2
+#define SKX_EMBAR2SZ_OFFSET		0x00d3
+#define SKX_DEVCTRL_OFFSET		0x0098
+#define SKX_DEVSTS_OFFSET		0x009a
+#define SKX_UNCERRSTS_OFFSET		0x014c
+#define SKX_CORERRSTS_OFFSET		0x0158
+#define SKX_LINK_STATUS_OFFSET		0x01a2
+
+#define SKX_NTBCNTL_OFFSET		0x0000
+#define SKX_IMBAR1XBASE_OFFSET		0x0010		/* SBAR2XLAT */
+#define SKX_IMBAR1XLMT_OFFSET		0x0018		/* SBAR2LMT */
+#define SKX_IMBAR2XBASE_OFFSET		0x0020		/* SBAR4XLAT */
+#define SKX_IMBAR2XLMT_OFFSET		0x0028		/* SBAR4LMT */
+#define SKX_IM_INT_STATUS_OFFSET	0x0040
+#define SKX_IM_INT_DISABLE_OFFSET	0x0048
+#define SKX_IM_SPAD_OFFSET		0x0080		/* SPAD */
+#define SKX_USMEMMISS_OFFSET		0x0070
+#define SKX_INTVEC_OFFSET		0x00d0
+#define SKX_IM_DOORBELL_OFFSET		0x0100		/* SDOORBELL0 */
+#define SKX_B2B_SPAD_OFFSET		0x0180		/* B2B SPAD */
+#define SKX_EMBAR0XBASE_OFFSET		0x4008		/* B2B_XLAT */
+#define SKX_EMBAR1XBASE_OFFSET		0x4010		/* PBAR2XLAT */
+#define SKX_EMBAR1XLMT_OFFSET		0x4018		/* PBAR2LMT */
+#define SKX_EMBAR2XBASE_OFFSET		0x4020		/* PBAR4XLAT */
+#define SKX_EMBAR2XLMT_OFFSET		0x4028		/* PBAR4LMT */
+#define SKX_EM_INT_STATUS_OFFSET	0x4040
+#define SKX_EM_INT_DISABLE_OFFSET	0x4048
+#define SKX_EM_SPAD_OFFSET		0x4080		/* remote SPAD */
+#define SKX_EM_DOORBELL_OFFSET		0x4100		/* PDOORBELL0 */
+#define SKX_SPCICMD_OFFSET		0x4504		/* SPCICMD */
+#define SKX_EMBAR0_OFFSET		0x4510		/* SBAR0BASE */
+#define SKX_EMBAR1_OFFSET		0x4518		/* SBAR23BASE */
+#define SKX_EMBAR2_OFFSET		0x4520		/* SBAR45BASE */
+
+#define SKX_DB_COUNT			32
+#define SKX_DB_LINK			32
+#define SKX_DB_LINK_BIT			BIT_ULL(SKX_DB_LINK)
+#define SKX_DB_MSIX_VECTOR_COUNT	33
+#define SKX_DB_MSIX_VECTOR_SHIFT	1
+#define SKX_DB_TOTAL_SHIFT		33
+#define SKX_SPAD_COUNT			16
+
+/* Intel Atom hardware */
+
+#define ATOM_SBAR2XLAT_OFFSET		0x0008
+#define ATOM_PDOORBELL_OFFSET		0x0020
+#define ATOM_PDBMSK_OFFSET		0x0028
+#define ATOM_NTBCNTL_OFFSET		0x0060
+#define ATOM_SPAD_OFFSET			0x0080
+#define ATOM_PPD_OFFSET			0x00d4
+#define ATOM_PBAR2XLAT_OFFSET		0x8008
+#define ATOM_B2B_DOORBELL_OFFSET		0x8020
+#define ATOM_B2B_SPAD_OFFSET		0x8080
+#define ATOM_SPCICMD_OFFSET		0xb004
+#define ATOM_LINK_STATUS_OFFSET		0xb052
+#define ATOM_ERRCORSTS_OFFSET		0xb110
+#define ATOM_IP_BASE			0xc000
+#define ATOM_DESKEWSTS_OFFSET		(ATOM_IP_BASE + 0x3024)
+#define ATOM_LTSSMERRSTS0_OFFSET		(ATOM_IP_BASE + 0x3180)
+#define ATOM_LTSSMSTATEJMP_OFFSET	(ATOM_IP_BASE + 0x3040)
+#define ATOM_IBSTERRRCRVSTS0_OFFSET	(ATOM_IP_BASE + 0x3324)
+#define ATOM_MODPHY_PCSREG4		0x1c004
+#define ATOM_MODPHY_PCSREG6		0x1c006
+
+#define ATOM_PPD_INIT_LINK		0x0008
+#define ATOM_PPD_CONN_MASK		0x0300
+#define ATOM_PPD_CONN_TRANSPARENT	0x0000
+#define ATOM_PPD_CONN_B2B		0x0100
+#define ATOM_PPD_CONN_RP			0x0200
+#define ATOM_PPD_DEV_MASK		0x1000
+#define ATOM_PPD_DEV_USD			0x0000
+#define ATOM_PPD_DEV_DSD			0x1000
+#define ATOM_PPD_TOPO_MASK	(ATOM_PPD_CONN_MASK | ATOM_PPD_DEV_MASK)
+#define ATOM_PPD_TOPO_PRI_USD	(ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_PRI_DSD	(ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_DSD)
+#define ATOM_PPD_TOPO_SEC_USD	(ATOM_PPD_CONN_RP | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_SEC_DSD	(ATOM_PPD_CONN_RP | ATOM_PPD_DEV_DSD)
+#define ATOM_PPD_TOPO_B2B_USD	(ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_B2B_DSD	(ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_DSD)
+
+#define ATOM_MW_COUNT			2
+#define ATOM_DB_COUNT			34
+#define ATOM_DB_VALID_MASK		(BIT_ULL(ATOM_DB_COUNT) - 1)
+#define ATOM_DB_MSIX_VECTOR_COUNT	34
+#define ATOM_DB_MSIX_VECTOR_SHIFT	1
+#define ATOM_DB_TOTAL_SHIFT		34
+#define ATOM_SPAD_COUNT			16
+
+#define ATOM_NTB_CTL_DOWN_BIT		BIT(16)
+#define ATOM_NTB_CTL_ACTIVE(x)		!(x & ATOM_NTB_CTL_DOWN_BIT)
+
+#define ATOM_DESKEWSTS_DBERR		BIT(15)
+#define ATOM_LTSSMERRSTS0_UNEXPECTEDEI	BIT(20)
+#define ATOM_LTSSMSTATEJMP_FORCEDETECT	BIT(2)
+#define ATOM_IBIST_ERR_OFLOW		0x7FFF7FFF
+
+#define ATOM_LINK_HB_TIMEOUT		msecs_to_jiffies(1000)
+#define ATOM_LINK_RECOVERY_TIME		msecs_to_jiffies(500)
+
+/* Ntb control and link status */
+
+#define NTB_CTL_CFG_LOCK		BIT(0)
+#define NTB_CTL_DISABLE			BIT(1)
+#define NTB_CTL_S2P_BAR2_SNOOP		BIT(2)
+#define NTB_CTL_P2S_BAR2_SNOOP		BIT(4)
+#define NTB_CTL_S2P_BAR4_SNOOP		BIT(6)
+#define NTB_CTL_P2S_BAR4_SNOOP		BIT(8)
+#define NTB_CTL_S2P_BAR5_SNOOP		BIT(12)
+#define NTB_CTL_P2S_BAR5_SNOOP		BIT(14)
+
+#define NTB_LNK_STA_ACTIVE_BIT		0x2000
+#define NTB_LNK_STA_SPEED_MASK		0x000f
+#define NTB_LNK_STA_WIDTH_MASK		0x03f0
+#define NTB_LNK_STA_ACTIVE(x)		(!!((x) & NTB_LNK_STA_ACTIVE_BIT))
+#define NTB_LNK_STA_SPEED(x)		((x) & NTB_LNK_STA_SPEED_MASK)
+#define NTB_LNK_STA_WIDTH(x)		(((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)
+
+/* Use the following addresses for translation between b2b ntb devices in case
+ * the hardware default values are not reliable. */
+#define XEON_B2B_BAR0_ADDR	0x1000000000000000ull
+#define XEON_B2B_BAR2_ADDR64	0x2000000000000000ull
+#define XEON_B2B_BAR4_ADDR64	0x4000000000000000ull
+#define XEON_B2B_BAR4_ADDR32	0x20000000u
+#define XEON_B2B_BAR5_ADDR32	0x40000000u
+
+/* The peer ntb secondary config space is 32KB fixed size */
+#define XEON_B2B_MIN_SIZE		0x8000
+
+/* flags to indicate hardware errata */
+#define NTB_HWERR_SDOORBELL_LOCKUP	BIT_ULL(0)
+#define NTB_HWERR_SB01BASE_LOCKUP	BIT_ULL(1)
+#define NTB_HWERR_B2BDOORBELL_BIT14	BIT_ULL(2)
+#define NTB_HWERR_MSIX_VECTOR32_BAD	BIT_ULL(3)
+
+/* flags to indicate unsafe api */
+#define NTB_UNSAFE_DB			BIT_ULL(0)
+#define NTB_UNSAFE_SPAD			BIT_ULL(1)
+
+#define NTB_BAR_MASK_64			~(0xfull)
+#define NTB_BAR_MASK_32			~(0xfu)
+
+struct intel_ntb_dev;
+
+/*
+ * Hardware-generation-specific register callbacks and layout constants
+ * (Atom vs Xeon vs Skylake).
+ */
+struct intel_ntb_reg {
+	int (*poll_link)(struct intel_ntb_dev *ndev);
+	int (*link_is_up)(struct intel_ntb_dev *ndev);
+	u64 (*db_ioread)(void __iomem *mmio);
+	void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
+	unsigned long			ntb_ctl;
+	resource_size_t			db_size;
+	int				mw_bar[];	/* flexible array: per-memory-window BAR number */
+};
+
+/* Register offsets for one side's doorbell and scratchpad register group */
+struct intel_ntb_alt_reg {
+	unsigned long			db_bell;
+	unsigned long			db_mask;
+	unsigned long			db_clear;
+	unsigned long			spad;
+};
+
+/* Register offsets for a BAR's base, translate and limit registers */
+struct intel_ntb_xlat_reg {
+	unsigned long			bar0_base;
+	unsigned long			bar2_xlat;
+	unsigned long			bar2_limit;
+};
+
+/* Fixed peer BAR addresses used for B2B translation (XEON_B2B_*_ADDR*) */
+struct intel_b2b_addr {
+	phys_addr_t			bar0_addr;
+	phys_addr_t			bar2_addr64;
+	phys_addr_t			bar4_addr64;
+	phys_addr_t			bar4_addr32;
+	phys_addr_t			bar5_addr32;
+};
+
+/* Context handed to each MSI-X interrupt vector: owning device + vector number */
+struct intel_ntb_vec {
+	struct intel_ntb_dev		*ndev;
+	int				num;
+};
+
+/* All driver-private state for one Intel NTB device */
+struct intel_ntb_dev {
+	struct ntb_dev			ntb;	/* embedded common NTB device; see ntb_ndev() */
+
+	/* offset of peer bar0 in b2b bar */
+	unsigned long			b2b_off;
+	/* mw idx used to access peer bar0 */
+	unsigned int			b2b_idx;
+
+	/* BAR45 is split into BAR4 and BAR5 */
+	bool				bar4_split;
+
+	/* values named after the NTBCNTL and LINK_STATUS registers above */
+	u32				ntb_ctl;
+	u32				lnk_sta;
+
+	/* hardware resource counts and doorbell/vector geometry */
+	unsigned char			mw_count;
+	unsigned char			spad_count;
+	unsigned char			db_count;
+	unsigned char			db_vec_count;
+	unsigned char			db_vec_shift;
+
+	u64				db_valid_mask;
+	u64				db_link_mask;
+	u64				db_mask;
+
+	/* synchronize rmw access of db_mask and hw reg */
+	spinlock_t			db_mask_lock;
+
+	struct msix_entry		*msix;
+	struct intel_ntb_vec		*vec;
+
+	/* generation-specific register descriptions (see structs above) */
+	const struct intel_ntb_reg	*reg;
+	const struct intel_ntb_alt_reg	*self_reg;
+	const struct intel_ntb_alt_reg	*peer_reg;
+	const struct intel_ntb_xlat_reg	*xlat_reg;
+	void				__iomem *self_mmio;
+	void				__iomem *peer_mmio;
+	phys_addr_t			peer_addr;
+
+	/* used by the hb_timer delayed work (see hb_ndev() below) */
+	unsigned long			last_ts;
+	struct delayed_work		hb_timer;
+
+	/* NTB_HWERR_* and NTB_UNSAFE_* bits defined above */
+	unsigned long			hwerr_flags;
+	unsigned long			unsafe_flags;
+	unsigned long			unsafe_flags_ignore;
+
+	struct dentry			*debugfs_dir;
+	struct dentry			*debugfs_info;
+};
+
+#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb)
+#define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \
+				     hb_timer.work)
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/ntb/ntb.c b/src/kernel/linux/v4.14/drivers/ntb/ntb.c
new file mode 100644
index 0000000..b75ec22
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/ntb.c
@@ -0,0 +1,316 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define DRIVER_NAME			"ntb"
+#define DRIVER_DESCRIPTION		"PCIe NTB Driver Framework"
+
+#define DRIVER_LICENSE			"Dual BSD/GPL"
+#define DRIVER_VERSION			"1.0"
+#define DRIVER_RELDATE			"24 March 2015"
+#define DRIVER_AUTHOR			"Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static struct bus_type ntb_bus;
+static void ntb_dev_release(struct device *dev);
+
+/*
+ * __ntb_register_client() - register an NTB client driver on the ntb bus
+ * @client: client providing a complete set of ops
+ * @mod: module owning the client (presumably passed as THIS_MODULE by a
+ *       wrapper macro in linux/ntb.h — confirm)
+ * @mod_name: module name, used as the driver name on the bus
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int __ntb_register_client(struct ntb_client *client, struct module *mod,
+			  const char *mod_name)
+{
+	if (!client)
+		return -EINVAL;
+	if (!ntb_client_ops_is_valid(&client->ops))
+		return -EINVAL;
+
+	memset(&client->drv, 0, sizeof(client->drv));
+	client->drv.bus = &ntb_bus;
+	client->drv.name = mod_name;
+	client->drv.owner = mod;
+
+	return driver_register(&client->drv);
+}
+EXPORT_SYMBOL(__ntb_register_client);
+
+/* ntb_unregister_client() - remove a client driver from the ntb bus */
+void ntb_unregister_client(struct ntb_client *client)
+{
+	driver_unregister(&client->drv);
+}
+EXPORT_SYMBOL(ntb_unregister_client);
+
+/*
+ * ntb_register_device() - register an NTB hardware device on the ntb bus
+ * @ntb: NTB device with a valid pdev and a complete set of ops
+ *
+ * Initializes the embedded struct device (parented to the PCI device and
+ * named after the PCI address) and registers it so client drivers can
+ * bind.  Per the driver-core contract, a device whose device_register()
+ * failed must still be released with put_device() — never freed or
+ * reused directly — so the reference is dropped here on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int ntb_register_device(struct ntb_dev *ntb)
+{
+	int rc;
+
+	if (!ntb)
+		return -EINVAL;
+	if (!ntb->pdev)
+		return -EINVAL;
+	if (!ntb->ops)
+		return -EINVAL;
+	if (!ntb_dev_ops_is_valid(ntb->ops))
+		return -EINVAL;
+
+	init_completion(&ntb->released);
+
+	memset(&ntb->dev, 0, sizeof(ntb->dev));
+	ntb->dev.bus = &ntb_bus;
+	ntb->dev.parent = &ntb->pdev->dev;
+	ntb->dev.release = ntb_dev_release;
+	dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
+
+	ntb->ctx = NULL;
+	ntb->ctx_ops = NULL;
+	spin_lock_init(&ntb->ctx_lock);
+
+	rc = device_register(&ntb->dev);
+	if (rc)
+		put_device(&ntb->dev);
+
+	return rc;
+}
+EXPORT_SYMBOL(ntb_register_device);
+
+/*
+ * ntb_unregister_device() - unregister an NTB device from the ntb bus
+ *
+ * Blocks until ntb_dev_release() has completed ->released, i.e. until
+ * the last reference to the embedded struct device is gone.
+ */
+void ntb_unregister_device(struct ntb_dev *ntb)
+{
+	device_unregister(&ntb->dev);
+	wait_for_completion(&ntb->released);
+}
+EXPORT_SYMBOL(ntb_unregister_device);
+
+/*
+ * ntb_set_ctx() - install a client context and event callbacks
+ *
+ * Fails with -EINVAL if @ctx_ops is not a valid set of ops or if a
+ * context is already installed.  The assignment is done under ctx_lock
+ * so it is atomic with respect to the event dispatchers below; the
+ * inner braces only mark the locked region.
+ */
+int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
+		const struct ntb_ctx_ops *ctx_ops)
+{
+	unsigned long irqflags;
+
+	if (!ntb_ctx_ops_is_valid(ctx_ops))
+		return -EINVAL;
+	if (ntb->ctx_ops)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+	{
+		ntb->ctx = ctx;
+		ntb->ctx_ops = ctx_ops;
+	}
+	spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ntb_set_ctx);
+
+/* ntb_clear_ctx() - detach the client context, serialized by ctx_lock */
+void ntb_clear_ctx(struct ntb_dev *ntb)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+	{
+		ntb->ctx_ops = NULL;
+		ntb->ctx = NULL;
+	}
+	spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_clear_ctx);
+
+/*
+ * Event dispatchers: forward link, doorbell and message events from the
+ * hardware driver to the client's ctx_ops callback, if one is installed.
+ * Dispatch runs under ctx_lock so it cannot race with ntb_set_ctx() /
+ * ntb_clear_ctx(); the inner braces only highlight the locked region.
+ */
+void ntb_link_event(struct ntb_dev *ntb)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+	{
+		if (ntb->ctx_ops && ntb->ctx_ops->link_event)
+			ntb->ctx_ops->link_event(ntb->ctx);
+	}
+	spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_link_event);
+
+/* Dispatch a doorbell event for one interrupt @vector to the client. */
+void ntb_db_event(struct ntb_dev *ntb, int vector)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+	{
+		if (ntb->ctx_ops && ntb->ctx_ops->db_event)
+			ntb->ctx_ops->db_event(ntb->ctx, vector);
+	}
+	spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_db_event);
+
+/* Dispatch a message-register event to the client. */
+void ntb_msg_event(struct ntb_dev *ntb)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+	{
+		if (ntb->ctx_ops && ntb->ctx_ops->msg_event)
+			ntb->ctx_ops->msg_event(ntb->ctx);
+	}
+	spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_msg_event);
+
+/*
+ * Default port-numbering helpers for simple two-port (primary/secondary
+ * or B2B) topologies.  Unknown topologies report port number 0.
+ */
+int ntb_default_port_number(struct ntb_dev *ntb)
+{
+	switch (ntb->topo) {
+	case NTB_TOPO_PRI:
+	case NTB_TOPO_B2B_USD:
+		return NTB_PORT_PRI_USD;
+	case NTB_TOPO_SEC:
+	case NTB_TOPO_B2B_DSD:
+		return NTB_PORT_SEC_DSD;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(ntb_default_port_number);
+
+/* Two-port topology: there is always exactly one peer. */
+int ntb_default_peer_port_count(struct ntb_dev *ntb)
+{
+	return NTB_DEF_PEER_IDX;
+}
+EXPORT_SYMBOL(ntb_default_peer_port_count);
+
+/* The peer of the primary side is the secondary side, and vice versa. */
+int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx)
+{
+	if (pidx != NTB_DEF_PEER_IDX)
+		return -EINVAL;
+
+	switch (ntb->topo) {
+	case NTB_TOPO_PRI:
+	case NTB_TOPO_B2B_USD:
+		return NTB_PORT_SEC_DSD;
+	case NTB_TOPO_SEC:
+	case NTB_TOPO_B2B_DSD:
+		return NTB_PORT_PRI_USD;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(ntb_default_peer_port_number);
+
+/* Map a peer port number back to its index; only index 0 exists here. */
+int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port)
+{
+	int peer_port = ntb_default_peer_port_number(ntb, NTB_DEF_PEER_IDX);
+
+	if (peer_port == -EINVAL || port != peer_port)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(ntb_default_peer_port_idx);
+
+/*
+ * Bus probe: take a reference on the device for the lifetime of the
+ * client binding, and drop it again if the client's probe fails.
+ */
+static int ntb_probe(struct device *dev)
+{
+	struct ntb_dev *ntb;
+	struct ntb_client *client;
+	int rc;
+
+	get_device(dev);
+	ntb = dev_ntb(dev);
+	client = drv_ntb_client(dev->driver);
+
+	rc = client->ops.probe(client, ntb);
+	if (rc)
+		put_device(dev);
+
+	return rc;
+}
+
+/* Bus remove: call the client's remove op and drop the probe reference. */
+static int ntb_remove(struct device *dev)
+{
+	struct ntb_dev *ntb;
+	struct ntb_client *client;
+
+	if (dev->driver) {
+		ntb = dev_ntb(dev);
+		client = drv_ntb_client(dev->driver);
+
+		client->ops.remove(client, ntb);
+		put_device(dev);
+	}
+
+	return 0;
+}
+
+/* Device release: wakes up ntb_unregister_device(), which waits on this. */
+static void ntb_dev_release(struct device *dev)
+{
+	struct ntb_dev *ntb = dev_ntb(dev);
+
+	complete(&ntb->released);
+}
+
+/* The "ntb" bus: hardware devices on one side, client drivers on the other */
+static struct bus_type ntb_bus = {
+	.name = "ntb",
+	.probe = ntb_probe,
+	.remove = ntb_remove,
+};
+
+/* Module init: register the ntb bus itself. */
+static int __init ntb_driver_init(void)
+{
+	return bus_register(&ntb_bus);
+}
+module_init(ntb_driver_init);
+
+/* Module exit: tear down the ntb bus. */
+static void __exit ntb_driver_exit(void)
+{
+	bus_unregister(&ntb_bus);
+}
+module_exit(ntb_driver_exit);
diff --git a/src/kernel/linux/v4.14/drivers/ntb/ntb_transport.c b/src/kernel/linux/v4.14/drivers/ntb/ntb_transport.c
new file mode 100644
index 0000000..18339b7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/ntb_transport.c
@@ -0,0 +1,2261 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Transport Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "linux/ntb.h"
+#include "linux/ntb_transport.h"
+
+#define NTB_TRANSPORT_VERSION	4
+#define NTB_TRANSPORT_VER	"4"
+#define NTB_TRANSPORT_NAME	"ntb_transport"
+#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
+#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
+
+MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
+MODULE_VERSION(NTB_TRANSPORT_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
+static unsigned int transport_mtu = 0x10000;
+module_param(transport_mtu, uint, 0644);
+MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
+
+static unsigned char max_num_clients;
+module_param(max_num_clients, byte, 0644);
+MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
+
+static unsigned int copy_bytes = 1024;
+module_param(copy_bytes, uint, 0644);
+MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
+
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+
+static struct dentry *nt_debugfs_dir;
+
+/* Only two-ports NTB devices are supported */
+#define PIDX		NTB_DEF_PEER_IDX
+
+/* One in-flight (or free) transfer descriptor belonging to a queue pair */
+struct ntb_queue_entry {
+	/* ntb_queue list reference */
+	struct list_head entry;
+	/* pointers to data to be transferred */
+	void *cb_data;
+	void *buf;
+	unsigned int len;
+	unsigned int flags;
+	int retries;
+	int errors;
+	unsigned int tx_index;
+	unsigned int rx_index;
+
+	struct ntb_transport_qp *qp;
+	/* payload header: iomem (memory-window) on the TX side, plain memory on RX */
+	union {
+		struct ntb_payload_header __iomem *tx_hdr;
+		struct ntb_payload_header *rx_hdr;
+	};
+};
+
+/*
+ * RX bookkeeping exchanged with the peer (see qp->rx_info, which is
+ * __iomem/peer-visible, vs qp->remote_rx_info).  NOTE(review): "entry"
+ * appears to be an RX ring index — confirm against the qp code.
+ */
+struct ntb_rx_info {
+	unsigned int entry;
+};
+
+/*
+ * One software queue pair: a TX ring written through an ioremapped
+ * memory window (tx_mw), an RX ring in a local buffer (rx_buff),
+ * optional DMA channels, link-state work items, and statistics.
+ */
+struct ntb_transport_qp {
+	struct ntb_transport_ctx *transport;
+	struct ntb_dev *ndev;
+	void *cb_data;
+	struct dma_chan *tx_dma_chan;
+	struct dma_chan *rx_dma_chan;
+
+	bool client_ready;
+	bool link_is_up;
+	bool active;
+
+	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
+	u64 qp_bit;
+
+	struct ntb_rx_info __iomem *rx_info;
+	struct ntb_rx_info *remote_rx_info;
+
+	/* TX side: completion callback, free-entry list, ring geometry */
+	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+			   void *data, int len);
+	struct list_head tx_free_q;
+	spinlock_t ntb_tx_free_q_lock;
+	void __iomem *tx_mw;
+	dma_addr_t tx_mw_phys;
+	unsigned int tx_index;
+	unsigned int tx_max_entry;
+	unsigned int tx_max_frame;
+
+	/* RX side: callback, posted/pending/free lists, ring geometry */
+	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+			   void *data, int len);
+	struct list_head rx_post_q;
+	struct list_head rx_pend_q;
+	struct list_head rx_free_q;
+	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+	spinlock_t ntb_rx_q_lock;
+	void *rx_buff;
+	unsigned int rx_index;
+	unsigned int rx_max_entry;
+	unsigned int rx_max_frame;
+	unsigned int rx_alloc_entry;
+	dma_cookie_t last_cookie;
+	struct tasklet_struct rxc_db_work;
+
+	/* link bring-up / tear-down handling */
+	void (*event_handler)(void *data, int status);
+	struct delayed_work link_work;
+	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_stats;
+
+	/* Stats */
+	u64 rx_bytes;
+	u64 rx_pkts;
+	u64 rx_ring_empty;
+	u64 rx_err_no_buf;
+	u64 rx_err_oflow;
+	u64 rx_err_ver;
+	u64 rx_memcpy;
+	u64 rx_async;
+	u64 tx_bytes;
+	u64 tx_pkts;
+	u64 tx_ring_full;
+	u64 tx_err_no_buf;
+	u64 tx_memcpy;
+	u64 tx_async;
+};
+
+/*
+ * One NTB memory window: the BAR's physical properties (address, size,
+ * translation alignment), its ioremap (vbase), and a locally allocated
+ * DMA buffer (virt_addr/dma_addr, buff_size) the window translates to.
+ */
+struct ntb_transport_mw {
+	phys_addr_t phys_addr;
+	resource_size_t phys_size;
+	resource_size_t xlat_align;
+	resource_size_t xlat_align_size;
+	void __iomem *vbase;
+	size_t xlat_size;
+	size_t buff_size;
+	void *virt_addr;
+	dma_addr_t dma_addr;
+};
+
+/* A named client device registered on one transport instance's bus */
+struct ntb_transport_client_dev {
+	struct list_head entry;
+	struct ntb_transport_ctx *nt;
+	struct device dev;
+};
+
+/*
+ * Per-NTB-device transport state: memory windows, queue pairs (with a
+ * bitmap of free qps), client devices, and link bring-up/cleanup work.
+ */
+struct ntb_transport_ctx {
+	struct list_head entry;
+	struct list_head client_devs;
+
+	struct ntb_dev *ndev;
+
+	struct ntb_transport_mw *mw_vec;
+	struct ntb_transport_qp *qp_vec;
+	unsigned int mw_count;
+	unsigned int qp_count;
+	u64 qp_bitmap;
+	u64 qp_bitmap_free;
+
+	bool link_is_up;
+	struct delayed_work link_work;
+	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_node_dir;
+};
+
+/* Bits carried in ntb_payload_header.flags */
+enum {
+	DESC_DONE_FLAG = BIT(0),
+	LINK_DOWN_FLAG = BIT(1),
+};
+
+/* Per-entry header accompanying each payload (see entry->tx_hdr/rx_hdr) */
+struct ntb_payload_header {
+	unsigned int ver;
+	unsigned int len;
+	unsigned int flags;
+};
+
+/*
+ * Scratchpad register indices used for link negotiation.  Each memory
+ * window takes two more entries after MW0_SZ_HIGH/MW0_SZ_LOW, which is
+ * why NTB_TRANSPORT_MIN_SPADS is defined as MW0_SZ_HIGH + 2.
+ */
+enum {
+	VERSION = 0,
+	QP_LINKS,
+	NUM_QPS,
+	NUM_MWS,
+	MW0_SZ_HIGH,
+	MW0_SZ_LOW,
+};
+
+#define dev_client_dev(__dev) \
+	container_of((__dev), struct ntb_transport_client_dev, dev)
+
+#define drv_client(__drv) \
+	container_of((__drv), struct ntb_transport_client, driver)
+
+#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
+#define NTB_QP_DEF_NUM_ENTRIES	100
+#define NTB_LINK_DOWN_TIMEOUT	10
+
+static void ntb_transport_rxc_db(unsigned long data);
+static const struct ntb_ctx_ops ntb_transport_ops;
+static struct ntb_client ntb_transport_client;
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+			       struct ntb_queue_entry *entry);
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
+
+
+/* Devices match drivers whose name is a prefix of the device name. */
+static int ntb_transport_bus_match(struct device *dev,
+				   struct device_driver *drv)
+{
+	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
+}
+
+/*
+ * Bus probe: take a device reference across the client binding and
+ * drop it again if the client's probe fails.
+ */
+static int ntb_transport_bus_probe(struct device *dev)
+{
+	const struct ntb_transport_client *client;
+	int rc = -EINVAL;
+
+	get_device(dev);
+
+	client = drv_client(dev->driver);
+	rc = client->probe(dev);
+	if (rc)
+		put_device(dev);
+
+	return rc;
+}
+
+/* Bus remove: call the client's remove hook and drop the probe reference. */
+static int ntb_transport_bus_remove(struct device *dev)
+{
+	const struct ntb_transport_client *client;
+
+	client = drv_client(dev->driver);
+	client->remove(dev);
+
+	put_device(dev);
+
+	return 0;
+}
+
+/* The "ntb_transport" virtual bus that client devices register on */
+static struct bus_type ntb_transport_bus = {
+	.name = "ntb_transport",
+	.match = ntb_transport_bus_match,
+	.probe = ntb_transport_bus_probe,
+	.remove = ntb_transport_bus_remove,
+};
+
+static LIST_HEAD(ntb_transport_list);
+
+static int ntb_bus_init(struct ntb_transport_ctx *nt)
+{
+	list_add_tail(&nt->entry, &ntb_transport_list);
+	return 0;
+}
+
+/*
+ * Remove a transport context from the global list, forcibly unregistering
+ * any client devices that are still attached (and warning about them).
+ */
+static void ntb_bus_remove(struct ntb_transport_ctx *nt)
+{
+	struct ntb_transport_client_dev *client_dev, *cd;
+
+	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
+		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
+			dev_name(&client_dev->dev));
+		list_del(&client_dev->entry);
+		device_unregister(&client_dev->dev);
+	}
+
+	list_del(&nt->entry);
+}
+
+/*
+ * Device release callback: frees the client device container when the
+ * last reference to the embedded struct device is dropped.
+ */
+static void ntb_transport_client_release(struct device *dev)
+{
+	kfree(dev_client_dev(dev));
+}
+
+/**
+ * ntb_transport_unregister_client_dev - Unregister NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Unregister an NTB client device with the NTB transport layer
+ */
+void ntb_transport_unregister_client_dev(char *device_name)
+{
+	struct ntb_transport_client_dev *client, *cd;
+	struct ntb_transport_ctx *nt;
+
+	/* remove every "<device_name><n>" instance from every transport */
+	list_for_each_entry(nt, &ntb_transport_list, entry)
+		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
+			if (!strncmp(dev_name(&client->dev), device_name,
+				     strlen(device_name))) {
+				list_del(&client->entry);
+				device_unregister(&client->dev);
+			}
+}
+EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
+
+/**
+ * ntb_transport_register_client_dev - Register NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Register an NTB client device (named "<device_name><n>") on each
+ * transport in the global list.  On any failure, all instances created
+ * by this call are torn down again.
+ *
+ * RETURNS: Zero for success, or an appropriate -ERRNO value on error.
+ */
+int ntb_transport_register_client_dev(char *device_name)
+{
+	struct ntb_transport_client_dev *client_dev;
+	struct ntb_transport_ctx *nt;
+	int node;
+	int rc, i = 0;
+
+	if (list_empty(&ntb_transport_list))
+		return -ENODEV;
+
+	list_for_each_entry(nt, &ntb_transport_list, entry) {
+		struct device *dev;
+
+		node = dev_to_node(&nt->ndev->dev);
+
+		client_dev = kzalloc_node(sizeof(*client_dev),
+					  GFP_KERNEL, node);
+		if (!client_dev) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		dev = &client_dev->dev;
+
+		/* setup and register client devices */
+		dev_set_name(dev, "%s%d", device_name, i);
+		dev->bus = &ntb_transport_bus;
+		dev->release = ntb_transport_client_release;
+		dev->parent = &nt->ndev->dev;
+
+		rc = device_register(dev);
+		if (rc) {
+			/*
+			 * device_register() initializes the device even on
+			 * failure, so the refcount owns client_dev now:
+			 * drop the reference and let the release callback
+			 * free it.  A direct kfree() here would bypass the
+			 * release path and corrupt the refcounted object.
+			 */
+			put_device(dev);
+			goto err;
+		}
+
+		list_add_tail(&client_dev->entry, &nt->client_devs);
+		i++;
+	}
+
+	return 0;
+
+err:
+	ntb_transport_unregister_client_dev(device_name);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
+
+/**
+ * ntb_transport_register_client - Register NTB client driver
+ * @drv: NTB client driver to be registered
+ *
+ * Register an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_register_client(struct ntb_transport_client *drv)
+{
+	drv->driver.bus = &ntb_transport_bus;
+
+	/* refuse registration while no transport instance exists */
+	if (list_empty(&ntb_transport_list))
+		return -ENODEV;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_register_client);
+
+/**
+ * ntb_transport_unregister_client - Unregister NTB client driver
+ * @drv: NTB client driver to be unregistered
+ *
+ * Unregister an NTB client driver with the NTB transport layer
+ */
+void ntb_transport_unregister_client(struct ntb_transport_client *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
+
+/*
+ * Read handler for the per-qp "stats" debugfs file: format the qp's
+ * counters into a kernel buffer and copy them out to userspace.
+ */
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
+			    loff_t *offp)
+{
+	struct ntb_transport_qp *qp;
+	char *buf;
+	ssize_t ret, out_offset, out_count;
+
+	qp = filp->private_data;
+
+	/* nothing to report until the qp link is up */
+	if (!qp || !qp->link_is_up)
+		return 0;
+
+	out_count = 1000;	/* fixed scratch buffer size for the report */
+
+	buf = kmalloc(out_count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	out_offset = 0;
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "\nNTB QP stats:\n\n");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_bytes - \t%llu\n", qp->rx_bytes);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_pkts - \t%llu\n", qp->rx_pkts);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_async - \t%llu\n", qp->rx_async);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_buff - \t0x%p\n", qp->rx_buff);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_index - \t%u\n", qp->rx_index);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
+
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_bytes - \t%llu\n", qp->tx_bytes);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_pkts - \t%llu\n", qp->tx_pkts);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_async - \t%llu\n", qp->tx_async);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_mw - \t0x%p\n", qp->tx_mw);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_index (H) - \t%u\n", qp->tx_index);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "RRI (T) - \t%u\n",
+			       qp->remote_rx_info->entry);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "free tx - \t%u\n",
+			       ntb_transport_tx_free_entry(qp));
+
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "\n");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Using TX DMA - \t%s\n",
+			       qp->tx_dma_chan ? "Yes" : "No");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Using RX DMA - \t%s\n",
+			       qp->rx_dma_chan ? "Yes" : "No");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "QP Link - \t%s\n",
+			       qp->link_is_up ? "Up" : "Down");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "\n");
+
+	/* snprintf returns would-have-written length; clamp on truncation */
+	if (out_offset > out_count)
+		out_offset = out_count;
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+	kfree(buf);
+	return ret;
+}
+
+/* File operations for the per-qp debugfs "stats" file. */
+static const struct file_operations ntb_qp_debugfs_stats = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = debugfs_read,
+};
+
+/* Append @entry to @list under @lock (irq-safe). */
+static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
+			 struct list_head *list)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(lock, irqflags);
+	list_add_tail(entry, list);
+	spin_unlock_irqrestore(lock, irqflags);
+}
+
+/*
+ * Pop and return the first entry of @list under @lock (irq-safe),
+ * or NULL if the list is empty.
+ */
+static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
+					   struct list_head *list)
+{
+	struct ntb_queue_entry *head = NULL;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(lock, irqflags);
+
+	if (!list_empty(list)) {
+		head = list_first_entry(list, struct ntb_queue_entry, entry);
+		list_del(&head->entry);
+	}
+
+	spin_unlock_irqrestore(lock, irqflags);
+
+	return head;
+}
+
+/*
+ * Move the first entry of @list to the tail of @to_list under @lock
+ * (irq-safe) and return it, or NULL if @list is empty.
+ */
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+					   struct list_head *list,
+					   struct list_head *to_list)
+{
+	struct ntb_queue_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	if (list_empty(list)) {
+		entry = NULL;
+	} else {
+		entry = list_first_entry(list, struct ntb_queue_entry, entry);
+		list_move_tail(&entry->entry, to_list);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return entry;
+}
+
+/*
+ * Carve out this qp's receive region from its assigned memory window,
+ * size the rx ring, top up the rx entry pool if the ring grew, and
+ * zero the per-frame payload headers.
+ */
+static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
+				     unsigned int qp_num)
+{
+	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
+	struct ntb_transport_mw *mw;
+	struct ntb_dev *ndev = nt->ndev;
+	struct ntb_queue_entry *entry;
+	unsigned int rx_size, num_qps_mw;
+	unsigned int mw_num, mw_count, qp_count;
+	unsigned int i;
+	int node;
+
+	mw_count = nt->mw_count;
+	qp_count = nt->qp_count;
+
+	mw_num = QP_TO_MW(nt, qp_num);
+	mw = &nt->mw_vec[mw_num];
+
+	if (!mw->virt_addr)
+		return -ENOMEM;
+
+	/* the first (qp_count % mw_count) windows host one extra qp */
+	if (mw_num < qp_count % mw_count)
+		num_qps_mw = qp_count / mw_count + 1;
+	else
+		num_qps_mw = qp_count / mw_count;
+
+	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
+	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
+	rx_size -= sizeof(struct ntb_rx_info);
+
+	/* the rx_info struct lives at the very end of the qp's region */
+	qp->remote_rx_info = qp->rx_buff + rx_size;
+
+	/* Due to housekeeping, there must be atleast 2 buffs */
+	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
+	qp->rx_max_entry = rx_size / qp->rx_max_frame;
+	qp->rx_index = 0;
+
+	/*
+	 * Checking to see if we have more entries than the default.
+	 * We should add additional entries if that is the case so we
+	 * can be in sync with the transport frames.
+	 */
+	node = dev_to_node(&ndev->dev);
+	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
+		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
+		if (!entry)
+			return -ENOMEM;
+
+		entry->qp = qp;
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
+			     &qp->rx_free_q);
+		qp->rx_alloc_entry++;
+	}
+
+	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
+
+	/* setup the hdr offsets with 0's */
+	for (i = 0; i < qp->rx_max_entry; i++) {
+		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
+				sizeof(struct ntb_payload_header));
+		memset(offset, 0, sizeof(struct ntb_payload_header));
+	}
+
+	qp->rx_pkts = 0;
+	qp->tx_pkts = 0;
+	qp->tx_index = 0;
+
+	return 0;
+}
+
+/*
+ * Tear down one memory window: clear the hardware translation and free
+ * the coherent receive buffer.  Safe to call on an already-freed window.
+ */
+static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
+{
+	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
+	struct pci_dev *pdev = nt->ndev->pdev;
+
+	if (!mw->virt_addr)
+		return;
+
+	ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
+	dma_free_coherent(&pdev->dev, mw->buff_size,
+			  mw->virt_addr, mw->dma_addr);
+	mw->xlat_size = 0;
+	mw->buff_size = 0;
+	mw->virt_addr = NULL;
+}
+
+/*
+ * Allocate an aligned coherent receive buffer for one memory window and
+ * program its DMA address into the NTB translation register.  Re-uses an
+ * existing setup when the requested translation size is unchanged.
+ */
+static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
+		      resource_size_t size)
+{
+	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
+	struct pci_dev *pdev = nt->ndev->pdev;
+	size_t xlat_size, buff_size;
+	int rc;
+
+	if (!size)
+		return -EINVAL;
+
+	xlat_size = round_up(size, mw->xlat_align_size);
+	buff_size = round_up(size, mw->xlat_align);
+
+	/* No need to re-setup */
+	if (mw->xlat_size == xlat_size)
+		return 0;
+
+	if (mw->buff_size)
+		ntb_free_mw(nt, num_mw);
+
+	/* Alloc memory for receiving data.  Must be aligned */
+	mw->xlat_size = xlat_size;
+	mw->buff_size = buff_size;
+
+	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
+					   &mw->dma_addr, GFP_KERNEL);
+	if (!mw->virt_addr) {
+		mw->xlat_size = 0;
+		mw->buff_size = 0;
+		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
+			buff_size);
+		return -ENOMEM;
+	}
+
+	/*
+	 * we must ensure that the memory address allocated is BAR size
+	 * aligned in order for the XLAT register to take the value. This
+	 * is a requirement of the hardware. It is recommended to setup CMA
+	 * for BAR sizes equal or greater than 4MB.
+	 */
+	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
+			&mw->dma_addr);
+		ntb_free_mw(nt, num_mw);
+		return -ENOMEM;
+	}
+
+	/* Notify HW the memory location of the receive buffer */
+	rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
+			      mw->xlat_size);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
+		ntb_free_mw(nt, num_mw);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Mark the qp link down and zero its ring indices and statistics. */
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+	qp->link_is_up = false;
+	qp->active = false;
+
+	qp->tx_index = 0;
+	qp->rx_index = 0;
+	qp->rx_bytes = 0;
+	qp->rx_pkts = 0;
+	qp->rx_ring_empty = 0;
+	qp->rx_err_no_buf = 0;
+	qp->rx_err_oflow = 0;
+	qp->rx_err_ver = 0;
+	qp->rx_memcpy = 0;
+	qp->rx_async = 0;
+	qp->tx_bytes = 0;
+	qp->tx_pkts = 0;
+	qp->tx_ring_full = 0;
+	qp->tx_err_no_buf = 0;
+	qp->tx_memcpy = 0;
+	qp->tx_async = 0;
+}
+
+/*
+ * Bring the qp link down: cancel any pending link work, reset the qp
+ * state, and notify the client's event handler of the link change.
+ */
+static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
+{
+	struct ntb_transport_ctx *nt = qp->transport;
+	struct pci_dev *pdev = nt->ndev->pdev;
+
+	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
+
+	cancel_delayed_work_sync(&qp->link_work);
+	ntb_qp_link_down_reset(qp);
+
+	if (qp->event_handler)
+		qp->event_handler(qp->cb_data, qp->link_is_up);
+}
+
+/*
+ * Deferred qp link cleanup; if the transport-level link is still up,
+ * re-arm the qp link-negotiation work to try to reconnect.
+ */
+static void ntb_qp_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_cleanup);
+	struct ntb_transport_ctx *nt = qp->transport;
+
+	ntb_qp_link_cleanup(qp);
+
+	if (nt->link_is_up)
+		schedule_delayed_work(&qp->link_work,
+				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+/* Schedule qp link teardown from a context that cannot sleep. */
+static void ntb_qp_link_down(struct ntb_transport_qp *qp)
+{
+	schedule_work(&qp->link_cleanup);
+}
+
+/*
+ * Tear down the transport-level link: clean up every allocated qp and
+ * zero all scratchpad registers so stale values are not seen on the
+ * next link-up.
+ */
+static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
+{
+	struct ntb_transport_qp *qp;
+	u64 qp_bitmap_alloc;
+	unsigned int i, count;
+
+	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
+
+	/* Pass along the info to any clients */
+	for (i = 0; i < nt->qp_count; i++)
+		if (qp_bitmap_alloc & BIT_ULL(i)) {
+			qp = &nt->qp_vec[i];
+			ntb_qp_link_cleanup(qp);
+			cancel_work_sync(&qp->link_cleanup);
+			cancel_delayed_work_sync(&qp->link_work);
+		}
+
+	if (!nt->link_is_up)
+		cancel_delayed_work_sync(&nt->link_work);
+
+	/* The scratchpad registers keep the values if the remote side
+	 * goes down, blast them now to give them a sane value the next
+	 * time they are accessed
+	 */
+	count = ntb_spad_count(nt->ndev);
+	for (i = 0; i < count; i++)
+		ntb_spad_write(nt->ndev, i, 0);
+}
+
+/* Workqueue wrapper around ntb_transport_link_cleanup(). */
+static void ntb_transport_link_cleanup_work(struct work_struct *work)
+{
+	struct ntb_transport_ctx *nt =
+		container_of(work, struct ntb_transport_ctx, link_cleanup);
+
+	ntb_transport_link_cleanup(nt);
+}
+
+/*
+ * NTB link-event callback: kick off link negotiation when the hardware
+ * link comes up, or cleanup when it goes down.
+ */
+static void ntb_transport_event_callback(void *data)
+{
+	struct ntb_transport_ctx *nt = data;
+
+	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
+		schedule_delayed_work(&nt->link_work, 0);
+	else
+		schedule_work(&nt->link_cleanup);
+}
+
+/*
+ * Link negotiation work: publish our MW sizes, qp count and protocol
+ * version via peer scratchpads, then read the remote side's values.
+ * When both sides agree, set up the memory windows, mark the transport
+ * link up and start per-qp link negotiation; otherwise retry later.
+ */
+static void ntb_transport_link_work(struct work_struct *work)
+{
+	struct ntb_transport_ctx *nt =
+		container_of(work, struct ntb_transport_ctx, link_work.work);
+	struct ntb_dev *ndev = nt->ndev;
+	struct pci_dev *pdev = ndev->pdev;
+	resource_size_t size;
+	u32 val;
+	int rc = 0, i, spad;
+
+	/* send the local info, in the opposite order of the way we read it */
+	for (i = 0; i < nt->mw_count; i++) {
+		size = nt->mw_vec[i].phys_size;
+
+		if (max_mw_size && size > max_mw_size)
+			size = max_mw_size;
+
+		spad = MW0_SZ_HIGH + (i * 2);
+		ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size));
+
+		spad = MW0_SZ_LOW + (i * 2);
+		ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size));
+	}
+
+	ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
+
+	ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
+
+	/* VERSION is written last: it signals the rest of the info is valid */
+	ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);
+
+	/* Query the remote side for its info */
+	val = ntb_spad_read(ndev, VERSION);
+	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+	if (val != NTB_TRANSPORT_VERSION)
+		goto out;
+
+	val = ntb_spad_read(ndev, NUM_QPS);
+	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
+	if (val != nt->qp_count)
+		goto out;
+
+	val = ntb_spad_read(ndev, NUM_MWS);
+	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
+	if (val != nt->mw_count)
+		goto out;
+
+	for (i = 0; i < nt->mw_count; i++) {
+		u64 val64;
+
+		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
+		val64 = (u64)val << 32;
+
+		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
+		val64 |= val;
+
+		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
+
+		rc = ntb_set_mw(nt, i, val64);
+		if (rc)
+			goto out1;
+	}
+
+	nt->link_is_up = true;
+
+	for (i = 0; i < nt->qp_count; i++) {
+		struct ntb_transport_qp *qp = &nt->qp_vec[i];
+
+		ntb_transport_setup_qp_mw(nt, i);
+
+		if (qp->client_ready)
+			schedule_delayed_work(&qp->link_work, 0);
+	}
+
+	return;
+
+out1:
+	for (i = 0; i < nt->mw_count; i++)
+		ntb_free_mw(nt, i);
+
+	/* if there's an actual failure, we should just bail */
+	if (rc < 0)
+		return;
+
+out:
+	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
+		schedule_delayed_work(&nt->link_work,
+				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+/*
+ * Per-qp link negotiation work: advertise our qp-ready bit to the peer
+ * and check whether the peer has set its matching bit in QP_LINKS.
+ * Brings the qp up on success, otherwise retries while the transport
+ * link stays up.
+ */
+static void ntb_qp_link_work(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_work.work);
+	struct pci_dev *pdev = qp->ndev->pdev;
+	struct ntb_transport_ctx *nt = qp->transport;
+	int val;
+
+	WARN_ON(!nt->link_is_up);
+
+	val = ntb_spad_read(nt->ndev, QP_LINKS);
+
+	/* OR our ready bit into the peer's view of QP_LINKS */
+	ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
+
+	/* query remote spad for qp ready bits */
+	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
+
+	/* See if the remote side is up */
+	if (val & BIT(qp->qp_num)) {
+		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+		qp->link_is_up = true;
+		qp->active = true;
+
+		if (qp->event_handler)
+			qp->event_handler(qp->cb_data, qp->link_is_up);
+
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+	} else if (nt->link_is_up)
+		schedule_delayed_work(&qp->link_work,
+				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+/*
+ * One-time initialization of a queue pair: carve its transmit region
+ * out of the assigned memory window, size the tx ring, create debugfs
+ * entries, and initialize the qp's locks, lists and work items.
+ *
+ * RETURNS: Zero for success, -EINVAL if the tx window region is invalid.
+ */
+static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
+				    unsigned int qp_num)
+{
+	struct ntb_transport_qp *qp;
+	phys_addr_t mw_base;
+	resource_size_t mw_size;
+	unsigned int num_qps_mw, tx_size;
+	unsigned int mw_num, mw_count, qp_count;
+	u64 qp_offset;
+
+	mw_count = nt->mw_count;
+	qp_count = nt->qp_count;
+
+	mw_num = QP_TO_MW(nt, qp_num);
+
+	qp = &nt->qp_vec[qp_num];
+	qp->qp_num = qp_num;
+	qp->transport = nt;
+	qp->ndev = nt->ndev;
+	qp->client_ready = false;
+	qp->event_handler = NULL;
+	ntb_qp_link_down_reset(qp);
+
+	/* the first (qp_count % mw_count) windows host one extra qp */
+	if (mw_num < qp_count % mw_count)
+		num_qps_mw = qp_count / mw_count + 1;
+	else
+		num_qps_mw = qp_count / mw_count;
+
+	mw_base = nt->mw_vec[mw_num].phys_addr;
+	mw_size = nt->mw_vec[mw_num].phys_size;
+
+	if (max_mw_size && mw_size > max_mw_size)
+		mw_size = max_mw_size;
+
+	tx_size = (unsigned int)mw_size / num_qps_mw;
+	qp_offset = tx_size * (qp_num / mw_count);
+
+	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
+	if (!qp->tx_mw)
+		return -EINVAL;
+
+	qp->tx_mw_phys = mw_base + qp_offset;
+	if (!qp->tx_mw_phys)
+		return -EINVAL;
+
+	tx_size -= sizeof(struct ntb_rx_info);
+	qp->rx_info = qp->tx_mw + tx_size;
+
+	/* Due to housekeeping, there must be atleast 2 buffs */
+	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
+	qp->tx_max_entry = tx_size / qp->tx_max_frame;
+
+	if (nt->debugfs_node_dir) {
+		/*
+		 * Buffer enlarged from 4 to 8 bytes: qp_num can exceed 9,
+		 * and "qp10" needs 5 bytes; a 4-byte buffer truncated all
+		 * such names to "qp1".."qp6", colliding in debugfs.
+		 */
+		char debugfs_name[8];
+
+		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
+		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
+						     nt->debugfs_node_dir);
+
+		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
+							qp->debugfs_dir, qp,
+							&ntb_qp_debugfs_stats);
+	} else {
+		qp->debugfs_dir = NULL;
+		qp->debugfs_stats = NULL;
+	}
+
+	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
+	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
+
+	spin_lock_init(&qp->ntb_rx_q_lock);
+	spin_lock_init(&qp->ntb_tx_free_q_lock);
+
+	INIT_LIST_HEAD(&qp->rx_post_q);
+	INIT_LIST_HEAD(&qp->rx_pend_q);
+	INIT_LIST_HEAD(&qp->rx_free_q);
+	INIT_LIST_HEAD(&qp->tx_free_q);
+
+	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
+		     (unsigned long)qp);
+
+	return 0;
+}
+
+/*
+ * NTB client probe: allocate and initialize the transport context for a
+ * new NTB device — map the peer memory windows, size the qp set from
+ * the doorbell mask, init each qp, register the context callbacks and
+ * enable the hardware link.  Unwinds all partial setup on failure.
+ */
+static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
+{
+	struct ntb_transport_ctx *nt;
+	struct ntb_transport_mw *mw;
+	unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
+	u64 qp_bitmap;
+	int node;
+	int rc, i;
+
+	mw_count = ntb_peer_mw_count(ndev);
+
+	if (!ndev->ops->mw_set_trans) {
+		dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
+		return -EINVAL;
+	}
+
+	if (ntb_db_is_unsafe(ndev))
+		dev_dbg(&ndev->dev,
+			"doorbell is unsafe, proceed anyway...\n");
+	if (ntb_spad_is_unsafe(ndev))
+		dev_dbg(&ndev->dev,
+			"scratchpad is unsafe, proceed anyway...\n");
+
+	if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT)
+		dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n");
+
+	node = dev_to_node(&ndev->dev);
+
+	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
+	if (!nt)
+		return -ENOMEM;
+
+	nt->ndev = ndev;
+	spad_count = ntb_spad_count(ndev);
+
+	/* Limit the MW's based on the availability of scratchpads */
+
+	if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
+		nt->mw_count = 0;
+		rc = -EINVAL;
+		goto err;
+	}
+
+	/* each MW consumes two scratchpads (size high/low) */
+	max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
+	nt->mw_count = min(mw_count, max_mw_count_for_spads);
+
+	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
+				  GFP_KERNEL, node);
+	if (!nt->mw_vec) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < mw_count; i++) {
+		mw = &nt->mw_vec[i];
+
+		rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
+				      &mw->xlat_align_size, NULL);
+		if (rc)
+			goto err1;
+
+		rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
+					  &mw->phys_size);
+		if (rc)
+			goto err1;
+
+		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
+		if (!mw->vbase) {
+			rc = -ENOMEM;
+			goto err1;
+		}
+
+		mw->buff_size = 0;
+		mw->xlat_size = 0;
+		mw->virt_addr = NULL;
+		mw->dma_addr = 0;
+	}
+
+	/* one doorbell bit per qp */
+	qp_bitmap = ntb_db_valid_mask(ndev);
+
+	qp_count = ilog2(qp_bitmap);
+	if (max_num_clients && max_num_clients < qp_count)
+		qp_count = max_num_clients;
+	else if (nt->mw_count < qp_count)
+		qp_count = nt->mw_count;
+
+	qp_bitmap &= BIT_ULL(qp_count) - 1;
+
+	nt->qp_count = qp_count;
+	nt->qp_bitmap = qp_bitmap;
+	nt->qp_bitmap_free = qp_bitmap;
+
+	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
+				  GFP_KERNEL, node);
+	if (!nt->qp_vec) {
+		rc = -ENOMEM;
+		goto err1;
+	}
+
+	if (nt_debugfs_dir) {
+		nt->debugfs_node_dir =
+			debugfs_create_dir(pci_name(ndev->pdev),
+					   nt_debugfs_dir);
+	}
+
+	for (i = 0; i < qp_count; i++) {
+		rc = ntb_transport_init_queue(nt, i);
+		if (rc)
+			goto err2;
+	}
+
+	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
+	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
+
+	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
+	if (rc)
+		goto err2;
+
+	INIT_LIST_HEAD(&nt->client_devs);
+	rc = ntb_bus_init(nt);
+	if (rc)
+		goto err3;
+
+	nt->link_is_up = false;
+	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+	ntb_link_event(ndev);
+
+	return 0;
+
+err3:
+	ntb_clear_ctx(ndev);
+err2:
+	kfree(nt->qp_vec);
+err1:
+	/* unmap only the windows that were successfully ioremapped */
+	while (i--) {
+		mw = &nt->mw_vec[i];
+		iounmap(mw->vbase);
+	}
+	kfree(nt->mw_vec);
+err:
+	kfree(nt);
+	return rc;
+}
+
+/*
+ * NTB client remove: tear down the transport in reverse probe order —
+ * link cleanup, qp/debugfs teardown, disable the hardware link, remove
+ * the bus entry, free memory windows and all allocations.
+ */
+static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
+{
+	struct ntb_transport_ctx *nt = ndev->ctx;
+	struct ntb_transport_qp *qp;
+	u64 qp_bitmap_alloc;
+	int i;
+
+	ntb_transport_link_cleanup(nt);
+	cancel_work_sync(&nt->link_cleanup);
+	cancel_delayed_work_sync(&nt->link_work);
+
+	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
+
+	/* verify that all the qp's are freed */
+	for (i = 0; i < nt->qp_count; i++) {
+		qp = &nt->qp_vec[i];
+		if (qp_bitmap_alloc & BIT_ULL(i))
+			ntb_transport_free_queue(qp);
+		debugfs_remove_recursive(qp->debugfs_dir);
+	}
+
+	ntb_link_disable(ndev);
+	ntb_clear_ctx(ndev);
+
+	ntb_bus_remove(nt);
+
+	for (i = nt->mw_count; i--; ) {
+		ntb_free_mw(nt, i);
+		iounmap(nt->mw_vec[i].vbase);
+	}
+
+	kfree(nt->qp_vec);
+	kfree(nt->mw_vec);
+	kfree(nt);
+}
+
+/*
+ * Complete received entries in order: for each entry at the head of
+ * rx_post_q whose DMA/copy is done, acknowledge the frame to the peer,
+ * recycle the entry, and invoke the client's rx handler.  The lock is
+ * dropped around the handler callback.
+ */
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
+{
+	struct ntb_queue_entry *entry;
+	void *cb_data;
+	unsigned int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+	while (!list_empty(&qp->rx_post_q)) {
+		entry = list_first_entry(&qp->rx_post_q,
+					 struct ntb_queue_entry, entry);
+		if (!(entry->flags & DESC_DONE_FLAG))
+			break;
+
+		/* clear the frame header and advance the peer's tail index */
+		entry->rx_hdr->flags = 0;
+		iowrite32(entry->rx_index, &qp->rx_info->entry);
+
+		cb_data = entry->cb_data;
+		len = entry->len;
+
+		list_move_tail(&entry->entry, &qp->rx_free_q);
+
+		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+
+		if (qp->rx_handler && qp->client_ready)
+			qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+	}
+
+	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+/*
+ * Completion callback for an rx copy.  Called with DMA results when the
+ * copy went through the DMA engine (res != NULL), or with NULL after a
+ * synchronous CPU memcpy.  Failed/aborted DMA transfers are retried via
+ * CPU memcpy; successful copies mark the entry done and drive completion.
+ */
+static void ntb_rx_copy_callback(void *data,
+				 const struct dmaengine_result *res)
+{
+	struct ntb_queue_entry *entry = data;
+
+	/* we need to check DMA results if we are using DMA */
+	if (res) {
+		enum dmaengine_tx_result dma_err = res->result;
+
+		switch (dma_err) {
+		case DMA_TRANS_READ_FAILED:
+		case DMA_TRANS_WRITE_FAILED:
+			entry->errors++;
+			/* fall through - retry failed transfer via CPU copy */
+		case DMA_TRANS_ABORTED:
+		{
+			struct ntb_transport_qp *qp = entry->qp;
+			void *offset = qp->rx_buff + qp->rx_max_frame *
+					qp->rx_index;
+
+			ntb_memcpy_rx(entry, offset);
+			qp->rx_memcpy++;
+			return;
+		}
+
+		case DMA_TRANS_NOERROR:
+		default:
+			break;
+		}
+	}
+
+	entry->flags |= DESC_DONE_FLAG;
+
+	ntb_complete_rxc(entry->qp);
+}
+
+/* Synchronous CPU copy of a received frame into the client's buffer. */
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	void *buf = entry->buf;
+	size_t len = entry->len;
+
+	memcpy(buf, offset, len);
+
+	/* Ensure that the data is fully copied out before clearing the flag */
+	wmb();
+
+	ntb_rx_copy_callback(entry, NULL);
+}
+
+/*
+ * Submit an rx copy to the DMA engine: map the ring frame as source and
+ * the client buffer as destination, prepare a memcpy descriptor with
+ * ntb_rx_copy_callback() as its completion, and submit it.
+ *
+ * RETURNS: Zero on success, -ENXIO on any mapping/prep/submit failure
+ * (caller then falls back to a CPU memcpy).
+ */
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
+{
+	struct dma_async_tx_descriptor *txd;
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->rx_dma_chan;
+	struct dma_device *device;
+	size_t pay_off, buff_off, len;
+	struct dmaengine_unmap_data *unmap;
+	dma_cookie_t cookie;
+	void *buf = entry->buf;
+
+	len = entry->len;
+	device = chan->device;
+	pay_off = (size_t)offset & ~PAGE_MASK;
+	buff_off = (size_t)buf & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
+		goto err;
+
+	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+	if (!unmap)
+		goto err;
+
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+				      pay_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[1]))
+		goto err_get_unmap;
+
+	unmap->from_cnt = 1;
+
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
+		goto err_get_unmap;
+
+	txd->callback_result = ntb_rx_copy_callback;
+	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
+
+	qp->last_cookie = cookie;
+
+	qp->rx_async++;
+
+	return 0;
+
+err_set_unmap:
+	/* drop the extra reference taken by dma_set_unmap() too */
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err:
+	return -ENXIO;
+}
+
+/*
+ * Receive a frame: use the DMA engine for payloads of at least
+ * copy_bytes when an rx channel exists, otherwise (or on submit
+ * failure) fall back to a synchronous CPU memcpy.
+ */
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->rx_dma_chan;
+	int res;
+
+	if (!chan)
+		goto err;
+
+	/* small payloads are cheaper to copy with the CPU */
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_rx_submit(entry, offset);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->rx_async++;
+
+	return;
+
+err:
+	ntb_memcpy_rx(entry, offset);
+	qp->rx_memcpy++;
+}
+
+/*
+ * Process one frame at the current rx ring index: validate the payload
+ * header (done flag, link-down flag, sequence/version), pair the frame
+ * with a pending client buffer, and hand it to ntb_async_rx().
+ *
+ * RETURNS: Zero when a frame was consumed; -EAGAIN when the ring is
+ * empty or no client buffer is available; -EIO on sequence mismatch.
+ */
+static int ntb_process_rxc(struct ntb_transport_qp *qp)
+{
+	struct ntb_payload_header *hdr;
+	struct ntb_queue_entry *entry;
+	void *offset;
+
+	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
+	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
+
+	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
+		qp->qp_num, hdr->ver, hdr->len, hdr->flags);
+
+	if (!(hdr->flags & DESC_DONE_FLAG)) {
+		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
+		qp->rx_ring_empty++;
+		return -EAGAIN;
+	}
+
+	if (hdr->flags & LINK_DOWN_FLAG) {
+		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
+		ntb_qp_link_down(qp);
+		hdr->flags = 0;
+		return -EAGAIN;
+	}
+
+	/* frame version must match the low 32 bits of our rx packet count */
+	if (hdr->ver != (u32)qp->rx_pkts) {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"version mismatch, expected %llu - got %u\n",
+			qp->rx_pkts, hdr->ver);
+		qp->rx_err_ver++;
+		return -EIO;
+	}
+
+	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
+	if (!entry) {
+		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
+		qp->rx_err_no_buf++;
+		return -EAGAIN;
+	}
+
+	entry->rx_hdr = hdr;
+	entry->rx_index = qp->rx_index;
+
+	if (hdr->len > entry->len) {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"receive buffer overflow! Wanted %d got %d\n",
+			hdr->len, entry->len);
+		qp->rx_err_oflow++;
+
+		/* complete the entry with an error instead of copying */
+		entry->len = -EIO;
+		entry->flags |= DESC_DONE_FLAG;
+
+		ntb_complete_rxc(qp);
+	} else {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"RX OK index %u ver %u size %d into buf size %d\n",
+			qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+		qp->rx_bytes += hdr->len;
+		qp->rx_pkts++;
+
+		entry->len = hdr->len;
+
+		ntb_async_rx(entry, offset);
+	}
+
+	qp->rx_index++;
+	qp->rx_index %= qp->rx_max_entry;
+
+	return 0;
+}
+
+/*
+ * Doorbell tasklet: drain up to rx_max_entry frames, then either
+ * reschedule (more work pending) or clear the doorbell bit, rechecking
+ * afterwards to close the race with a doorbell that arrived in between.
+ */
+static void ntb_transport_rxc_db(unsigned long data)
+{
+	struct ntb_transport_qp *qp = (void *)data;
+	int rc, i;
+
+	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
+		__func__, qp->qp_num);
+
+	/* Limit the number of packets processed in a single interrupt to
+	 * provide fairness to others
+	 */
+	for (i = 0; i < qp->rx_max_entry; i++) {
+		rc = ntb_process_rxc(qp);
+		if (rc)
+			break;
+	}
+
+	if (i && qp->rx_dma_chan)
+		dma_async_issue_pending(qp->rx_dma_chan);
+
+	if (i == qp->rx_max_entry) {
+		/* there is more work to do */
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
+		/* the doorbell bit is set: clear it */
+		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
+		/* ntb_db_read ensures ntb_db_clear write is committed */
+		ntb_db_read(qp->ndev);
+
+		/* an interrupt may have arrived between finishing
+		 * ntb_process_rxc and clearing the doorbell bit:
+		 * there might be some more work to do.
+		 */
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+	}
+}
+
+/*
+ * Completion callback for a tx copy (DMA result, or NULL after a CPU
+ * copy).  Failed/aborted DMA transfers are resubmitted via CPU copy;
+ * on success the frame header is marked done, the peer's doorbell is
+ * rung, the client tx handler runs, and the entry is recycled.
+ */
+static void ntb_tx_copy_callback(void *data,
+				 const struct dmaengine_result *res)
+{
+	struct ntb_queue_entry *entry = data;
+	struct ntb_transport_qp *qp = entry->qp;
+	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
+
+	/* we need to check DMA results if we are using DMA */
+	if (res) {
+		enum dmaengine_tx_result dma_err = res->result;
+
+		switch (dma_err) {
+		case DMA_TRANS_READ_FAILED:
+		case DMA_TRANS_WRITE_FAILED:
+			entry->errors++;
+			/* fall through - retry failed transfer via CPU copy */
+		case DMA_TRANS_ABORTED:
+		{
+			void __iomem *offset =
+				qp->tx_mw + qp->tx_max_frame *
+				entry->tx_index;
+
+			/* resubmit via CPU */
+			ntb_memcpy_tx(entry, offset);
+			qp->tx_memcpy++;
+			return;
+		}
+
+		case DMA_TRANS_NOERROR:
+		default:
+			break;
+		}
+	}
+
+	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
+
+	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
+
+	/* The entry length can only be zero if the packet is intended to be a
+	 * "link down" or similar.  Since no payload is being sent in these
+	 * cases, there is nothing to add to the completion queue.
+	 */
+	if (entry->len > 0) {
+		qp->tx_bytes += entry->len;
+
+		if (qp->tx_handler)
+			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
+				       entry->len);
+	}
+
+	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
+}
+
+/*
+ * Synchronous CPU copy of a TX payload into the peer-visible memory
+ * window at @offset, then run the common completion path (res == NULL
+ * marks the CPU path to ntb_tx_copy_callback()).
+ */
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
+{
+#ifdef ARCH_HAS_NOCACHE_UACCESS
+	/*
+	 * Using non-temporal mov to improve performance on non-cached
+	 * writes, even though we aren't actually copying from user space.
+	 */
+	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
+#else
+	memcpy_toio(offset, entry->buf, entry->len);
+#endif
+
+	/* Ensure that the data is fully copied out before setting the flags */
+	wmb();
+
+	ntb_tx_copy_callback(entry, NULL);
+}
+
+/*
+ * Queue one TX payload copy on the DMA engine.
+ *
+ * Returns 0 when the memcpy descriptor was submitted (completion will run
+ * ntb_tx_copy_callback()), or -ENXIO on any failure so the caller falls
+ * back to a CPU copy.
+ */
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+			       struct ntb_queue_entry *entry)
+{
+	struct dma_async_tx_descriptor *txd;
+	struct dma_chan *chan = qp->tx_dma_chan;
+	struct dma_device *device;
+	size_t len = entry->len;
+	void *buf = entry->buf;
+	size_t dest_off, buff_off;
+	struct dmaengine_unmap_data *unmap;
+	dma_addr_t dest;
+	dma_cookie_t cookie;
+
+	device = chan->device;
+	/* Destination is this entry's slot in the peer-mapped TX window */
+	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
+	buff_off = (size_t)buf & ~PAGE_MASK;
+	dest_off = (size_t)dest & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
+		goto err;
+
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
+		goto err;
+
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
+		goto err_get_unmap;
+
+	txd->callback_result = ntb_tx_copy_callback;
+	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err_set_unmap;
+
+	/* Drop our reference; the descriptor holds its own until completion */
+	dmaengine_unmap_put(unmap);
+
+	dma_async_issue_pending(chan);
+
+	return 0;
+err_set_unmap:
+	/* NOTE(review): this path puts the unmap twice (falls through to
+	 * err_get_unmap) - presumably to balance the extra reference taken
+	 * by dma_set_unmap(); confirm against dmaengine unmap refcounting.
+	 */
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err:
+	return -ENXIO;
+}
+
+/*
+ * Write the frame header for this entry's TX slot and send the payload.
+ * DMA is used only when a channel exists and the payload is at least
+ * copy_bytes long; otherwise (or on any submit failure) the frame is
+ * copied synchronously by the CPU.
+ */
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+			 struct ntb_queue_entry *entry)
+{
+	struct ntb_payload_header __iomem *hdr;
+	struct dma_chan *chan = qp->tx_dma_chan;
+	void __iomem *offset;
+	int res;
+
+	/* Header lives at the tail of this slot in the TX memory window */
+	entry->tx_index = qp->tx_index;
+	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
+	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	entry->tx_hdr = hdr;
+
+	iowrite32(entry->len, &hdr->len);
+	iowrite32((u32)qp->tx_pkts, &hdr->ver);
+
+	if (!chan)
+		goto err;
+
+	/* Small payloads are cheaper to copy with the CPU */
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_tx_submit(qp, entry);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->tx_async++;
+
+	return;
+
+err:
+	ntb_memcpy_tx(entry, offset);
+	qp->tx_memcpy++;
+}
+
+/*
+ * Attempt to transmit one queue entry.
+ *
+ * Returns -EAGAIN when the TX ring is full.  Oversized frames are not
+ * sent: they are completed immediately through the client's tx_handler
+ * with -EIO and the function returns 0.
+ */
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+			  struct ntb_queue_entry *entry)
+{
+	/* Ring full when our producer index catches the peer's consumer */
+	if (qp->tx_index == qp->remote_rx_info->entry) {
+		qp->tx_ring_full++;
+		return -EAGAIN;
+	}
+
+	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
+		if (qp->tx_handler)
+			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
+
+		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+			     &qp->tx_free_q);
+		return 0;
+	}
+
+	ntb_async_tx(qp, entry);
+
+	qp->tx_index++;
+	qp->tx_index %= qp->tx_max_entry;
+
+	qp->tx_pkts++;
+
+	return 0;
+}
+
+/*
+ * Send a zero-length LINK_DOWN control frame so the peer tears down its
+ * side of the queue pair, then reset local queue state.  Waits up to
+ * NTB_LINK_DOWN_TIMEOUT * 100ms for a free TX entry before giving up.
+ */
+static void ntb_send_link_down(struct ntb_transport_qp *qp)
+{
+	struct pci_dev *pdev = qp->ndev->pdev;
+	struct ntb_queue_entry *entry;
+	int i, rc;
+
+	if (!qp->link_is_up)
+		return;
+
+	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
+
+	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
+		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+		if (entry)
+			break;
+		msleep(100);
+	}
+
+	if (!entry)
+		return;
+
+	/* Zero-length frame flagged LINK_DOWN; no payload, no completion */
+	entry->cb_data = NULL;
+	entry->buf = NULL;
+	entry->len = 0;
+	entry->flags = LINK_DOWN_FLAG;
+
+	rc = ntb_process_tx(qp, entry);
+	if (rc)
+		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
+			qp->qp_num);
+
+	ntb_qp_link_down_reset(qp);
+}
+
+/* dma_request_channel() filter: accept only channels whose device sits on
+ * the NUMA node encoded in @node.
+ */
+static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+	int want_node = (int)(unsigned long)node;
+
+	return dev_to_node(&chan->dev->device) == want_node;
+}
+
+/**
+ * ntb_transport_create_queue - Create a new NTB transport layer queue
+ * @data: pointer for callback data
+ * @client_dev: &struct device pointer of the NTB client device
+ * @handlers: pointer to the struct of rx, tx and event callback functions
+ *
+ * Create a new NTB transport layer queue and provide the queue with a callback
+ * routine for both transmit and receive.  The receive callback routine will be
+ * used to pass up data when the transport has received it on the queue.   The
+ * transmit callback routine will be called when the transport has completed the
+ * transmission of the data on the queue and the data is ready to be freed.
+ *
+ * RETURNS: pointer to newly created ntb_queue, NULL on error.
+ */
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct device *client_dev,
+			   const struct ntb_queue_handlers *handlers)
+{
+	struct ntb_dev *ndev;
+	struct pci_dev *pdev;
+	struct ntb_transport_ctx *nt;
+	struct ntb_queue_entry *entry;
+	struct ntb_transport_qp *qp;
+	u64 qp_bit;
+	unsigned int free_queue;
+	dma_cap_mask_t dma_mask;
+	int node;
+	int i;
+
+	ndev = dev_ntb(client_dev->parent);
+	pdev = ndev->pdev;
+	nt = ndev->ctx;
+
+	node = dev_to_node(&ndev->dev);
+
+	/* Claim the lowest-numbered free queue pair */
+	free_queue = ffs(nt->qp_bitmap_free);
+	if (!free_queue)
+		goto err;
+
+	/* decrement free_queue to make it zero based */
+	free_queue--;
+
+	qp = &nt->qp_vec[free_queue];
+	qp_bit = BIT_ULL(qp->qp_num);
+
+	nt->qp_bitmap_free &= ~qp_bit;
+
+	qp->cb_data = data;
+	qp->rx_handler = handlers->rx_handler;
+	qp->tx_handler = handlers->tx_handler;
+	qp->event_handler = handlers->event_handler;
+
+	dma_cap_zero(dma_mask);
+	dma_cap_set(DMA_MEMCPY, dma_mask);
+
+	/* DMA channels are optional; missing ones fall back to CPU copies */
+	if (use_dma) {
+		qp->tx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->tx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
+
+		qp->rx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->rx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
+	} else {
+		qp->tx_dma_chan = NULL;
+		qp->rx_dma_chan = NULL;
+	}
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
+		qp->tx_dma_chan ? "DMA" : "CPU");
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
+		qp->rx_dma_chan ? "DMA" : "CPU");
+
+	/* Pre-populate the RX free list */
+	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
+		if (!entry)
+			goto err1;
+
+		entry->qp = qp;
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
+			     &qp->rx_free_q);
+	}
+	qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
+
+	/* Pre-populate the TX free list, one entry per ring slot */
+	for (i = 0; i < qp->tx_max_entry; i++) {
+		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
+		if (!entry)
+			goto err2;
+
+		entry->qp = qp;
+		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+			     &qp->tx_free_q);
+	}
+
+	ntb_db_clear(qp->ndev, qp_bit);
+	ntb_db_clear_mask(qp->ndev, qp_bit);
+
+	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
+
+	return qp;
+
+err2:
+	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+		kfree(entry);
+err1:
+	qp->rx_alloc_entry = 0;
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
+		kfree(entry);
+	if (qp->tx_dma_chan)
+		dma_release_channel(qp->tx_dma_chan);
+	if (qp->rx_dma_chan)
+		dma_release_channel(qp->rx_dma_chan);
+	nt->qp_bitmap_free |= qp_bit;
+err:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
+
+/**
+ * ntb_transport_free_queue - Frees NTB transport queue
+ * @qp: NTB queue to be freed
+ *
+ * Frees NTB transport queue: quiesces DMA, masks the doorbell, kills the
+ * RX tasklet and link work, releases all queue entries and returns the
+ * queue pair to the free bitmap.
+ */
+void ntb_transport_free_queue(struct ntb_transport_qp *qp)
+{
+	struct pci_dev *pdev;
+	struct ntb_queue_entry *entry;
+	u64 qp_bit;
+
+	if (!qp)
+		return;
+
+	pdev = qp->ndev->pdev;
+
+	qp->active = false;
+
+	if (qp->tx_dma_chan) {
+		struct dma_chan *chan = qp->tx_dma_chan;
+		/* Putting the dma_chan to NULL will force any new traffic to be
+		 * processed by the CPU instead of the DMA engine
+		 */
+		qp->tx_dma_chan = NULL;
+
+		/* Try to be nice and wait for any queued DMA engine
+		 * transactions to process before smashing it with a rock
+		 */
+		dma_sync_wait(chan, qp->last_cookie);
+		dmaengine_terminate_all(chan);
+		dma_release_channel(chan);
+	}
+
+	if (qp->rx_dma_chan) {
+		struct dma_chan *chan = qp->rx_dma_chan;
+		/* Putting the dma_chan to NULL will force any new traffic to be
+		 * processed by the CPU instead of the DMA engine
+		 */
+		qp->rx_dma_chan = NULL;
+
+		/* Try to be nice and wait for any queued DMA engine
+		 * transactions to process before smashing it with a rock
+		 */
+		dma_sync_wait(chan, qp->last_cookie);
+		dmaengine_terminate_all(chan);
+		dma_release_channel(chan);
+	}
+
+	qp_bit = BIT_ULL(qp->qp_num);
+
+	/* Mask the doorbell before killing the tasklet so it can't respawn */
+	ntb_db_set_mask(qp->ndev, qp_bit);
+	tasklet_kill(&qp->rxc_db_work);
+
+	cancel_delayed_work_sync(&qp->link_work);
+
+	qp->cb_data = NULL;
+	qp->rx_handler = NULL;
+	qp->tx_handler = NULL;
+	qp->event_handler = NULL;
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
+		kfree(entry);
+
+	/* pend/post queues should be empty by now; warn if the client left
+	 * buffers enqueued
+	 */
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+		kfree(entry);
+
+	qp->transport->qp_bitmap_free |= qp_bit;
+
+	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
+
+/**
+ * ntb_transport_rx_remove - Dequeues enqueued rx packet
+ * @qp: NTB queue to be freed
+ * @len: pointer to variable to write enqueued buffers length
+ *
+ * Dequeues unused buffers from receive queue.  Should only be used during
+ * shutdown of qp.  Refused while the client is still marked ready.
+ *
+ * RETURNS: NULL error value on error, or void* for success.
+ */
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
+{
+	struct ntb_queue_entry *entry;
+	void *buf;
+
+	if (!qp || qp->client_ready)
+		return NULL;
+
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
+	if (!entry)
+		return NULL;
+
+	buf = entry->cb_data;
+	*len = entry->len;
+
+	/* Recycle the entry; the client keeps ownership of the buffer */
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
+
+/**
+ * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that incoming packets will be copied into
+ * @len: length of the data buffer
+ *
+ * Enqueue a new receive buffer onto the transport queue into which a NTB
+ * payload can be received into.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+			     unsigned int len)
+{
+	struct ntb_queue_entry *entry;
+
+	if (!qp)
+		return -EINVAL;
+
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->cb_data = cb;
+	entry->buf = data;
+	entry->len = len;
+	entry->flags = 0;
+	entry->retries = 0;
+	entry->errors = 0;
+	entry->rx_index = 0;
+
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
+
+	/* A frame may already be waiting for this buffer; poke the tasklet */
+	if (qp->active)
+		tasklet_schedule(&qp->rxc_db_work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
+
+/**
+ * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that will be sent
+ * @len: length of the data buffer
+ *
+ * Enqueue a new transmit buffer onto the transport queue from which a NTB
+ * payload will be transmitted.  This assumes that a lock is being held to
+ * serialize access to the qp.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+			     unsigned int len)
+{
+	struct ntb_queue_entry *entry;
+	int rc;
+
+	if (!qp || !qp->link_is_up || !len)
+		return -EINVAL;
+
+	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+	if (!entry) {
+		qp->tx_err_no_buf++;
+		return -EBUSY;
+	}
+
+	entry->cb_data = cb;
+	entry->buf = data;
+	entry->len = len;
+	entry->flags = 0;
+	entry->errors = 0;
+	entry->retries = 0;
+	entry->tx_index = 0;
+
+	/* On failure (e.g. ring full) return the entry to the free list */
+	rc = ntb_process_tx(qp, entry);
+	if (rc)
+		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+			     &qp->tx_free_q);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
+
+/**
+ * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
+ * @qp: NTB transport layer queue to be enabled
+ *
+ * Notify NTB transport layer of client readiness to use queue.  If the
+ * underlying transport link is already up, queue-pair link negotiation is
+ * kicked off immediately.
+ */
+void ntb_transport_link_up(struct ntb_transport_qp *qp)
+{
+	if (!qp)
+		return;
+
+	qp->client_ready = true;
+
+	if (qp->transport->link_is_up)
+		schedule_delayed_work(&qp->link_work, 0);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_up);
+
+/**
+ * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
+ * @qp: NTB transport layer queue to be disabled
+ *
+ * Notify NTB transport layer of client's desire to no longer receive data on
+ * transport queue specified.  It is the client's responsibility to ensure all
+ * entries on queue are purged or otherwise handled appropriately.
+ */
+void ntb_transport_link_down(struct ntb_transport_qp *qp)
+{
+	int val;
+
+	if (!qp)
+		return;
+
+	qp->client_ready = false;
+
+	/* Clear our qp bit in the peer's QP_LINKS scratchpad */
+	val = ntb_spad_read(qp->ndev, QP_LINKS);
+
+	ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
+
+	if (qp->link_is_up)
+		ntb_send_link_down(qp);
+	else
+		cancel_delayed_work_sync(&qp->link_work);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_down);
+
+/**
+ * ntb_transport_link_query - Query transport link state
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Report whether the transport link to the remote system is currently up
+ * for the given queue.
+ *
+ * RETURNS: true for link up or false for link down
+ */
+bool ntb_transport_link_query(struct ntb_transport_qp *qp)
+{
+	if (qp)
+		return qp->link_is_up;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_query);
+
+/**
+ * ntb_transport_qp_num - Query the qp number
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Report the zero-based queue-pair number of the given transport queue.
+ *
+ * RETURNS: a zero based number specifying the qp number
+ */
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
+{
+	if (qp)
+		return qp->qp_num;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
+
+/**
+ * ntb_transport_max_size - Query the max payload size of a qp
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query the maximum payload size permissible on the given qp.  The frame
+ * size minus the payload header is rounded down to satisfy the copy
+ * alignment of whichever DMA channels the queue holds.
+ *
+ * RETURNS: the max payload size of a qp
+ */
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
+{
+	unsigned int max_size;
+	unsigned int copy_align;
+	struct dma_chan *rx_chan, *tx_chan;
+
+	if (!qp)
+		return 0;
+
+	rx_chan = qp->rx_dma_chan;
+	tx_chan = qp->tx_dma_chan;
+
+	/* Strictest alignment of the two channels (0 when CPU-only) */
+	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
+			 tx_chan ? tx_chan->device->copy_align : 0);
+
+	/* If DMA engine usage is possible, try to find the max size for that */
+	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	max_size = round_down(max_size, 1 << copy_align);
+
+	return max_size;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_max_size);
+
+/* Number of free slots in the TX ring, derived from our producer index
+ * and the peer's consumer index (modulo the ring size).
+ */
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+{
+	unsigned int head = qp->tx_index;
+	unsigned int tail = qp->remote_rx_info->entry;
+
+	if (tail > head)
+		return tail - head;
+
+	return qp->tx_max_entry + tail - head;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+
+/*
+ * NTB doorbell event handler: schedule the RX tasklet of every active,
+ * in-use queue pair whose doorbell bit is routed to this vector.
+ */
+static void ntb_transport_doorbell_callback(void *data, int vector)
+{
+	struct ntb_transport_ctx *nt = data;
+	struct ntb_transport_qp *qp;
+	u64 db_bits;
+	unsigned int qp_num;
+
+	/* Only bits belonging to allocated queue pairs on this vector */
+	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
+		   ntb_db_vector_mask(nt->ndev, vector));
+
+	while (db_bits) {
+		qp_num = __ffs(db_bits);
+		qp = &nt->qp_vec[qp_num];
+
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+
+		db_bits &= ~BIT_ULL(qp_num);
+	}
+}
+
+/* Context callbacks registered with the NTB core: link-state changes and
+ * per-vector doorbell events are routed into the transport layer.
+ */
+static const struct ntb_ctx_ops ntb_transport_ops = {
+	.link_event = ntb_transport_event_callback,
+	.db_event = ntb_transport_doorbell_callback,
+};
+
+/* Client registration: probe/remove run as NTB devices appear/vanish. */
+static struct ntb_client ntb_transport_client = {
+	.ops = {
+		.probe = ntb_transport_probe,
+		.remove = ntb_transport_free,
+	},
+};
+
+/*
+ * Module init: create the debugfs root, register the transport bus, then
+ * register as an NTB client.  Unwinds in reverse order on failure.
+ */
+static int __init ntb_transport_init(void)
+{
+	int rc;
+
+	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
+
+	if (debugfs_initialized())
+		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	rc = bus_register(&ntb_transport_bus);
+	if (rc)
+		goto err_bus;
+
+	rc = ntb_register_client(&ntb_transport_client);
+	if (rc)
+		goto err_client;
+
+	return 0;
+
+err_client:
+	bus_unregister(&ntb_transport_bus);
+err_bus:
+	debugfs_remove_recursive(nt_debugfs_dir);
+	return rc;
+}
+module_init(ntb_transport_init);
+
+/* Module exit: tear down in strict reverse order of ntb_transport_init(). */
+static void __exit ntb_transport_exit(void)
+{
+	ntb_unregister_client(&ntb_transport_client);
+	bus_unregister(&ntb_transport_bus);
+	debugfs_remove_recursive(nt_debugfs_dir);
+}
+module_exit(ntb_transport_exit);
diff --git a/src/kernel/linux/v4.14/drivers/ntb/test/Kconfig b/src/kernel/linux/v4.14/drivers/ntb/test/Kconfig
new file mode 100644
index 0000000..a5d0eda
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/test/Kconfig
@@ -0,0 +1,27 @@
+config NTB_PINGPONG
+	tristate "NTB Ping Pong Test Client"
+	help
+	 This is a simple ping pong driver that exercises the scratchpads and
+	 doorbells of the ntb hardware.  This driver may be used to test that
+	 your ntb hardware and drivers are functioning at a basic level.
+
+	 If unsure, say N.
+
+config NTB_TOOL
+	tristate "NTB Debugging Tool Test Client"
+	help
+	 This is a simple debugging driver that enables the doorbell and
+	 scratchpad registers to be read and written from the debugfs.  This
+	 enables more complicated debugging to be scripted from user space.
+	 This driver may be used to test that your ntb hardware and drivers are
+	 functioning at a basic level.
+
+	 If unsure, say N.
+
+config NTB_PERF
+	tristate "NTB RAW Perf Measuring Tool"
+	help
+	 This is a tool to measure raw NTB performance by transferring data
+	 to and from the window without additional software interaction.
+
+	 If unsure, say N.
diff --git a/src/kernel/linux/v4.14/drivers/ntb/test/Makefile b/src/kernel/linux/v4.14/drivers/ntb/test/Makefile
new file mode 100644
index 0000000..9e77e0b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/test/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NTB_PINGPONG) += ntb_pingpong.o
+obj-$(CONFIG_NTB_TOOL) += ntb_tool.o
+obj-$(CONFIG_NTB_PERF) += ntb_perf.o
diff --git a/src/kernel/linux/v4.14/drivers/ntb/test/ntb_perf.c b/src/kernel/linux/v4.14/drivers/ntb/test/ntb_perf.c
new file mode 100644
index 0000000..759f772
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/test/ntb_perf.c
@@ -0,0 +1,899 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *   PCIe NTB Perf Linux driver
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/sizes.h>
+#include <linux/ntb.h>
+#include <linux/mutex.h>
+
+#define DRIVER_NAME		"ntb_perf"
+#define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"
+
+#define DRIVER_LICENSE		"Dual BSD/GPL"
+#define DRIVER_VERSION		"1.0"
+#define DRIVER_AUTHOR		"Dave Jiang <dave.jiang@intel.com>"
+
+#define PERF_LINK_DOWN_TIMEOUT	10
+#define PERF_VERSION		0xffff0001
+#define MAX_THREADS		32
+#define MAX_TEST_SIZE		SZ_1M
+#define MAX_SRCS		32
+#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
+#define DMA_RETRIES		20
+#define SZ_4G			(1ULL << 32)
+#define MAX_SEG_ORDER		20 /* no larger than 1M for kmalloc buffer */
+#define PIDX			NTB_DEF_PEER_IDX
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static struct dentry *perf_debugfs_dir;
+
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
+static unsigned int seg_order = 19; /* 512K */
+module_param(seg_order, uint, 0644);
+MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
+
+static unsigned int run_order = 32; /* 4G */
+module_param(run_order, uint, 0644);
+MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
+
+static bool use_dma; /* default to 0 */
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Using DMA engine to measure performance");
+
+static bool on_node = true; /* default to 1 */
+module_param(on_node, bool, 0644);
+MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)");
+
+struct perf_mw {
+	phys_addr_t	phys_addr;
+	resource_size_t	phys_size;
+	resource_size_t	xlat_align;
+	resource_size_t	xlat_align_size;
+	void __iomem	*vbase;
+	size_t		xlat_size;
+	size_t		buf_size;
+	void		*virt_addr;
+	dma_addr_t	dma_addr;
+};
+
+struct perf_ctx;
+
+struct pthr_ctx {
+	struct task_struct	*thread;
+	struct perf_ctx		*perf;
+	atomic_t		dma_sync;
+	struct dma_chan		*dma_chan;
+	int			dma_prep_err;
+	int			src_idx;
+	void			*srcs[MAX_SRCS];
+	wait_queue_head_t       *wq;
+	int			status;
+	u64			copied;
+	u64			diff_us;
+};
+
+struct perf_ctx {
+	struct ntb_dev		*ntb;
+	spinlock_t		db_lock;
+	struct perf_mw		mw;
+	bool			link_is_up;
+	struct delayed_work	link_work;
+	wait_queue_head_t	link_wq;
+	u8			perf_threads;
+	/* mutex ensures only one set of threads run at once */
+	struct mutex		run_mutex;
+	struct pthr_ctx		pthr_ctx[MAX_THREADS];
+	atomic_t		tsync;
+	atomic_t                tdone;
+};
+
+enum {
+	VERSION = 0,
+	MW_SZ_HIGH,
+	MW_SZ_LOW,
+	MAX_SPAD
+};
+
+/*
+ * NTB link event: on link-up, schedule the (delayed) link worker to
+ * exchange window sizes with the peer; otherwise mark the link down,
+ * cancelling any link work that is still pending from a link that never
+ * came up.
+ */
+static void perf_link_event(void *ctx)
+{
+	struct perf_ctx *perf = ctx;
+
+	if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1) {
+		schedule_delayed_work(&perf->link_work, 2*HZ);
+	} else {
+		dev_dbg(&perf->ntb->pdev->dev, "link down\n");
+
+		if (!perf->link_is_up)
+			cancel_delayed_work_sync(&perf->link_work);
+
+		perf->link_is_up = false;
+	}
+}
+
+/*
+ * NTB doorbell event: this client does not use doorbells for signalling,
+ * so just log the vector mask and pending bits for debugging.
+ */
+static void perf_db_event(void *ctx, int vec)
+{
+	struct perf_ctx *perf = ctx;
+	u64 db_bits, db_mask;
+
+	db_mask = ntb_db_vector_mask(perf->ntb, vec);
+	db_bits = ntb_db_read(perf->ntb);
+
+	dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
+		vec, db_mask, db_bits);
+}
+
+/* NTB context callbacks for the perf test client. */
+static const struct ntb_ctx_ops perf_ops = {
+	.link_event = perf_link_event,
+	.db_event = perf_db_event,
+};
+
+/* DMA completion callback: one fewer outstanding descriptor in flight. */
+static void perf_copy_callback(void *data)
+{
+	struct pthr_ctx *thread_ctx = data;
+
+	atomic_dec(&thread_ctx->dma_sync);
+}
+
+/*
+ * Copy one buffer segment into the peer memory window, either with
+ * memcpy_toio() (use_dma off) or by submitting a dmaengine memcpy
+ * descriptor, retrying descriptor allocation up to DMA_RETRIES times.
+ *
+ * Returns the number of bytes handed off, a negative errno, or 0 when
+ * descriptor prep/submit ultimately failed.
+ * NOTE(review): the 0-on-failure return is indistinguishable from "zero
+ * bytes copied" to callers that accumulate the result - verify callers.
+ */
+static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
+			 char *src, size_t size)
+{
+	struct perf_ctx *perf = pctx->perf;
+	struct dma_async_tx_descriptor *txd;
+	struct dma_chan *chan = pctx->dma_chan;
+	struct dma_device *device;
+	struct dmaengine_unmap_data *unmap;
+	dma_cookie_t cookie;
+	size_t src_off, dst_off;
+	struct perf_mw *mw = &perf->mw;
+	void __iomem *vbase;
+	void __iomem *dst_vaddr;
+	dma_addr_t dst_phys;
+	int retries = 0;
+
+	if (!use_dma) {
+		memcpy_toio(dst, src, size);
+		return size;
+	}
+
+	if (!chan) {
+		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
+		return -EINVAL;
+	}
+
+	device = chan->device;
+	src_off = (uintptr_t)src & ~PAGE_MASK;
+	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
+		return -ENODEV;
+
+	/* Translate the virtual window offset into a bus address for DMA */
+	vbase = mw->vbase;
+	dst_vaddr = dst;
+	dst_phys = mw->phys_addr + (dst_vaddr - vbase);
+
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
+		return -ENOMEM;
+
+	unmap->len = size;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
+				      src_off, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	/* Descriptor pool may be transiently exhausted; back off and retry */
+	do {
+		txd = device->device_prep_dma_memcpy(chan, dst_phys,
+						     unmap->addr[0],
+						     size, DMA_PREP_INTERRUPT);
+		if (!txd) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(DMA_OUT_RESOURCE_TO);
+		}
+	} while (!txd && (++retries < DMA_RETRIES));
+
+	if (!txd) {
+		pctx->dma_prep_err++;
+		goto err_get_unmap;
+	}
+
+	txd->callback = perf_copy_callback;
+	txd->callback_param = pctx;
+	dma_set_unmap(txd, unmap);
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err_set_unmap;
+
+	/* Drop our reference; the descriptor holds its own until completion */
+	dmaengine_unmap_put(unmap);
+
+	atomic_inc(&pctx->dma_sync);
+	dma_async_issue_pending(chan);
+
+	return size;
+
+err_set_unmap:
+	/* NOTE(review): two puts on this path - presumably balancing the
+	 * reference taken by dma_set_unmap(); confirm against dmaengine
+	 * unmap refcounting.
+	 */
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+	return 0;
+}
+
+/*
+ * perf_move_data - run one thread's copy loop and record throughput
+ * @pctx:     per-thread context (DMA channel, stats)
+ * @dst:      base of the peer-visible memory window
+ * @src:      source buffer (buf_size bytes)
+ * @buf_size: bytes per copied chunk
+ * @win_size: size of the window; writes wrap back to @dst at this bound
+ * @total:    total bytes to transfer
+ *
+ * Copies @total bytes in buf_size chunks, yielding periodically, then
+ * drains outstanding DMA descriptors and stores copied bytes and elapsed
+ * microseconds into @pctx.  Returns 0 on success or a negative errno.
+ */
+static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
+			  u64 buf_size, u64 win_size, u64 total)
+{
+	int chunks, total_chunks, i;
+	int copied_chunks = 0;
+	u64 copied = 0;
+	ssize_t result;
+	char __iomem *tmp = dst;
+	u64 perf, diff_us;
+	ktime_t kstart, kstop, kdiff;
+	unsigned long last_sleep = jiffies;
+	int err = 0;
+
+	chunks = div64_u64(win_size, buf_size);
+	total_chunks = div64_u64(total, buf_size);
+	kstart = ktime_get();
+
+	for (i = 0; i < total_chunks; i++) {
+		result = perf_copy(pctx, tmp, src, buf_size);
+		if (result < 0) {
+			/* Bug fix: perf_copy() returns negative errnos;
+			 * previously they were summed into 'copied',
+			 * corrupting the throughput statistics.
+			 */
+			err = (int)result;
+			break;
+		}
+		copied += result;
+		copied_chunks++;
+		if (copied_chunks == chunks) {
+			/* wrapped past the window: restart at its base */
+			tmp = dst;
+			copied_chunks = 0;
+		} else
+			tmp += buf_size;
+
+		/* Probably should schedule every 5s to prevent soft hang. */
+		if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
+			last_sleep = jiffies;
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(1);
+		}
+
+		if (unlikely(kthread_should_stop()))
+			break;
+	}
+
+	if (use_dma) {
+		/* Drain in-flight descriptors (even after an error) so the
+		 * DMA engine no longer references @src when we return.
+		 */
+		pr_debug("%s: All DMA descriptors submitted\n", current->comm);
+		while (atomic_read(&pctx->dma_sync) != 0) {
+			if (kthread_should_stop())
+				break;
+			msleep(20);
+		}
+	}
+
+	if (err)
+		return err;
+
+	kstop = ktime_get();
+	kdiff = ktime_sub(kstop, kstart);
+	diff_us = ktime_to_us(kdiff);
+
+	pr_debug("%s: copied %llu bytes\n", current->comm, copied);
+
+	pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);
+
+	/* Bug fix: guard against division by zero on sub-microsecond runs */
+	perf = div64_u64(copied, diff_us ? diff_us : 1);
+
+	pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);
+
+	pctx->copied = copied;
+	pctx->diff_us = diff_us;
+
+	return 0;
+}
+
+/* dma_request_channel() filter: when on_node is set, only accept channels
+ * on the NUMA node encoded in @node; otherwise accept any channel.
+ */
+static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+	int desired_node = (int)(unsigned long)node;
+
+	/* Is the channel required to be on the same node as the device? */
+	if (!on_node)
+		return true;
+
+	return dev_to_node(&chan->dev->device) == desired_node;
+}
+
+/*
+ * Worker kthread for one measurement thread: optionally acquires a DMA
+ * channel on the requested NUMA node, allocates MAX_SRCS source buffers,
+ * synchronizes the start with the other threads via perf->tsync, runs the
+ * copy loop, then parks until kthread_stop().
+ */
+static int ntb_perf_thread(void *data)
+{
+	struct pthr_ctx *pctx = data;
+	struct perf_ctx *perf = pctx->perf;
+	struct pci_dev *pdev = perf->ntb->pdev;
+	struct perf_mw *mw = &perf->mw;
+	char __iomem *dst;
+	u64 win_size, buf_size, total;
+	void *src;
+	int rc, node, i;
+	struct dma_chan *dma_chan = NULL;
+
+	pr_debug("kthread %s starting...\n", current->comm);
+
+	node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE;
+
+	if (use_dma && !pctx->dma_chan) {
+		dma_cap_mask_t dma_mask;
+
+		dma_cap_zero(dma_mask);
+		dma_cap_set(DMA_MEMCPY, dma_mask);
+		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
+					       (void *)(unsigned long)node);
+		if (!dma_chan) {
+			pr_warn("%s: cannot acquire DMA channel, quitting\n",
+				current->comm);
+			return -ENODEV;
+		}
+		pctx->dma_chan = dma_chan;
+	}
+
+	for (i = 0; i < MAX_SRCS; i++) {
+		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
+		if (!pctx->srcs[i]) {
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+
+	win_size = mw->phys_size;
+	buf_size = 1ULL << seg_order;
+	total = 1ULL << run_order;
+
+	if (buf_size > MAX_TEST_SIZE)
+		buf_size = MAX_TEST_SIZE;
+
+	dst = (char __iomem *)mw->vbase;
+
+	/* Barrier: wait until every measurement thread has reached here */
+	atomic_inc(&perf->tsync);
+	while (atomic_read(&perf->tsync) != perf->perf_threads)
+		schedule();
+
+	/* Rotate through the source buffers across runs */
+	src = pctx->srcs[pctx->src_idx];
+	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);
+
+	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);
+
+	atomic_dec(&perf->tsync);
+
+	if (rc < 0) {
+		pr_err("%s: failed\n", current->comm);
+		rc = -ENXIO;
+		goto err;
+	}
+
+	for (i = 0; i < MAX_SRCS; i++) {
+		kfree(pctx->srcs[i]);
+		pctx->srcs[i] = NULL;
+	}
+
+	/* Report completion to the debugfs run waiter */
+	atomic_inc(&perf->tdone);
+	wake_up(pctx->wq);
+	rc = 0;
+	goto done;
+
+err:
+	for (i = 0; i < MAX_SRCS; i++) {
+		kfree(pctx->srcs[i]);
+		pctx->srcs[i] = NULL;
+	}
+
+	if (dma_chan) {
+		dma_release_channel(dma_chan);
+		pctx->dma_chan = NULL;
+	}
+
+done:
+	/* Wait until we are told to stop */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
+	return rc;
+}
+
+/*
+ * Tear down the inbound memory window: clear the NTB translation and free
+ * the coherent buffer.  Safe to call when nothing is currently mapped.
+ */
+static void perf_free_mw(struct perf_ctx *perf)
+{
+	struct perf_mw *mw = &perf->mw;
+	struct pci_dev *pdev = perf->ntb->pdev;
+
+	if (!mw->virt_addr)
+		return;
+
+	ntb_mw_clear_trans(perf->ntb, PIDX, 0);
+	dma_free_coherent(&pdev->dev, mw->buf_size,
+			  mw->virt_addr, mw->dma_addr);
+	mw->xlat_size = 0;
+	mw->buf_size = 0;
+	mw->virt_addr = NULL;
+}
+
+/*
+ * Allocate a coherent buffer of at least @size bytes (rounded up to the
+ * hardware alignment constraints) and program it as the inbound memory
+ * window translation.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
+{
+	struct perf_mw *mw = &perf->mw;
+	size_t xlat_size, buf_size;
+	int rc;
+
+	if (!size)
+		return -EINVAL;
+
+	xlat_size = round_up(size, mw->xlat_align_size);
+	buf_size = round_up(size, mw->xlat_align);
+
+	/* already configured with this translation size - nothing to do */
+	if (mw->xlat_size == xlat_size)
+		return 0;
+
+	if (mw->buf_size)
+		perf_free_mw(perf);
+
+	mw->xlat_size = xlat_size;
+	mw->buf_size = buf_size;
+
+	mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
+					   &mw->dma_addr, GFP_KERNEL);
+	if (!mw->virt_addr) {
+		/*
+		 * Bail out here: the original code fell through and called
+		 * ntb_mw_set_trans() with a stale/unset dma_addr.
+		 */
+		mw->xlat_size = 0;
+		mw->buf_size = 0;
+		return -ENOMEM;
+	}
+
+	rc = ntb_mw_set_trans(perf->ntb, PIDX, 0, mw->dma_addr, mw->xlat_size);
+	if (rc) {
+		dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
+		perf_free_mw(perf);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Delayed work scheduled on link-up: advertise our memory-window size and
+ * driver version to the peer through scratchpads, then read back the peer's
+ * advertisement and size the local inbound buffer to match.  Reschedules
+ * itself while the link stays up but the peer has not yet posted a matching
+ * version.
+ */
+static void perf_link_work(struct work_struct *work)
+{
+	struct perf_ctx *perf =
+		container_of(work, struct perf_ctx, link_work.work);
+	struct ntb_dev *ndev = perf->ntb;
+	struct pci_dev *pdev = ndev->pdev;
+	u32 val;
+	u64 size;
+	int rc;
+
+	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
+
+	size = perf->mw.phys_size;
+
+	/* honour the max_mw_size module parameter; 0 means unlimited */
+	if (max_mw_size && size > max_mw_size)
+		size = max_mw_size;
+
+	ntb_peer_spad_write(ndev, PIDX, MW_SZ_HIGH, upper_32_bits(size));
+	ntb_peer_spad_write(ndev, PIDX, MW_SZ_LOW, lower_32_bits(size));
+	ntb_peer_spad_write(ndev, PIDX, VERSION, PERF_VERSION);
+
+	/* now read what peer wrote */
+	val = ntb_spad_read(ndev, VERSION);
+	if (val != PERF_VERSION) {
+		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
+		goto out;
+	}
+
+	/* reassemble the peer's 64-bit window size from two scratchpads */
+	val = ntb_spad_read(ndev, MW_SZ_HIGH);
+	size = (u64)val << 32;
+
+	val = ntb_spad_read(ndev, MW_SZ_LOW);
+	size |= val;
+
+	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);
+
+	rc = perf_set_mw(perf, size);
+	if (rc)
+		goto out1;
+
+	perf->link_is_up = true;
+	wake_up(&perf->link_wq);
+
+	return;
+
+out1:
+	perf_free_mw(perf);
+
+out:
+	/* handshake incomplete: retry later while the link is still up */
+	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
+		schedule_delayed_work(&perf->link_work,
+				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
+}
+
+/*
+ * Query the alignment constraints and peer memory-window geometry, then
+ * map the peer's window into kernel space with write-combining.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
+{
+	struct perf_mw *mw = &perf->mw;
+	int rc;
+
+	rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
+			      &mw->xlat_align_size, NULL);
+	if (rc)
+		return rc;
+
+	rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
+	if (rc)
+		return rc;
+
+	mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
+
+	return mw->vbase ? 0 : -ENOMEM;
+}
+
+/*
+ * debugfs "run" read handler: report "running" while a measurement is in
+ * flight (run_mutex held), otherwise print per-thread throughput results.
+ *
+ * Fix: the buffer length 1024 was a magic number repeated in three places,
+ * with an inconsistent cap of 64 in one scnprintf(); name it once.
+ */
+static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
+				size_t count, loff_t *offp)
+{
+	/* single allocation sized for MAX_THREADS result lines */
+	const size_t buf_len = 1024;
+	struct perf_ctx *perf = filp->private_data;
+	struct pthr_ctx *pctx;
+	ssize_t ret, out_off = 0;
+	char *buf;
+	u64 rate;
+	int i;
+
+	if (!perf)
+		return 0;
+
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* run_mutex is held for the whole duration of a measurement */
+	if (mutex_is_locked(&perf->run_mutex)) {
+		out_off = scnprintf(buf, buf_len, "running\n");
+		goto read_from_buf;
+	}
+
+	for (i = 0; i < MAX_THREADS; i++) {
+		pctx = &perf->pthr_ctx[i];
+
+		/* -ENODATA marks the first unused thread slot */
+		if (pctx->status == -ENODATA)
+			break;
+
+		if (pctx->status) {
+			out_off += scnprintf(buf + out_off, buf_len - out_off,
+					    "%d: error %d\n", i,
+					    pctx->status);
+			continue;
+		}
+
+		rate = div64_u64(pctx->copied, pctx->diff_us);
+		out_off += scnprintf(buf + out_off, buf_len - out_off,
+			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
+			i, pctx->copied, pctx->diff_us, rate);
+	}
+
+read_from_buf:
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
+	kfree(buf);
+
+	return ret;
+}
+
+/* Stop every worker kthread still alive and record its exit status. */
+static void threads_cleanup(struct perf_ctx *perf)
+{
+	int i;
+
+	for (i = 0; i < MAX_THREADS; i++) {
+		struct pthr_ctx *pctx = &perf->pthr_ctx[i];
+
+		if (!pctx->thread)
+			continue;
+
+		pctx->status = kthread_stop(pctx->thread);
+		pctx->thread = NULL;
+	}
+}
+
+/* Mark every per-thread slot unused (-ENODATA) ahead of a new run. */
+static void perf_clear_thread_status(struct perf_ctx *perf)
+{
+	struct pthr_ctx *pctx = perf->pthr_ctx;
+	int i;
+
+	for (i = 0; i < MAX_THREADS; i++)
+		pctx[i].status = -ENODATA;
+}
+
+/*
+ * debugfs "run" file write handler: spawn perf_threads worker kthreads,
+ * wait until all of them report completion (tdone), then reap them.
+ *
+ * The module parameters seg_order/run_order are clamped here before the
+ * run.  Returns @count on success, -EBUSY if a run is already in flight,
+ * -ENOLINK if interrupted while waiting for the link, -ENXIO on thread
+ * creation failure.
+ */
+static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
+				 size_t count, loff_t *offp)
+{
+	struct perf_ctx *perf = filp->private_data;
+	int node, i;
+	/* on-stack waitqueue: workers signal completion through pctx->wq */
+	DECLARE_WAIT_QUEUE_HEAD(wq);
+
+	/* block until the link-up handshake has completed */
+	if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
+		return -ENOLINK;
+
+	if (perf->perf_threads == 0)
+		return -EINVAL;
+
+	/* run_mutex doubles as the "running" indicator for the read side */
+	if (!mutex_trylock(&perf->run_mutex))
+		return -EBUSY;
+
+	perf_clear_thread_status(perf);
+
+	if (perf->perf_threads > MAX_THREADS) {
+		perf->perf_threads = MAX_THREADS;
+		pr_info("Reset total threads to: %u\n", MAX_THREADS);
+	}
+
+	/* no greater than 1M */
+	if (seg_order > MAX_SEG_ORDER) {
+		seg_order = MAX_SEG_ORDER;
+		pr_info("Fix seg_order to %u\n", seg_order);
+	}
+
+	if (run_order < seg_order) {
+		run_order = seg_order;
+		pr_info("Fix run_order to %u\n", run_order);
+	}
+
+	node = on_node ? dev_to_node(&perf->ntb->pdev->dev)
+		       : NUMA_NO_NODE;
+	atomic_set(&perf->tdone, 0);
+
+	/* launch kernel thread */
+	for (i = 0; i < perf->perf_threads; i++) {
+		struct pthr_ctx *pctx;
+
+		pctx = &perf->pthr_ctx[i];
+		atomic_set(&pctx->dma_sync, 0);
+		pctx->perf = perf;
+		pctx->wq = &wq;
+		pctx->thread =
+			kthread_create_on_node(ntb_perf_thread,
+					       (void *)pctx,
+					       node, "ntb_perf %d", i);
+		if (IS_ERR(pctx->thread)) {
+			pctx->thread = NULL;
+			goto err;
+		} else {
+			wake_up_process(pctx->thread);
+		}
+	}
+
+	/* threads park themselves when finished; kthread_stop() reaps them */
+	wait_event_interruptible(wq,
+		atomic_read(&perf->tdone) == perf->perf_threads);
+
+	threads_cleanup(perf);
+	mutex_unlock(&perf->run_mutex);
+	return count;
+
+err:
+	threads_cleanup(perf);
+	mutex_unlock(&perf->run_mutex);
+	return -ENXIO;
+}
+
+/* debugfs "run" file: read reports results, write starts a measurement */
+static const struct file_operations ntb_perf_debugfs_run = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = debugfs_run_read,
+	.write = debugfs_run_write,
+};
+
+/*
+ * Create the debugfs tree for this device:
+ *   ntb_perf/<pci-name>/{run,threads,seg_order,run_order,use_dma,on_node}
+ *
+ * Returns 0 on success, -ENODEV when debugfs is unavailable or any entry
+ * fails to be created (the whole tree is removed on failure).
+ */
+static int perf_debugfs_setup(struct perf_ctx *perf)
+{
+	struct pci_dev *pdev = perf->ntb->pdev;
+	struct dentry *debugfs_node_dir;
+	struct dentry *debugfs_run;
+	struct dentry *debugfs_threads;
+	struct dentry *debugfs_seg_order;
+	struct dentry *debugfs_run_order;
+	struct dentry *debugfs_use_dma;
+	struct dentry *debugfs_on_node;
+
+	if (!debugfs_initialized())
+		return -ENODEV;
+
+	/* Assumption: only one NTB device in the system */
+	if (!perf_debugfs_dir) {
+		perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+		if (!perf_debugfs_dir)
+			return -ENODEV;
+	}
+
+	debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
+					      perf_debugfs_dir);
+	if (!debugfs_node_dir)
+		goto err;
+
+	debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
+					  debugfs_node_dir, perf,
+					  &ntb_perf_debugfs_run);
+	if (!debugfs_run)
+		goto err;
+
+	debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
+					    debugfs_node_dir,
+					    &perf->perf_threads);
+	if (!debugfs_threads)
+		goto err;
+
+	debugfs_seg_order = debugfs_create_u32("seg_order", 0600,
+					       debugfs_node_dir,
+					       &seg_order);
+	if (!debugfs_seg_order)
+		goto err;
+
+	debugfs_run_order = debugfs_create_u32("run_order", 0600,
+					       debugfs_node_dir,
+					       &run_order);
+	if (!debugfs_run_order)
+		goto err;
+
+	debugfs_use_dma = debugfs_create_bool("use_dma", 0600,
+					       debugfs_node_dir,
+					       &use_dma);
+	if (!debugfs_use_dma)
+		goto err;
+
+	debugfs_on_node = debugfs_create_bool("on_node", 0600,
+					      debugfs_node_dir,
+					      &on_node);
+	if (!debugfs_on_node)
+		goto err;
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(perf_debugfs_dir);
+	perf_debugfs_dir = NULL;
+	return -ENODEV;
+}
+
+/*
+ * NTB client probe: validate device capabilities, allocate the driver
+ * context, map the memory window, register callbacks, enable the link and
+ * create the debugfs files.
+ *
+ * Fixes vs original: perf_setup_mw()'s return value was ignored (a failed
+ * ioremap would leave mw.vbase NULL and oops in the worker threads), and
+ * a debugfs failure freed perf while it was still the registered context.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
+{
+	struct pci_dev *pdev = ntb->pdev;
+	struct perf_ctx *perf;
+	int node;
+	int rc = 0;
+
+	if (ntb_spad_count(ntb) < MAX_SPAD) {
+		dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
+			DRIVER_NAME);
+		return -EIO;
+	}
+
+	if (!ntb->ops->mw_set_trans) {
+		dev_err(&ntb->dev, "Need inbound MW based NTB API\n");
+		return -EINVAL;
+	}
+
+	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
+		dev_warn(&ntb->dev, "Multi-port NTB devices unsupported\n");
+
+	/* allocate on the device's NUMA node when requested */
+	node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE;
+	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
+	if (!perf) {
+		rc = -ENOMEM;
+		goto err_perf;
+	}
+
+	perf->ntb = ntb;
+	perf->perf_threads = 1;
+	atomic_set(&perf->tsync, 0);
+	mutex_init(&perf->run_mutex);
+	spin_lock_init(&perf->db_lock);
+	init_waitqueue_head(&perf->link_wq);
+	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
+
+	rc = perf_setup_mw(ntb, perf);
+	if (rc)
+		goto err_ctx;
+
+	rc = ntb_set_ctx(ntb, perf, &perf_ops);
+	if (rc)
+		goto err_ctx;
+
+	perf->link_is_up = false;
+	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+	ntb_link_event(ntb);
+
+	rc = perf_debugfs_setup(perf);
+	if (rc)
+		goto err_clear_ctx;
+
+	perf_clear_thread_status(perf);
+
+	return 0;
+
+err_clear_ctx:
+	/* undo link enable and ctx registration before freeing perf */
+	ntb_link_disable(ntb);
+	ntb_clear_ctx(ntb);
+err_ctx:
+	cancel_delayed_work_sync(&perf->link_work);
+	kfree(perf);
+err_perf:
+	return rc;
+}
+
+/*
+ * NTB client remove: stop pending link work, detach the context, disable
+ * the link and release per-thread DMA channels acquired during runs.
+ *
+ * NOTE(review): the memory window buffer (perf_free_mw()) and the
+ * ioremap_wc() mapping from perf_setup_mw() are not released here -
+ * verify whether this leaks on unbind.
+ */
+static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
+{
+	struct perf_ctx *perf = ntb->ctx;
+	int i;
+
+	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);
+
+	/* taken and never released: blocks any further measurement runs */
+	mutex_lock(&perf->run_mutex);
+
+	cancel_delayed_work_sync(&perf->link_work);
+
+	ntb_clear_ctx(ntb);
+	ntb_link_disable(ntb);
+
+	debugfs_remove_recursive(perf_debugfs_dir);
+	perf_debugfs_dir = NULL;
+
+	if (use_dma) {
+		for (i = 0; i < MAX_THREADS; i++) {
+			struct pthr_ctx *pctx = &perf->pthr_ctx[i];
+
+			if (pctx->dma_chan)
+				dma_release_channel(pctx->dma_chan);
+		}
+	}
+
+	kfree(perf);
+}
+
+/* Client hooks; registered with the NTB bus via module_ntb_client() */
+static struct ntb_client perf_client = {
+	.ops = {
+		.probe = perf_probe,
+		.remove = perf_remove,
+	},
+};
+module_ntb_client(perf_client);
diff --git a/src/kernel/linux/v4.14/drivers/ntb/test/ntb_pingpong.c b/src/kernel/linux/v4.14/drivers/ntb/test/ntb_pingpong.c
new file mode 100644
index 0000000..938a18b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/test/ntb_pingpong.c
@@ -0,0 +1,322 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Pingpong Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+/* Note: load this module with option 'dyndbg=+p' */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+
+#include <linux/ntb.h>
+
+#define DRIVER_NAME			"ntb_pingpong"
+#define DRIVER_DESCRIPTION		"PCIe NTB Simple Pingpong Client"
+
+#define DRIVER_LICENSE			"Dual BSD/GPL"
+#define DRIVER_VERSION			"1.0"
+#define DRIVER_RELDATE			"24 March 2015"
+#define DRIVER_AUTHOR			"Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static unsigned int unsafe;
+module_param(unsafe, uint, 0644);
+MODULE_PARM_DESC(unsafe, "Run even though ntb operations may be unsafe");
+
+static unsigned int delay_ms = 1000;
+module_param(delay_ms, uint, 0644);
+MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
+
+static unsigned long db_init = 0x7;
+module_param(db_init, ulong, 0644);
+MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
+
+/* Only two-ports NTB devices are supported */
+#define PIDX		NTB_DEF_PEER_IDX
+
+/* Per-device pingpong state */
+struct pp_ctx {
+	struct ntb_dev			*ntb;
+	u64				db_bits;	/* pong bits collected in pp_db_event() */
+	/* synchronize access to db_bits by ping and pong */
+	spinlock_t			db_lock;
+	struct timer_list		db_timer;	/* delays the response ping */
+	unsigned long			db_delay;	/* delay_ms converted to jiffies */
+	struct dentry			*debugfs_node_dir;
+	struct dentry			*debugfs_count;
+	atomic_t			count;		/* total pong events received */
+};
+
+static struct dentry *pp_debugfs_dir;
+
+/*
+ * Timer callback (also kicked directly on link-up): advance the doorbell
+ * bit pattern, bump the shared scratchpad counter, ring the peer's doorbell
+ * and unmask our own doorbell to await the pong.
+ */
+static void pp_ping(unsigned long ctx)
+{
+	struct pp_ctx *pp = (void *)ctx;
+	unsigned long irqflags;
+	u64 db_bits, db_mask;
+	u32 spad_rd, spad_wr;
+
+	spin_lock_irqsave(&pp->db_lock, irqflags);
+	{
+		db_mask = ntb_db_valid_mask(pp->ntb);
+		db_bits = ntb_db_read(pp->ntb);
+
+		if (db_bits) {
+			dev_dbg(&pp->ntb->dev,
+				"Masked pongs %#llx\n",
+				db_bits);
+			ntb_db_clear(pp->ntb, db_bits);
+		}
+
+		/* shift the pattern left by one; restart from db_init on wrap */
+		db_bits = ((pp->db_bits | db_bits) << 1) & db_mask;
+
+		if (!db_bits)
+			db_bits = db_init;
+
+		spad_rd = ntb_spad_read(pp->ntb, 0);
+		spad_wr = spad_rd + 1;
+
+		dev_dbg(&pp->ntb->dev,
+			"Ping bits %#llx read %#x write %#x\n",
+			db_bits, spad_rd, spad_wr);
+
+		ntb_peer_spad_write(pp->ntb, PIDX, 0, spad_wr);
+		ntb_peer_db_set(pp->ntb, db_bits);
+		/* re-arm: allow doorbell events so the pong can be seen */
+		ntb_db_clear_mask(pp->ntb, db_mask);
+
+		pp->db_bits = 0;
+	}
+	spin_unlock_irqrestore(&pp->db_lock, irqflags);
+}
+
+/*
+ * Link notification: start pinging when the link comes up; stop the
+ * response timer when it goes down.
+ */
+static void pp_link_event(void *ctx)
+{
+	struct pp_ctx *pp = ctx;
+	int link_up = ntb_link_is_up(pp->ntb, NULL, NULL);
+
+	if (link_up == 1) {
+		dev_dbg(&pp->ntb->dev, "link is up\n");
+		pp_ping((unsigned long)pp);
+	} else {
+		dev_dbg(&pp->ntb->dev, "link is down\n");
+		del_timer(&pp->db_timer);
+	}
+}
+
+/*
+ * Doorbell (pong) handler: mask and clear the bits of this vector, record
+ * them in db_bits and schedule the delayed response ping.
+ */
+static void pp_db_event(void *ctx, int vec)
+{
+	struct pp_ctx *pp = ctx;
+	u64 db_bits, db_mask;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&pp->db_lock, irqflags);
+	{
+		db_mask = ntb_db_vector_mask(pp->ntb, vec);
+		db_bits = db_mask & ntb_db_read(pp->ntb);
+		/* mask until pp_ping() re-arms with ntb_db_clear_mask() */
+		ntb_db_set_mask(pp->ntb, db_mask);
+		ntb_db_clear(pp->ntb, db_bits);
+
+		pp->db_bits |= db_bits;
+
+		mod_timer(&pp->db_timer, jiffies + pp->db_delay);
+
+		dev_dbg(&pp->ntb->dev,
+			"Pong vec %d bits %#llx\n",
+			vec, db_bits);
+		atomic_inc(&pp->count);
+	}
+	spin_unlock_irqrestore(&pp->db_lock, irqflags);
+}
+
+/*
+ * Create the per-device debugfs directory and a "count" file exposing the
+ * number of pongs received.  Returns -ENODEV when the module-level debugfs
+ * root is absent or any entry cannot be created.
+ */
+static int pp_debugfs_setup(struct pp_ctx *pp)
+{
+	struct pci_dev *pdev = pp->ntb->pdev;
+	struct dentry *dir, *file;
+
+	if (!pp_debugfs_dir)
+		return -ENODEV;
+
+	dir = debugfs_create_dir(pci_name(pdev), pp_debugfs_dir);
+	if (!dir)
+		return -ENODEV;
+	pp->debugfs_node_dir = dir;
+
+	file = debugfs_create_atomic_t("count", S_IRUSR | S_IWUSR,
+				       dir, &pp->count);
+	if (!file)
+		return -ENODEV;
+	pp->debugfs_count = file;
+
+	return 0;
+}
+
+/* Context callbacks registered with ntb_set_ctx() in pp_probe() */
+static const struct ntb_ctx_ops pp_ops = {
+	.link_event = pp_link_event,
+	.db_event = pp_db_event,
+};
+
+/*
+ * NTB client probe: validate device capabilities, allocate the pingpong
+ * context, register callbacks, set up debugfs and bring the link up.
+ *
+ * Fix vs original: a pp_debugfs_setup() failure freed pp while it was
+ * still registered as the ntb context (dangling pointer in the device);
+ * ntb_clear_ctx() is now called on that path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pp_probe(struct ntb_client *client,
+		    struct ntb_dev *ntb)
+{
+	struct pp_ctx *pp;
+	int rc;
+
+	if (ntb_db_is_unsafe(ntb)) {
+		dev_dbg(&ntb->dev, "doorbell is unsafe\n");
+		if (!unsafe) {
+			rc = -EINVAL;
+			goto err_pp;
+		}
+	}
+
+	if (ntb_spad_count(ntb) < 1) {
+		dev_dbg(&ntb->dev, "no enough scratchpads\n");
+		rc = -EINVAL;
+		goto err_pp;
+	}
+
+	if (ntb_spad_is_unsafe(ntb)) {
+		dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+		if (!unsafe) {
+			rc = -EINVAL;
+			goto err_pp;
+		}
+	}
+
+	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
+		dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");
+
+	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp) {
+		rc = -ENOMEM;
+		goto err_pp;
+	}
+
+	pp->ntb = ntb;
+	pp->db_bits = 0;
+	atomic_set(&pp->count, 0);
+	spin_lock_init(&pp->db_lock);
+	setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+	pp->db_delay = msecs_to_jiffies(delay_ms);
+
+	rc = ntb_set_ctx(ntb, pp, &pp_ops);
+	if (rc)
+		goto err_free;
+
+	rc = pp_debugfs_setup(pp);
+	if (rc)
+		goto err_clear_ctx;
+
+	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+	ntb_link_event(ntb);
+
+	return 0;
+
+err_clear_ctx:
+	ntb_clear_ctx(ntb);
+err_free:
+	kfree(pp);
+err_pp:
+	return rc;
+}
+
+/*
+ * NTB client remove: tear down debugfs, detach the callbacks, stop the
+ * ping timer and disable the link before freeing the context.
+ */
+static void pp_remove(struct ntb_client *client,
+		      struct ntb_dev *ntb)
+{
+	struct pp_ctx *pp = ntb->ctx;
+
+	debugfs_remove_recursive(pp->debugfs_node_dir);
+
+	/* ctx cleared before the timer stops - presumably so no further
+	 * db/link events can re-arm it; confirm ordering is intentional */
+	ntb_clear_ctx(ntb);
+	del_timer_sync(&pp->db_timer);
+	ntb_link_disable(ntb);
+
+	kfree(pp);
+}
+
+/* Client hooks; registered with the NTB bus from pp_init() */
+static struct ntb_client pp_client = {
+	.ops = {
+		.probe = pp_probe,
+		.remove = pp_remove,
+	},
+};
+
+/*
+ * Module init: create the debugfs root (best effort - probing proceeds
+ * without it) and register the pingpong NTB client.
+ */
+static int __init pp_init(void)
+{
+	int rc;
+
+	if (debugfs_initialized())
+		pp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	rc = ntb_register_client(&pp_client);
+	if (rc) {
+		debugfs_remove_recursive(pp_debugfs_dir);
+		return rc;
+	}
+
+	return 0;
+}
+module_init(pp_init);
+
+/* Module exit: unregister the client, then remove the debugfs tree. */
+static void __exit pp_exit(void)
+{
+	ntb_unregister_client(&pp_client);
+	debugfs_remove_recursive(pp_debugfs_dir);
+}
+module_exit(pp_exit);
diff --git a/src/kernel/linux/v4.14/drivers/ntb/test/ntb_tool.c b/src/kernel/linux/v4.14/drivers/ntb/test/ntb_tool.c
new file mode 100644
index 0000000..a69815c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/ntb/test/ntb_tool.c
@@ -0,0 +1,1032 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Debugging Tool Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+/*
+ * How to use this tool, by example.
+ *
+ * Assuming $DBG_DIR is something like:
+ * '/sys/kernel/debug/ntb_tool/0000:00:03.0'
+ *
+ * Eg: check if clearing the doorbell mask generates an interrupt.
+ *
+ * # Check the link status
+ * root@self# cat $DBG_DIR/link
+ *
+ * # Block until the link is up
+ * root@self# echo Y > $DBG_DIR/link_event
+ *
+ * # Set the doorbell mask
+ * root@self# echo 's 1' > $DBG_DIR/mask
+ *
+ * # Ring the doorbell from the peer
+ * root@peer# echo 's 1' > $DBG_DIR/peer_db
+ *
+ * # Clear the doorbell mask
+ * root@self# echo 'c 1' > $DBG_DIR/mask
+ *
+ * Observe debugging output in dmesg or your console.  You should see a
+ * doorbell event triggered by clearing the mask.  If not, this may indicate an
+ * issue with the hardware that needs to be worked around in the driver.
+ *
+ * Eg: read and write scratchpad registers
+ *
+ * root@peer# echo '0 0x01010101 1 0x7f7f7f7f' > $DBG_DIR/peer_spad
+ *
+ * root@self# cat $DBG_DIR/spad
+ *
+ * Observe that spad 0 and 1 have the values set by the peer.
+ *
+ * # Check the memory window translation info
+ * cat $DBG_DIR/peer_trans0
+ *
+ * # Setup a 16k memory window buffer
+ * echo 16384 > $DBG_DIR/peer_trans0
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/ntb.h>
+
+#define DRIVER_NAME			"ntb_tool"
+#define DRIVER_DESCRIPTION		"PCIe NTB Debugging Tool"
+
+#define DRIVER_LICENSE			"Dual BSD/GPL"
+#define DRIVER_VERSION			"1.0"
+#define DRIVER_RELDATE			"22 April 2015"
+#define DRIVER_AUTHOR			"Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+/* It is rare to have hardware with greater than six MWs */
+#define MAX_MWS	6
+/* Only two-ports devices are supported */
+#define PIDX	NTB_DEF_PEER_IDX
+
+static struct dentry *tool_dbgfs;
+
+/* Per memory-window bookkeeping (mappings, buffers and debugfs file) */
+struct tool_mw {
+	int idx;
+	struct tool_ctx *tc;	/* back-pointer to the owning device context */
+	resource_size_t win_size;
+	resource_size_t size;
+	u8 __iomem *local;	/* presumably the mapped peer-visible window - confirm in setup code */
+	u8 *peer;
+	dma_addr_t peer_dma;
+	struct dentry *peer_dbg_file;
+};
+
+/* Per-device context for the debugging tool */
+struct tool_ctx {
+	struct ntb_dev *ntb;
+	struct dentry *dbgfs;		/* per-device debugfs directory */
+	wait_queue_head_t link_wq;	/* woken from tool_link_event() */
+	int mw_count;
+	struct tool_mw mws[MAX_MWS];
+};
+
+#define SPAD_FNAME_SIZE 0x10
+/* pack/unpack a small integer into an opaque pointer for file private data */
+#define INT_PTR(x) ((void *)(unsigned long)x)
+#define PTR_INT(x) ((int)(unsigned long)x)
+
+/* declare a read/write debugfs file_operations bound to the given handlers */
+#define TOOL_FOPS_RDWR(__name, __read, __write) \
+	const struct file_operations __name = {	\
+		.owner = THIS_MODULE,		\
+		.open = simple_open,		\
+		.read = __read,			\
+		.write = __write,		\
+	}
+
+/*
+ * Link event callback: log the new link state and wake anyone blocked in
+ * the debugfs "link_event" file.
+ */
+static void tool_link_event(void *ctx)
+{
+	struct tool_ctx *tc = ctx;
+	enum ntb_width width;
+	enum ntb_speed speed;
+	int link_up = ntb_link_is_up(tc->ntb, &speed, &width);
+
+	dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n",
+		link_up ? "up" : "down", speed, width);
+
+	wake_up(&tc->link_wq);
+}
+
+/* Doorbell event callback: log which vector fired and the current bits. */
+static void tool_db_event(void *ctx, int vec)
+{
+	struct tool_ctx *tc = ctx;
+	u64 vec_mask = ntb_db_vector_mask(tc->ntb, vec);
+	u64 bits = ntb_db_read(tc->ntb);
+
+	dev_dbg(&tc->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
+		vec, vec_mask, bits);
+}
+
+/* Context callbacks registered with the NTB device */
+static const struct ntb_ctx_ops tool_ops = {
+	.link_event = tool_link_event,
+	.db_event = tool_db_event,
+};
+
+/*
+ * Format the value returned by @db_read_fn as hex into a small kernel
+ * buffer and copy it out to user space.
+ */
+static ssize_t tool_dbfn_read(struct tool_ctx *tc, char __user *ubuf,
+			      size_t size, loff_t *offp,
+			      u64 (*db_read_fn)(struct ntb_dev *))
+{
+	size_t alloc;
+	ssize_t len, nread;
+	char *kbuf;
+
+	if (!db_read_fn)
+		return -EINVAL;
+
+	alloc = min_t(size_t, size, 0x20);
+	kbuf = kmalloc(alloc, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	len = scnprintf(kbuf, alloc, "%#llx\n", db_read_fn(tc->ntb));
+	nread = simple_read_from_buffer(ubuf, size, offp, kbuf, len);
+
+	kfree(kbuf);
+
+	return nread;
+}
+
+/*
+ * Parse a "<cmd> <bits>" string from user space where <cmd> is 's' (set)
+ * or 'c' (clear), and apply it through the supplied doorbell callbacks.
+ * A NULL callback means that operation is unsupported (-EINVAL).
+ *
+ * Returns @size on success or a negative errno.
+ */
+static ssize_t tool_dbfn_write(struct tool_ctx *tc,
+			       const char __user *ubuf,
+			       size_t size, loff_t *offp,
+			       int (*db_set_fn)(struct ntb_dev *, u64),
+			       int (*db_clear_fn)(struct ntb_dev *, u64))
+{
+	u64 db_bits;
+	char *buf, cmd;
+	ssize_t rc;
+	int n;
+
+	/* NOTE(review): size comes straight from user space, so an oversized
+	 * write attempts an equally large kmalloc - consider capping it */
+	buf = kmalloc(size + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rc = simple_write_to_buffer(buf, size, offp, ubuf, size);
+	if (rc < 0) {
+		kfree(buf);
+		return rc;
+	}
+
+	buf[size] = 0;
+
+	/* NOTE(review): "%lli" scans into a u64 - signed/unsigned mismatch;
+	 * works on common ABIs but is technically ill-formed */
+	n = sscanf(buf, "%c %lli", &cmd, &db_bits);
+
+	kfree(buf);
+
+	if (n != 2) {
+		rc = -EINVAL;
+	} else if (cmd == 's') {
+		if (!db_set_fn)
+			rc = -EINVAL;
+		else
+			rc = db_set_fn(tc->ntb, db_bits);
+	} else if (cmd == 'c') {
+		if (!db_clear_fn)
+			rc = -EINVAL;
+		else
+			rc = db_clear_fn(tc->ntb, db_bits);
+	} else {
+		rc = -EINVAL;
+	}
+
+	/* rc == 0 from the callback means success: report the full size */
+	return rc ? : size;
+}
+
+/*
+ * Dump every scratchpad as "<index>\t<hex value>" lines through the given
+ * per-scratchpad read callback (local or peer flavour).
+ */
+static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
+				size_t size, loff_t *offp,
+				u32 (*spad_read_fn)(struct ntb_dev *, int))
+{
+	size_t buf_size;
+	char *buf;
+	ssize_t pos, rc;
+	int i, spad_count;
+
+	if (!spad_read_fn)
+		return -EINVAL;
+
+	spad_count = ntb_spad_count(tc->ntb);
+
+	/*
+	 * We multiply the number of spads by 15 to get the buffer size
+	 * this is from 3 for the %d, 10 for the largest hex value
+	 * (0x00000000) and 2 for the tab and line feed.
+	 */
+	buf_size = min_t(size_t, size, spad_count * 15);
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos = 0;
+
+	for (i = 0; i < spad_count; ++i) {
+		pos += scnprintf(buf + pos, buf_size - pos, "%d\t%#x\n",
+				 i, spad_read_fn(tc->ntb, i));
+	}
+
+	rc = simple_read_from_buffer(ubuf, size, offp, buf, pos);
+
+	kfree(buf);
+
+	return rc;
+}
+
+/*
+ * Parse "<idx> <val>" pairs from user space and write each one through the
+ * supplied scratchpad write callback, stopping at the first failure.
+ *
+ * Returns @size on success or a negative errno.
+ */
+static ssize_t tool_spadfn_write(struct tool_ctx *tc,
+				 const char __user *ubuf,
+				 size_t size, loff_t *offp,
+				 int (*spad_write_fn)(struct ntb_dev *,
+						      int, u32))
+{
+	int spad_idx;
+	u32 spad_val;
+	char *buf, *buf_ptr;
+	int pos, n;
+	ssize_t rc;
+
+	if (!spad_write_fn) {
+		dev_dbg(&tc->ntb->dev, "no spad write fn\n");
+		return -EINVAL;
+	}
+
+	buf = kmalloc(size + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rc = simple_write_to_buffer(buf, size, offp, ubuf, size);
+	if (rc < 0) {
+		kfree(buf);
+		return rc;
+	}
+
+	buf[size] = 0;
+	buf_ptr = buf;
+	/* %n records how many chars were consumed so we can advance */
+	n = sscanf(buf_ptr, "%d %i%n", &spad_idx, &spad_val, &pos);
+	while (n == 2) {
+		buf_ptr += pos;
+		rc = spad_write_fn(tc->ntb, spad_idx, spad_val);
+		if (rc)
+			break;
+
+		n = sscanf(buf_ptr, "%d %i%n", &spad_idx, &spad_val, &pos);
+	}
+
+	if (n < 0)
+		rc = n;
+
+	kfree(buf);
+
+	return rc ? : size;
+}
+
+/* debugfs "db" read: dump the local doorbell register */
+static ssize_t tool_db_read(struct file *filep, char __user *ubuf,
+			    size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_read(tc, ubuf, size, offp,
+			      tc->ntb->ops->db_read);
+}
+
+/* debugfs "db" write: 's <bits>' sets, 'c <bits>' clears local doorbells */
+static ssize_t tool_db_write(struct file *filep, const char __user *ubuf,
+			     size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_write(tc, ubuf, size, offp,
+			       tc->ntb->ops->db_set,
+			       tc->ntb->ops->db_clear);
+}
+
+static TOOL_FOPS_RDWR(tool_db_fops,
+		      tool_db_read,
+		      tool_db_write);
+
+/* debugfs "mask" read: dump the local doorbell mask */
+static ssize_t tool_mask_read(struct file *filep, char __user *ubuf,
+			      size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_read(tc, ubuf, size, offp,
+			      tc->ntb->ops->db_read_mask);
+}
+
+/* debugfs "mask" write: 's <bits>' sets, 'c <bits>' clears the mask */
+static ssize_t tool_mask_write(struct file *filep, const char __user *ubuf,
+			       size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_write(tc, ubuf, size, offp,
+			       tc->ntb->ops->db_set_mask,
+			       tc->ntb->ops->db_clear_mask);
+}
+
+static TOOL_FOPS_RDWR(tool_mask_fops,
+		      tool_mask_read,
+		      tool_mask_write);
+
+/* debugfs "peer_db" read: dump the peer doorbell register */
+static ssize_t tool_peer_db_read(struct file *filep, char __user *ubuf,
+				 size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_read(tc, ubuf, size, offp,
+			      tc->ntb->ops->peer_db_read);
+}
+
+/* debugfs "peer_db" write: 's <bits>' sets, 'c <bits>' clears peer doorbells */
+static ssize_t tool_peer_db_write(struct file *filep, const char __user *ubuf,
+				  size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_write(tc, ubuf, size, offp,
+			       tc->ntb->ops->peer_db_set,
+			       tc->ntb->ops->peer_db_clear);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_db_fops,
+		      tool_peer_db_read,
+		      tool_peer_db_write);
+
+/* debugfs "peer_mask" read: dump the peer doorbell mask */
+static ssize_t tool_peer_mask_read(struct file *filep, char __user *ubuf,
+				   size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_read(tc, ubuf, size, offp,
+			      tc->ntb->ops->peer_db_read_mask);
+}
+
+/* debugfs "peer_mask" write: 's <bits>' sets, 'c <bits>' clears the mask */
+static ssize_t tool_peer_mask_write(struct file *filep, const char __user *ubuf,
+				    size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_dbfn_write(tc, ubuf, size, offp,
+			       tc->ntb->ops->peer_db_set_mask,
+			       tc->ntb->ops->peer_db_clear_mask);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mask_fops,
+		      tool_peer_mask_read,
+		      tool_peer_mask_write);
+
+/* debugfs "spad" read: dump all local scratchpads */
+static ssize_t tool_spad_read(struct file *filep, char __user *ubuf,
+			      size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_spadfn_read(tc, ubuf, size, offp,
+				tc->ntb->ops->spad_read);
+}
+
+/* debugfs "spad" write: "<idx> <val>" pairs written to local scratchpads */
+static ssize_t tool_spad_write(struct file *filep, const char __user *ubuf,
+			       size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_spadfn_write(tc, ubuf, size, offp,
+				 tc->ntb->ops->spad_write);
+}
+
+static TOOL_FOPS_RDWR(tool_spad_fops,
+		      tool_spad_read,
+		      tool_spad_write);
+
+static u32 ntb_tool_peer_spad_read(struct ntb_dev *ntb, int sidx)
+{
+	return ntb_peer_spad_read(ntb, PIDX, sidx);
+}
+
+static ssize_t tool_peer_spad_read(struct file *filep, char __user *ubuf,
+				   size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_spadfn_read(tc, ubuf, size, offp, ntb_tool_peer_spad_read);
+}
+
+static int ntb_tool_peer_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
+{
+	return ntb_peer_spad_write(ntb, PIDX, sidx, val);
+}
+
+static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf,
+				    size_t size, loff_t *offp)
+{
+	/* Write the peer scratchpads using the PIDX-bound adapter. */
+	struct tool_ctx *tc = filep->private_data;
+
+	return tool_spadfn_write(tc, ubuf, size, offp, ntb_tool_peer_spad_write);
+}
+
+/* debugfs "peer_spad" file: read/write the peer scratchpad registers. */
+static TOOL_FOPS_RDWR(tool_peer_spad_fops,
+		      tool_peer_spad_read,
+		      tool_peer_spad_write);
+
+static ssize_t tool_link_read(struct file *filep, char __user *ubuf,
+			      size_t size, loff_t *offp)
+{
+	/* Report the current link state as a single "Y\n" or "N\n". */
+	struct tool_ctx *tc = filep->private_data;
+	bool up = ntb_link_is_up(tc->ntb, NULL, NULL);
+	char str[3];
+
+	str[0] = up ? 'Y' : 'N';
+	str[1] = '\n';
+	str[2] = '\0';
+
+	return simple_read_from_buffer(ubuf, size, offp, str, 2);
+}
+
+static ssize_t tool_link_write(struct file *filep, const char __user *ubuf,
+			       size_t size, loff_t *offp)
+{
+	/* Parse a boolean from userspace and enable/disable the NTB link. */
+	struct tool_ctx *tc = filep->private_data;
+	char kbuf[32];
+	size_t nbytes = min(size, sizeof(kbuf) - 1);
+	bool enable;
+	int rc;
+
+	if (copy_from_user(kbuf, ubuf, nbytes))
+		return -EFAULT;
+	kbuf[nbytes] = '\0';
+
+	rc = strtobool(kbuf, &enable);
+	if (rc)
+		return rc;
+
+	rc = enable ? ntb_link_enable(tc->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO)
+		    : ntb_link_disable(tc->ntb);
+
+	return rc ? rc : size;
+}
+
+/* debugfs "link" file: query ("Y"/"N") or toggle the NTB link. */
+static TOOL_FOPS_RDWR(tool_link_fops,
+		      tool_link_read,
+		      tool_link_write);
+
+/*
+ * Block the writer until the link state matches the boolean written to
+ * the file (e.g. "echo Y > link_event" waits for link-up).
+ */
+static ssize_t tool_link_event_write(struct file *filep,
+				     const char __user *ubuf,
+				     size_t size, loff_t *offp)
+{
+	struct tool_ctx *tc = filep->private_data;
+	char buf[32];
+	size_t buf_size;
+	bool val;
+	int rc;
+
+	/* Copy at most sizeof(buf) - 1 bytes so the terminating NUL fits. */
+	buf_size = min(size, (sizeof(buf) - 1));
+	if (copy_from_user(buf, ubuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	rc = strtobool(buf, &val);
+	if (rc)
+		return rc;
+
+	/*
+	 * Sleep until the link matches; presumably link_wq is woken by the
+	 * ctx link-event callback (defined outside this chunk) — verify.
+	 * A signal aborts the wait with -ERESTART.
+	 */
+	if (wait_event_interruptible(tc->link_wq,
+		ntb_link_is_up(tc->ntb, NULL, NULL) == val))
+		return -ERESTART;
+
+	return size;
+}
+
+/* debugfs "link_event" file: write-only, blocks until link state matches. */
+static TOOL_FOPS_RDWR(tool_link_event_fops,
+		      NULL,
+		      tool_link_event_write);
+
+/*
+ * Read from the local (inbound) side of a memory window through its
+ * ioremapped mapping.  Data is staged in a kernel bounce buffer because
+ * copy_to_user() must not be used directly on I/O memory.
+ */
+static ssize_t tool_mw_read(struct file *filep, char __user *ubuf,
+			    size_t size, loff_t *offp)
+{
+	struct tool_mw *mw = filep->private_data;
+	ssize_t rc;
+	loff_t pos = *offp;
+	void *buf;
+
+	/* Window mapping was never established (or was torn down). */
+	if (mw->local == NULL)
+		return -EIO;
+	if (pos < 0)
+		return -EINVAL;
+	/* EOF / zero-length read. */
+	if (pos >= mw->win_size || !size)
+		return 0;
+	/* Clamp the request to the remaining window span. */
+	if (size > mw->win_size - pos)
+		size = mw->win_size - pos;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy_fromio(buf, mw->local + pos, size);
+	/* copy_to_user() returns the number of bytes NOT copied. */
+	rc = copy_to_user(ubuf, buf, size);
+	if (rc == size) {
+		/* Nothing at all reached userspace. */
+		rc = -EFAULT;
+		goto err_free;
+	}
+
+	/* Account for a possibly partial copy. */
+	size -= rc;
+	*offp = pos + size;
+	rc = size;
+
+err_free:
+	kfree(buf);
+
+	return rc;
+}
+
+/*
+ * Write to the local (inbound) side of a memory window through its
+ * ioremapped mapping.  Data is staged in a kernel bounce buffer because
+ * copy_from_user() must not target I/O memory directly.
+ */
+static ssize_t tool_mw_write(struct file *filep, const char __user *ubuf,
+			     size_t size, loff_t *offp)
+{
+	struct tool_mw *mw = filep->private_data;
+	ssize_t rc;
+	loff_t pos = *offp;
+	void *buf;
+
+	/*
+	 * Guard against an unmapped window, matching tool_mw_read();
+	 * without this check memcpy_toio() below would dereference a
+	 * NULL mapping.
+	 */
+	if (mw->local == NULL)
+		return -EIO;
+	if (pos < 0)
+		return -EINVAL;
+	/* Past-the-end / zero-length write. */
+	if (pos >= mw->win_size || !size)
+		return 0;
+	/* Clamp the request to the remaining window span. */
+	if (size > mw->win_size - pos)
+		size = mw->win_size - pos;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* copy_from_user() returns the number of bytes NOT copied. */
+	rc = copy_from_user(buf, ubuf, size);
+	if (rc == size) {
+		rc = -EFAULT;
+		goto err_free;
+	}
+
+	/* Push only the bytes that were successfully copied in. */
+	size -= rc;
+	*offp = pos + size;
+	rc = size;
+
+	memcpy_toio(mw->local + pos, buf, size);
+
+err_free:
+	kfree(buf);
+
+	return rc;
+}
+
+/* debugfs "mw<N>" file: raw access to the local side of a memory window. */
+static TOOL_FOPS_RDWR(tool_mw_fops,
+		      tool_mw_read,
+		      tool_mw_write);
+
+static ssize_t tool_peer_mw_read(struct file *filep, char __user *ubuf,
+				 size_t size, loff_t *offp)
+{
+	/* Expose the locally-allocated peer buffer; ENXIO until it exists. */
+	struct tool_mw *mw = filep->private_data;
+
+	if (!mw->peer)
+		return -ENXIO;
+
+	return simple_read_from_buffer(ubuf, size, offp,
+				       mw->peer, mw->size);
+}
+
+static ssize_t tool_peer_mw_write(struct file *filep, const char __user *ubuf,
+				  size_t size, loff_t *offp)
+{
+	/* Fill the locally-allocated peer buffer; ENXIO until it exists. */
+	struct tool_mw *mw = filep->private_data;
+
+	if (!mw->peer)
+		return -ENXIO;
+
+	return simple_write_to_buffer(mw->peer, mw->size, offp,
+				      ubuf, size);
+}
+
+/* debugfs "peer_mw<N>" file: access the buffer the peer window maps to. */
+static TOOL_FOPS_RDWR(tool_peer_mw_fops,
+		      tool_peer_mw_read,
+		      tool_peer_mw_write);
+
+/*
+ * Allocate a DMA-coherent buffer of (at most) @req_size bytes, program
+ * the inbound translation of window @idx at it, and create the
+ * "peer_mw<idx>" debugfs file.  Returns 0 on success or a negative errno.
+ */
+static int tool_setup_mw(struct tool_ctx *tc, int idx, size_t req_size)
+{
+	int rc;
+	struct tool_mw *mw = &tc->mws[idx];
+	resource_size_t size, align_addr, align_size;
+	char buf[16];
+
+	/* Already set up; nothing to do. */
+	if (mw->peer)
+		return 0;
+
+	rc = ntb_mw_get_align(tc->ntb, PIDX, idx, &align_addr,
+				&align_size, &size);
+	if (rc)
+		return rc;
+
+	/* Clamp to the device maximum, then satisfy both alignment rules. */
+	mw->size = min_t(resource_size_t, req_size, size);
+	mw->size = round_up(mw->size, align_addr);
+	mw->size = round_up(mw->size, align_size);
+	mw->peer = dma_alloc_coherent(&tc->ntb->pdev->dev, mw->size,
+				      &mw->peer_dma, GFP_KERNEL);
+	if (!mw->peer) {
+		rc = -ENOMEM;
+		goto err_reset;
+	}
+
+	/*
+	 * dma_alloc_coherent() does not guarantee the hardware address
+	 * alignment the NTB requires.  Previously a misaligned buffer was
+	 * leaked and mw->peer stayed set, so a retry wrongly reported
+	 * success; free it and fully reset the window state instead.
+	 */
+	if (!IS_ALIGNED(mw->peer_dma, align_addr)) {
+		rc = -ENOMEM;
+		goto err_free_dma;
+	}
+
+	rc = ntb_mw_set_trans(tc->ntb, PIDX, idx, mw->peer_dma, mw->size);
+	if (rc)
+		goto err_free_dma;
+
+	snprintf(buf, sizeof(buf), "peer_mw%d", idx);
+	mw->peer_dbg_file = debugfs_create_file(buf, S_IRUSR | S_IWUSR,
+						mw->tc->dbgfs, mw,
+						&tool_peer_mw_fops);
+
+	return 0;
+
+err_free_dma:
+	dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
+			  mw->peer,
+			  mw->peer_dma);
+err_reset:
+	mw->peer = NULL;
+	mw->peer_dma = 0;
+	mw->size = 0;
+
+	return rc;
+}
+
+/*
+ * Release the buffer backing peer memory window @idx.  The inbound
+ * translation is cleared before the DMA buffer is freed so the peer can
+ * no longer write into memory that is being returned.
+ */
+static void tool_free_mw(struct tool_ctx *tc, int idx)
+{
+	struct tool_mw *mw = &tc->mws[idx];
+
+	if (mw->peer) {
+		ntb_mw_clear_trans(tc->ntb, PIDX, idx);
+		dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
+				  mw->peer,
+				  mw->peer_dma);
+	}
+
+	/* Reset unconditionally: also clears stale state after setup errors. */
+	mw->peer = NULL;
+	mw->peer_dma = 0;
+
+	debugfs_remove(mw->peer_dbg_file);
+
+	mw->peer_dbg_file = NULL;
+}
+
+/*
+ * Report the parameters of peer memory window @mw->idx and the state of
+ * the local buffer backing it, formatted as human-readable text.
+ */
+static ssize_t tool_peer_mw_trans_read(struct file *filep,
+				       char __user *ubuf,
+				       size_t size, loff_t *offp)
+{
+	struct tool_mw *mw = filep->private_data;
+
+	char *buf;
+	size_t buf_size;
+	ssize_t ret, off = 0;
+
+	phys_addr_t base;
+	resource_size_t mw_size;
+	resource_size_t align_addr;
+	resource_size_t align_size;
+	resource_size_t max_size;
+
+	buf_size = min_t(size_t, size, 512);
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * Bail out if the device cannot report the window parameters; the
+	 * previous code ignored these return values and then printed
+	 * uninitialized stack variables on failure.
+	 */
+	ret = ntb_mw_get_align(mw->tc->ntb, PIDX, mw->idx,
+			       &align_addr, &align_size, &max_size);
+	if (ret)
+		goto err_free;
+
+	ret = ntb_peer_mw_get_addr(mw->tc->ntb, mw->idx, &base, &mw_size);
+	if (ret)
+		goto err_free;
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Peer MW %d Information:\n", mw->idx);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Physical Address      \t%pa[p]\n",
+			 &base);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Window Size           \t%lld\n",
+			 (unsigned long long)mw_size);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Alignment             \t%lld\n",
+			 (unsigned long long)align_addr);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Size Alignment        \t%lld\n",
+			 (unsigned long long)align_size);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Size Max              \t%lld\n",
+			 (unsigned long long)max_size);
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Ready                 \t%c\n",
+			 (mw->peer) ? 'Y' : 'N');
+
+	off += scnprintf(buf + off, buf_size - off,
+			 "Allocated Size       \t%zd\n",
+			 (mw->peer) ? (size_t)mw->size : 0);
+
+	ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
+
+err_free:
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t tool_peer_mw_trans_write(struct file *filep,
+					const char __user *ubuf,
+					size_t size, loff_t *offp)
+{
+	/* Resize the peer window buffer: 0 frees it, nonzero reallocates. */
+	struct tool_mw *mw = filep->private_data;
+	char kbuf[32];
+	size_t nbytes = min(size, sizeof(kbuf) - 1);
+	unsigned long long req_size;
+	int rc;
+
+	if (copy_from_user(kbuf, ubuf, nbytes))
+		return -EFAULT;
+	kbuf[nbytes] = '\0';
+
+	rc = kstrtoull(kbuf, 0, &req_size);
+	if (rc)
+		return rc;
+
+	/* Always drop the old buffer first, then set up the new one. */
+	tool_free_mw(mw->tc, mw->idx);
+	if (req_size)
+		rc = tool_setup_mw(mw->tc, mw->idx, req_size);
+
+	return rc ? rc : size;
+}
+
+/* debugfs "peer_trans<N>" file: window info on read, resize on write. */
+static TOOL_FOPS_RDWR(tool_peer_mw_trans_fops,
+		      tool_peer_mw_trans_read,
+		      tool_peer_mw_trans_write);
+
+static int tool_init_mw(struct tool_ctx *tc, int idx)
+{
+	/* Map the local side of peer memory window @idx for debugfs access. */
+	struct tool_mw *mw = &tc->mws[idx];
+	phys_addr_t base;
+	int rc;
+
+	rc = ntb_peer_mw_get_addr(tc->ntb, idx, &base, &mw->win_size);
+	if (rc)
+		return rc;
+
+	mw->tc = tc;
+	mw->idx = idx;
+	mw->local = ioremap_wc(base, mw->win_size);
+
+	return mw->local ? 0 : -EFAULT;
+}
+
+static void tool_free_mws(struct tool_ctx *tc)
+{
+	int idx;
+
+	/* Tear down each window: peer buffer first, then the local mapping. */
+	for (idx = 0; idx < tc->mw_count; idx++) {
+		struct tool_mw *mw = &tc->mws[idx];
+
+		tool_free_mw(tc, idx);
+
+		if (mw->local)
+			iounmap(mw->local);
+		mw->local = NULL;
+	}
+}
+
+static void tool_setup_dbgfs(struct tool_ctx *tc)
+{
+	/* Fixed per-device debugfs entries: name, mode, file operations. */
+	static const struct {
+		const char *name;
+		umode_t mode;
+		const struct file_operations *fops;
+	} entries[] = {
+		{ "db",         S_IRUSR | S_IWUSR, &tool_db_fops },
+		{ "mask",       S_IRUSR | S_IWUSR, &tool_mask_fops },
+		{ "peer_db",    S_IRUSR | S_IWUSR, &tool_peer_db_fops },
+		{ "peer_mask",  S_IRUSR | S_IWUSR, &tool_peer_mask_fops },
+		{ "spad",       S_IRUSR | S_IWUSR, &tool_spad_fops },
+		{ "peer_spad",  S_IRUSR | S_IWUSR, &tool_peer_spad_fops },
+		{ "link",       S_IRUSR | S_IWUSR, &tool_link_fops },
+		{ "link_event", S_IWUSR,           &tool_link_event_fops },
+	};
+	int i;
+
+	/* This module is useless without debugfs... */
+	if (!tool_dbgfs) {
+		tc->dbgfs = NULL;
+		return;
+	}
+
+	tc->dbgfs = debugfs_create_dir(dev_name(&tc->ntb->dev),
+				       tool_dbgfs);
+	if (!tc->dbgfs)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(entries); i++)
+		debugfs_create_file(entries[i].name, entries[i].mode,
+				    tc->dbgfs, tc, entries[i].fops);
+
+	/* Per-window files: raw local access and peer translation control. */
+	for (i = 0; i < tc->mw_count; i++) {
+		char buf[30];
+
+		snprintf(buf, sizeof(buf), "mw%d", i);
+		debugfs_create_file(buf, S_IRUSR | S_IWUSR, tc->dbgfs,
+				    &tc->mws[i], &tool_mw_fops);
+
+		snprintf(buf, sizeof(buf), "peer_trans%d", i);
+		debugfs_create_file(buf, S_IRUSR | S_IWUSR, tc->dbgfs,
+				    &tc->mws[i], &tool_peer_mw_trans_fops);
+	}
+}
+
+/*
+ * Bind the tool to an NTB device: validate capabilities, map the memory
+ * windows, populate debugfs, install the ctx ops and bring the link up.
+ * Returns 0 on success or a negative errno.
+ */
+static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
+{
+	struct tool_ctx *tc;
+	int rc;
+	int i;
+
+	/* The tool programs inbound translations directly; require the op. */
+	if (!ntb->ops->mw_set_trans) {
+		dev_dbg(&ntb->dev, "need inbound MW based NTB API\n");
+		rc = -EINVAL;
+		goto err_tc;
+	}
+
+	if (ntb_spad_count(ntb) < 1) {
+		dev_dbg(&ntb->dev, "no enough scratchpads\n");
+		rc = -EINVAL;
+		goto err_tc;
+	}
+
+	/* Unsafe doorbell/scratchpad access is tolerated, only logged. */
+	if (ntb_db_is_unsafe(ntb))
+		dev_dbg(&ntb->dev, "doorbell is unsafe\n");
+
+	if (ntb_spad_is_unsafe(ntb))
+		dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+
+	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
+		dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");
+
+	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
+	if (!tc) {
+		rc = -ENOMEM;
+		goto err_tc;
+	}
+
+	tc->ntb = ntb;
+	init_waitqueue_head(&tc->link_wq);
+
+	/* Map every peer window we can track, up to the MAX_MWS limit. */
+	tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
+	for (i = 0; i < tc->mw_count; i++) {
+		rc = tool_init_mw(tc, i);
+		if (rc)
+			goto err_ctx;
+	}
+
+	tool_setup_dbgfs(tc);
+
+	rc = ntb_set_ctx(ntb, tc, &tool_ops);
+	if (rc)
+		goto err_ctx;
+
+	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+	ntb_link_event(ntb);
+
+	return 0;
+
+err_ctx:
+	/* tool_free_mws() safely skips windows that were never mapped. */
+	tool_free_mws(tc);
+	debugfs_remove_recursive(tc->dbgfs);
+	kfree(tc);
+err_tc:
+	return rc;
+}
+
+/* Undo tool_probe(): release windows, detach the ctx, drop the link. */
+static void tool_remove(struct ntb_client *self, struct ntb_dev *ntb)
+{
+	struct tool_ctx *tc = ntb->ctx;
+
+	/* Free windows while the ctx (and its event callbacks) is still set. */
+	tool_free_mws(tc);
+
+	ntb_clear_ctx(ntb);
+	ntb_link_disable(ntb);
+
+	debugfs_remove_recursive(tc->dbgfs);
+	kfree(tc);
+}
+
+/* NTB client registration: binds tool_probe()/tool_remove() to devices. */
+static struct ntb_client tool_client = {
+	.ops = {
+		.probe = tool_probe,
+		.remove = tool_remove,
+	},
+};
+
+static int __init tool_init(void)
+{
+	int rc;
+
+	/* Debugfs root for all tool devices; a NULL dir is tolerated later. */
+	if (debugfs_initialized())
+		tool_dbgfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	rc = ntb_register_client(&tool_client);
+	if (rc) {
+		debugfs_remove_recursive(tool_dbgfs);
+		return rc;
+	}
+
+	return 0;
+}
+module_init(tool_init);
+
+static void __exit tool_exit(void)
+{
+	/* Unregister first so no device can touch debugfs while we remove it. */
+	ntb_unregister_client(&tool_client);
+	debugfs_remove_recursive(tool_dbgfs);
+}
+module_exit(tool_exit);